From 4b8541cbb8ae212fa2137412212eb982a42a170d Mon Sep 17 00:00:00 2001 From: chaojixx Date: Mon, 20 Jan 2020 17:32:08 -0500 Subject: [PATCH 01/59] Imported source files from QEMU v1.0.0 target-arm/libcpu Signed-off-by: chaojixx --- CMakeLists.txt | 33 +- include/cpu/apic.h | 10 + include/cpu/arm/cpu.h | 216 + include/cpu/arm/defs.h | 34 + include/cpu/arm/helper.h | 47 + include/cpu/arm/kvm_arm.h | 300 + include/cpu/exec.h | 9 +- include/cpu/kvm.h | 17 +- include/cpu/memdbg.h | 6 + src/CMakeLists.txt | 76 +- src/bswap.h | 6 +- src/cpu-all.h | 11 +- src/cpu-exec.c | 165 +- src/cpus.c | 2 + src/disas.c | 7 + src/exec-all.h | 7 +- src/exec-ram.h | 17 +- src/exec-tb.c | 1 - src/exec.c | 4 +- src/exec.h | 3 +- src/fpu/softfloat.c | 6 +- src/memory.c | 1 + src/target-arm/cpu.h | 486 ++ src/target-arm/helper.c | 3026 +++++++++ src/target-arm/helper.h | 506 ++ src/target-arm/iwmmxt_helper.c | 681 +++ src/target-arm/neon_helper.c | 2020 ++++++ src/target-arm/op_addsub.h | 103 + src/target-arm/op_helper.c | 463 ++ src/target-arm/translate.c | 10207 +++++++++++++++++++++++++++++++ src/translate-all.c | 17 +- 31 files changed, 18385 insertions(+), 102 deletions(-) create mode 100644 include/cpu/arm/cpu.h create mode 100644 include/cpu/arm/defs.h create mode 100644 include/cpu/arm/helper.h create mode 100644 include/cpu/arm/kvm_arm.h create mode 100644 src/target-arm/cpu.h create mode 100644 src/target-arm/helper.c create mode 100644 src/target-arm/helper.h create mode 100644 src/target-arm/iwmmxt_helper.c create mode 100644 src/target-arm/neon_helper.c create mode 100644 src/target-arm/op_addsub.h create mode 100644 src/target-arm/op_helper.c create mode 100644 src/target-arm/translate.c diff --git a/CMakeLists.txt b/CMakeLists.txt index c328a88..89c3168 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -53,14 +53,11 @@ file(WRITE ${CONFIG_TARGET_H} #define TARGET_SHORT_ALIGNMENT 2 #define TARGET_INT_ALIGNMENT 4 #define TARGET_LLONG_ALIGNMENT 8 -#define TARGET_I386 1 -#define TARGET_PHYS_ADDR_BITS 64 #define CONFIG_SOFTMMU 1 -#define CONFIG_I386_DIS 1 " ) -if (S2EGUEST_INCLUDE_DIR) +if(S2EGUEST_INCLUDE_DIR) file(APPEND ${CONFIG_TARGET_H} "#define CONFIG_SYMBEX_OPCODES 1\n") endif() @@ -72,13 +69,25 @@ message(STATUS "WITH_TARGET: ${WITH_TARGET}") if(WITH_TARGET MATCHES "i386") file(APPEND ${CONFIG_TARGET_H} "#define TARGET_ARCH \"i386\"\n") + file(APPEND ${CONFIG_TARGET_H} "#define TARGET_I386 1\n") file(APPEND ${CONFIG_TARGET_H} "#define TARGET_LONG_ALIGNMENT 4\n") file(APPEND ${CONFIG_TARGET_H} "#define TARGET_INSN_START_EXTRA_WORDS 1\n") + + set(TARGET_DIR "target-i386") elseif(WITH_TARGET MATCHES "x86_64") file(APPEND ${CONFIG_TARGET_H} "#define TARGET_ARCH \"x86_64\"\n") file(APPEND ${CONFIG_TARGET_H} "#define TARGET_X86_64 1\n") file(APPEND ${CONFIG_TARGET_H} "#define TARGET_LONG_ALIGNMENT 8\n") file(APPEND ${CONFIG_TARGET_H} "#define TARGET_INSN_START_EXTRA_WORDS 1\n") + + set(TARGET_DIR "target-i386") +elseif(WITH_TARGET MATCHES "arm") + file(APPEND ${CONFIG_TARGET_H} "#define TARGET_ARCH \"arm\"\n") + file(APPEND ${CONFIG_TARGET_H} "#define TARGET_ARM 1\n") + file(APPEND ${CONFIG_TARGET_H} "#define TARGET_LONG_ALIGNMENT 4\n") + file(APPEND ${CONFIG_TARGET_H} "#define TARGET_INSN_START_EXTRA_WORDS 1\n") + + set(TARGET_DIR "target-arm") else() message(FATAL_ERROR "Incorrect target ${WITH_TARGET}") endif() @@ -93,15 +102,13 @@ if(WITH_TARGET MATCHES "s2e") endif() # We want to keep NDEBUG in all builds -foreach (flags_var_to_scrub - CMAKE_CXX_FLAGS_RELEASE - CMAKE_CXX_FLAGS_RELWITHDEBINFO - 
CMAKE_CXX_FLAGS_MINSIZEREL - CMAKE_C_FLAGS_RELEASE - CMAKE_C_FLAGS_RELWITHDEBINFO - CMAKE_C_FLAGS_MINSIZEREL) -string (REGEX REPLACE "(^| )[/-]D *NDEBUG($| )" " " - "${flags_var_to_scrub}" "${${flags_var_to_scrub}}") +foreach(flags_var_to_scrub CMAKE_CXX_FLAGS_RELEASE + CMAKE_CXX_FLAGS_RELWITHDEBINFO + CMAKE_CXX_FLAGS_MINSIZEREL + CMAKE_C_FLAGS_RELEASE + CMAKE_C_FLAGS_RELWITHDEBINFO + CMAKE_C_FLAGS_MINSIZEREL) + string(REGEX REPLACE "(^| )[/-]D *NDEBUG($| )" " " "${flags_var_to_scrub}" "${${flags_var_to_scrub}}") endforeach() include_directories(${GLIB_PKG_INCLUDE_DIRS} diff --git a/include/cpu/apic.h b/include/cpu/apic.h index b2e5ec2..d7cb5ef 100644 --- a/include/cpu/apic.h +++ b/include/cpu/apic.h @@ -19,14 +19,24 @@ #ifndef APIC_H #define APIC_H +#include + +#if defined(TARGET_I386) #include +#elif defined(TARGET_ARM) +#include +#else +#error unsupported target CPU +#endif #include #include + #ifdef __cplusplus extern "C" { #endif + struct DeviceState; typedef struct DeviceState DeviceState; diff --git a/include/cpu/arm/cpu.h b/include/cpu/arm/cpu.h new file mode 100644 index 0000000..b70d327 --- /dev/null +++ b/include/cpu/arm/cpu.h @@ -0,0 +1,216 @@ +/// Copyright (C) 2003 Fabrice Bellard +/// Copyright (C) 2010 Dependable Systems Laboratory, EPFL +/// Copyright (C) 2017 Adrian Herrera +/// Copyrights of all contributions belong to their respective owners. +/// +/// This library is free software; you can redistribute it and/or +/// modify it under the terms of the GNU Library General Public +/// License as published by the Free Software Foundation; either +/// version 2 of the License, or (at your option) any later version. +/// +/// This library is distributed in the hope that it will be useful, +/// but WITHOUT ANY WARRANTY; without even the implied warranty of +/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +/// Library General Public License for more details. +/// +/// You should have received a copy of the GNU Library General Public +/// License along with this library; if not, see . + +#ifndef __LIBCPU_ARM_CPU_H__ +#define __LIBCPU_ARM_CPU_H__ + +#include + +#include +#include +#include +#include + +//#define CPUState struct CPUARMState + +#define CPUArchState struct CPUARMState + +#include "defs.h" + +typedef void ARMWriteCPFunc(void *opaque, int cp_info, int srcreg, int operand, uint32_t value); +typedef uint32_t ARMReadCPFunc(void *opaque, int cp_info, int dstreg, int operand); + +/* + * We currently assume float and double are IEEE single and double precision respectively. Doing runtime conversions is + * tricky because VFP registers may contain integer values (eg. as the result of a FTOSI instruction). + * + * s<2n> maps to the least significant half of d + * s<2n+1> maps to the most significant half of d + */ + +typedef struct CPUARMState { + uint32_t spsr; + + /* Banked registers. */ + uint32_t banked_spsr[6]; + uint32_t banked_r13[6]; + uint32_t banked_r14[6]; + + /* These hold r8-r12. */ + uint32_t usr_regs[5]; + uint32_t fiq_regs[5]; + + /* cpsr flag cache for faster execution */ + uint32_t CF; /* 0 or 1 */ + uint32_t VF; /* V is the bit 31. All other bits are undefined */ + uint32_t NF; /* N is bit 31. All other bits are undefined. */ + uint32_t ZF; /* Z set if zero. */ + + /* + * Regs for current mode. + * + * regs[15] is the border between concrete and symbolic area, i.e., regs[15] is in concrete-only-area + */ + uint32_t regs[16]; + + uint32_t QF; /* 0 or 1 */ + uint32_t GE; /* cpsr[19:16] */ + uint32_t thumb; /* cpsr[5]. 
0 = arm mode, 1 = thumb mode. */
+    uint32_t condexec_bits; /* IT bits. cpsr[15:10,26:25]. */
+
+    /*
+     * Frequently accessed CPSR bits are stored separately for efficiency. This contains all the other bits. Use
+     * cpsr_{read,write} to access the whole CPSR.
+     */
+    uint32_t uncached_cpsr;
+
+    /* System control coprocessor (cp15) */
+    struct {
+        uint32_t c0_cpuid;
+        uint32_t c0_cachetype;
+        uint32_t c0_ccsid[16]; /* Cache size. */
+        uint32_t c0_clid; /* Cache level. */
+        uint32_t c0_cssel; /* Cache size selection. */
+        uint32_t c0_c1[8]; /* Feature registers. */
+        uint32_t c0_c2[8]; /* Instruction set registers. */
+        uint32_t c1_sys; /* System control register. */
+        uint32_t c1_coproc; /* Coprocessor access register. */
+        uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */
+        uint32_t c1_scr; /* secure config register. */
+        uint32_t c2_base0; /* MMU translation table base 0. */
+        uint32_t c2_base1; /* MMU translation table base 1. */
+        uint32_t c2_control; /* MMU translation table base control. */
+        uint32_t c2_mask; /* MMU translation table base selection mask. */
+        uint32_t c2_base_mask; /* MMU translation table base 0 mask. */
+        uint32_t c2_data; /* MPU data cachable bits. */
+        uint32_t c2_insn; /* MPU instruction cachable bits. */
+        uint32_t c3; /* MMU domain access control register + MPU write buffer control. */
+        uint32_t c5_insn; /* Fault status registers. */
+        uint32_t c5_data;
+        uint32_t c6_region[8]; /* MPU base/size registers. */
+        uint32_t c6_insn; /* Fault address registers. */
+        uint32_t c6_data;
+        uint32_t c7_par; /* Translation result. */
+        uint32_t c9_insn; /* Cache lockdown registers. */
+        uint32_t c9_data;
+        uint32_t c9_pmcr; /* performance monitor control register */
+        uint32_t c9_pmcnten; /* perf monitor counter enables */
+        uint32_t c9_pmovsr; /* perf monitor overflow status */
+        uint32_t c9_pmxevtyper; /* perf monitor event type */
+        uint32_t c9_pmuserenr; /* perf monitor user enable */
+        uint32_t c9_pminten; /* perf monitor interrupt enables */
+        uint32_t c13_fcse; /* FCSE PID. */
+        uint32_t c13_context; /* Context ID. */
+        uint32_t c13_tls1; /* User RW Thread register. */
+        uint32_t c13_tls2; /* User RO Thread register. */
+        uint32_t c13_tls3; /* Privileged Thread register. */
+        uint32_t c15_cpar; /* XScale Coprocessor Access Register */
+        uint32_t c15_ticonfig; /* TI925T configuration byte. */
+        uint32_t c15_i_max; /* Maximum D-cache dirty line index. */
+        uint32_t c15_i_min; /* Minimum D-cache dirty line index. */
+        uint32_t c15_threadid; /* TI debugger thread-ID. */
+        uint32_t c15_config_base_address; /* SCU base address. */
+        uint32_t c15_diagnostic; /* diagnostic register */
+        uint32_t c15_power_diagnostic;
+        uint32_t c15_power_control; /* power control */
+    } cp15;
+
+    struct {
+        uint32_t other_sp;
+        uint32_t vecbase;
+        uint32_t basepri;
+        uint32_t control;
+        int current_sp;
+        int exception;
+        int pending_exception;
+    } v7m;
+
+    /* Thumb-2 EE state. */
+    uint32_t teecr;
+    uint32_t teehbr;
+
+
+    /* VFP coprocessor state. */
+    struct {
+        float64 regs[32];
+
+        uint32_t xregs[16];
+        /* We store these fpcsr fields separately for convenience. */
+        int vec_len;
+        int vec_stride;
+
+        /* scratch space when Tn are not sufficient. */
+        uint32_t scratch[8];
+
+        /* fp_status is the "normal" fp status. 
standard_fp_status retains + * values corresponding to the ARM "Standard FPSCR Value", ie + * default-NaN, flush-to-zero, round-to-nearest and is used by + * any operations (generally Neon) which the architecture defines + * as controlled by the standard FPSCR value rather than the FPSCR. + * + * To avoid having to transfer exception bits around, we simply + * say that the FPSCR cumulative exception flags are the logical + * OR of the flags in the two fp statuses. This relies on the + * only thing which needs to read the exception flags being + * an explicit FPSCR read. + */ + float_status fp_status; + float_status standard_fp_status; + } vfp; + uint32_t exclusive_addr; + uint32_t exclusive_val; + uint32_t exclusive_high; + + /* iwMMXt coprocessor state. */ + struct { + uint64_t regs[16]; + uint64_t val; + + uint32_t cregs[16]; + } iwmmxt; + + /* For mixed endian mode. */ + bool bswap_code; + + CPU_COMMON + + /* These fields after the common ones so they are preserved on reset. */ + + /* Internal CPU feature flags. */ + uint32_t features; + /* Coprocessor IO used by peripherals */ + struct { + ARMReadCPFunc *cp_read; + ARMWriteCPFunc *cp_write; + void *opaque; + } cp[15]; + void *nvic; + const struct arm_boot_info *boot_info; + + /* For KVM */ + int kvm_request_interrupt_window; + int kvm_irq; + +} CPUARMState; +CPUARMState *cpu_arm_init(const char *cpu_model); +int cpu_arm_exec(CPUARMState *s); + + + +#endif diff --git a/include/cpu/arm/defs.h b/include/cpu/arm/defs.h new file mode 100644 index 0000000..8164712 --- /dev/null +++ b/include/cpu/arm/defs.h @@ -0,0 +1,34 @@ +/// Copyright (C) 2003 Fabrice Bellard +/// Copyright (C) 2010 Dependable Systems Laboratory, EPFL +/// Copyright (C) 2017 Adrian Herrera +/// Copyrights of all contributions belong to their respective owners. +/// +/// This library is free software; you can redistribute it and/or +/// modify it under the terms of the GNU Library General Public +/// License as published by the Free Software Foundation; either +/// version 2 of the License, or (at your option) any later version. +/// +/// This library is distributed in the hope that it will be useful, +/// but WITHOUT ANY WARRANTY; without even the implied warranty of +/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +/// Library General Public License for more details. +/// +/// You should have received a copy of the GNU Library General Public +/// License along with this library; if not, see . + +#ifndef __CPU_ARM_DEFS__ +#define __CPU_ARM_DEFS__ + +// clang-format off + +/*******************************************/ + +#define NB_MMU_MODES 2 +/* The ARM MMU allows 1k pages. */ +/* ??? Linux doesn't actually use these, and they're deprecated in recent + architecture revisions. Maybe a configure option to disable them. */ +#define TARGET_PAGE_BITS 10 + +#define TARGET_HAS_ICE 1 + +#endif diff --git a/include/cpu/arm/helper.h b/include/cpu/arm/helper.h new file mode 100644 index 0000000..fb618f8 --- /dev/null +++ b/include/cpu/arm/helper.h @@ -0,0 +1,47 @@ +/// Copyright (C) 2010 Dependable Systems Laboratory, EPFL +/// Copyright (C) 2016 Cyberhaven +/// Copyrights of all contributions belong to their respective owners. +/// +/// This library is free software; you can redistribute it and/or +/// modify it under the terms of the GNU Library General Public +/// License as published by the Free Software Foundation; either +/// version 2 of the License, or (at your option) any later version. 
+///
+/// This library is distributed in the hope that it will be useful,
+/// but WITHOUT ANY WARRANTY; without even the implied warranty of
+/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+/// Library General Public License for more details.
+///
+/// You should have received a copy of the GNU Library General Public
+/// License along with this library; if not, see <http://www.gnu.org/licenses/>.
+///
+#ifndef __LIBCPU_ARM_HELPER_H__
+
+#define __LIBCPU_ARM_HELPER_H__
+#define _M_CF (1 << 1)
+#define _M_VF (1 << 2)
+#define _M_NF (1 << 3)
+#define _M_ZF (1 << 4)
+#define _M_R0 (1 << 5)
+#define _M_R1 (1 << 6)
+#define _M_R2 (1 << 7)
+#define _M_R3 (1 << 8)
+#define _M_R4 (1 << 9)
+#define _M_R5 (1 << 10)
+#define _M_R6 (1 << 11)
+#define _M_R7 (1 << 12)
+#define _M_R8 (1 << 13)
+#define _M_R9 (1 << 14)
+#define _M_R10 (1 << 15)
+#define _M_R11 (1 << 16)
+#define _M_R12 (1 << 17)
+#define _M_R13 (1 << 18)
+#define _M_R14 (1 << 19)
+#define _M_SPSR (1 << 20)
+#define _M_BANKED_SPSR ((unsigned long int) (63) << 21)
+#define _M_BANKED_R13 ((unsigned long int) (63) << 27)
+#define _M_BANKED_R14 ((unsigned long int) (63) << 33)
+#define _M_USR_REGS ((unsigned long int) (31) << 39)
+#define _M_REGS (32767 << 5)
+#define _M_ALL ~((unsigned long int) (0) << 39)
+#endif
diff --git a/include/cpu/arm/kvm_arm.h b/include/cpu/arm/kvm_arm.h
new file mode 100644
index 0000000..f950029
--- /dev/null
+++ b/include/cpu/arm/kvm_arm.h
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef __ARM_KVM_H__ +#define __ARM_KVM_H__ + +#include +#include +#include + +#define __KVM_HAVE_GUEST_DEBUG +#define __KVM_HAVE_IRQ_LINE +#define __KVM_HAVE_READONLY_MEM +#define __KVM_HAVE_VCPU_EVENTS + +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 + +#define KVM_REG_SIZE(id) \ + (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) + +/* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */ +#define KVM_ARM_SVC_sp svc_regs[0] +#define KVM_ARM_SVC_lr svc_regs[1] +#define KVM_ARM_SVC_spsr svc_regs[2] +#define KVM_ARM_ABT_sp abt_regs[0] +#define KVM_ARM_ABT_lr abt_regs[1] +#define KVM_ARM_ABT_spsr abt_regs[2] +#define KVM_ARM_UND_sp und_regs[0] +#define KVM_ARM_UND_lr und_regs[1] +#define KVM_ARM_UND_spsr und_regs[2] +#define KVM_ARM_IRQ_sp irq_regs[0] +#define KVM_ARM_IRQ_lr irq_regs[1] +#define KVM_ARM_IRQ_spsr irq_regs[2] + +/* Valid only for fiq_regs in struct kvm_regs */ +#define KVM_ARM_FIQ_r8 fiq_regs[0] +#define KVM_ARM_FIQ_r9 fiq_regs[1] +#define KVM_ARM_FIQ_r10 fiq_regs[2] +#define KVM_ARM_FIQ_fp fiq_regs[3] +#define KVM_ARM_FIQ_ip fiq_regs[4] +#define KVM_ARM_FIQ_sp fiq_regs[5] +#define KVM_ARM_FIQ_lr fiq_regs[6] +#define KVM_ARM_FIQ_spsr fiq_regs[7] + +struct kvm_regs { + struct pt_regs usr_regs; /* R0_usr - R14_usr, PC, CPSR */ + unsigned long svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */ + unsigned long abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */ + unsigned long und_regs[3]; /* SP_und, LR_und, SPSR_und */ + unsigned long irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */ + unsigned long fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */ +}; + +/* Supported Processor Types */ +#define KVM_ARM_TARGET_CORTEX_A15 0 +#define KVM_ARM_TARGET_CORTEX_A7 1 +#define KVM_ARM_NUM_TARGETS 2 + +/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */ +#define KVM_ARM_DEVICE_TYPE_SHIFT 0 +#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT) +#define KVM_ARM_DEVICE_ID_SHIFT 16 +#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT) + +/* Supported device IDs */ +#define KVM_ARM_DEVICE_VGIC_V2 0 + +/* Supported VGIC address types */ +#define KVM_VGIC_V2_ADDR_TYPE_DIST 0 +#define KVM_VGIC_V2_ADDR_TYPE_CPU 1 + +#define KVM_VGIC_V2_DIST_SIZE 0x1000 +#define KVM_VGIC_V2_CPU_SIZE 0x2000 + +/* Supported VGICv3 address types */ +#define KVM_VGIC_V3_ADDR_TYPE_DIST 2 +#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3 +#define KVM_VGIC_ITS_ADDR_TYPE 4 +#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5 + +#define KVM_VGIC_V3_DIST_SIZE SZ_64K +#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K) +#define KVM_VGIC_V3_ITS_SIZE (2 * SZ_64K) + +#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */ +#define KVM_ARM_VCPU_PSCI_0_2 1 /* CPU uses PSCI v0.2 */ + +struct kvm_vcpu_init { + __u32 target; + __u32 features[7]; +}; + +struct kvm_sregs { +}; + +struct kvm_fpu { +}; + +struct kvm_guest_debug_arch { +}; + +struct kvm_debug_exit_arch { +}; + +struct kvm_sync_regs { + /* Used with KVM_CAP_ARM_USER_IRQ */ + __u64 device_irq_level; +}; + +struct kvm_arch_memory_slot { +}; + +/* for KVM_GET/SET_VCPU_EVENTS */ +struct kvm_vcpu_events { + struct { + __u8 serror_pending; + __u8 serror_has_esr; + /* Align it to 8 bytes */ + __u8 pad[6]; + __u64 serror_esr; + } exception; + __u32 reserved[12]; +}; + +/* If you need to interpret the index values, here is the key: */ +#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000 +#define KVM_REG_ARM_COPROC_SHIFT 16 +#define KVM_REG_ARM_32_OPC2_MASK 0x0000000000000007 +#define KVM_REG_ARM_32_OPC2_SHIFT 0 +#define KVM_REG_ARM_OPC1_MASK 0x0000000000000078 
+#define KVM_REG_ARM_OPC1_SHIFT 3 +#define KVM_REG_ARM_CRM_MASK 0x0000000000000780 +#define KVM_REG_ARM_CRM_SHIFT 7 +#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800 +#define KVM_REG_ARM_32_CRN_SHIFT 11 +/* + * For KVM currently all guest registers are nonsecure, but we reserve a bit + * in the encoding to distinguish secure from nonsecure for AArch32 system + * registers that are banked by security. This is 1 for the secure banked + * register, and 0 for the nonsecure banked register or if the register is + * not banked by security. + */ +#define KVM_REG_ARM_SECURE_MASK 0x0000000010000000 +#define KVM_REG_ARM_SECURE_SHIFT 28 + +#define ARM_CP15_REG_SHIFT_MASK(x,n) \ + (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK) + +#define __ARM_CP15_REG(op1,crn,crm,op2) \ + (KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT) | \ + ARM_CP15_REG_SHIFT_MASK(op1, OPC1) | \ + ARM_CP15_REG_SHIFT_MASK(crn, 32_CRN) | \ + ARM_CP15_REG_SHIFT_MASK(crm, CRM) | \ + ARM_CP15_REG_SHIFT_MASK(op2, 32_OPC2)) + +#define ARM_CP15_REG32(...) (__ARM_CP15_REG(__VA_ARGS__) | KVM_REG_SIZE_U32) + +#define __ARM_CP15_REG64(op1,crm) \ + (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64) +#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__) + +/* PL1 Physical Timer Registers */ +#define KVM_REG_ARM_PTIMER_CTL ARM_CP15_REG32(0, 14, 2, 1) +#define KVM_REG_ARM_PTIMER_CNT ARM_CP15_REG64(0, 14) +#define KVM_REG_ARM_PTIMER_CVAL ARM_CP15_REG64(2, 14) + +/* Virtual Timer Registers */ +#define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1) +#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14) +#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14) + +/* Normal registers are mapped as coprocessor 16. */ +#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4) + +/* Some registers need more space to represent values. */ +#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00 +#define KVM_REG_ARM_DEMUX_ID_SHIFT 8 +#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT) +#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF +#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0 + +/* VFP registers: we could overload CP10 like ARM does, but that's ugly. 
*/ +#define KVM_REG_ARM_VFP (0x0012 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_VFP_MASK 0x000000000000FFFF +#define KVM_REG_ARM_VFP_BASE_REG 0x0 +#define KVM_REG_ARM_VFP_FPSID 0x1000 +#define KVM_REG_ARM_VFP_FPSCR 0x1001 +#define KVM_REG_ARM_VFP_MVFR1 0x1006 +#define KVM_REG_ARM_VFP_MVFR0 0x1007 +#define KVM_REG_ARM_VFP_FPEXC 0x1008 +#define KVM_REG_ARM_VFP_FPINST 0x1009 +#define KVM_REG_ARM_VFP_FPINST2 0x100A + +/* KVM-as-firmware specific pseudo-registers */ +#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \ + KVM_REG_ARM_FW | ((r) & 0xffff)) +#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) + +/* Device Control API: ARM VGIC */ +#define KVM_DEV_ARM_VGIC_GRP_ADDR 0 +#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 +#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2 +#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32 +#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) +#define KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT 32 +#define KVM_DEV_ARM_VGIC_V3_MPIDR_MASK \ + (0xffffffffULL << KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT) +#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0 +#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) +#define KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK (0xffff) +#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3 +#define KVM_DEV_ARM_VGIC_GRP_CTRL 4 +#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5 +#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6 +#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7 +#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8 +#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10 +#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \ + (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) +#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff +#define VGIC_LEVEL_INFO_LINE_LEVEL 0 + +/* Device Control API on vcpu fd */ +#define KVM_ARM_VCPU_PMU_V3_CTRL 0 +#define KVM_ARM_VCPU_PMU_V3_IRQ 0 +#define KVM_ARM_VCPU_PMU_V3_INIT 1 +#define KVM_ARM_VCPU_TIMER_CTRL 1 +#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0 +#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1 + +#define KVM_DEV_ARM_VGIC_CTRL_INIT 0 +#define KVM_DEV_ARM_ITS_SAVE_TABLES 1 +#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 +#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 +#define KVM_DEV_ARM_ITS_CTRL_RESET 4 + +/* KVM_IRQ_LINE irq field index values */ +#define KVM_ARM_IRQ_TYPE_SHIFT 24 +#define KVM_ARM_IRQ_TYPE_MASK 0xff +#define KVM_ARM_IRQ_VCPU_SHIFT 16 +#define KVM_ARM_IRQ_VCPU_MASK 0xff +#define KVM_ARM_IRQ_NUM_SHIFT 0 +#define KVM_ARM_IRQ_NUM_MASK 0xffff + +/* irq_type field */ +#define KVM_ARM_IRQ_TYPE_CPU 0 +#define KVM_ARM_IRQ_TYPE_SPI 1 +#define KVM_ARM_IRQ_TYPE_PPI 2 + +/* out-of-kernel GIC cpu interrupt injection irq_number field */ +#define KVM_ARM_IRQ_CPU_IRQ 0 +#define KVM_ARM_IRQ_CPU_FIQ 1 + +/* + * This used to hold the highest supported SPI, but it is now obsolete + * and only here to provide source code level compatibility with older + * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS. + */ +#ifndef __KERNEL__ +#define KVM_ARM_IRQ_GIC_MAX 127 +#endif + +/* One single KVM irqchip, ie. 
the VGIC */ +#define KVM_NR_IRQCHIPS 1 + +/* PSCI interface */ +#define KVM_PSCI_FN_BASE 0x95c1ba5e +#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) + +#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0) +#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1) +#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2) +#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3) + +#define KVM_PSCI_RET_SUCCESS PSCI_RET_SUCCESS +#define KVM_PSCI_RET_NI PSCI_RET_NOT_SUPPORTED +#define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS +#define KVM_PSCI_RET_DENIED PSCI_RET_DENIED + +#endif /* __ARM_KVM_H__ */ + diff --git a/include/cpu/exec.h b/include/cpu/exec.h index de971b0..f36f28c 100644 --- a/include/cpu/exec.h +++ b/include/cpu/exec.h @@ -17,11 +17,18 @@ /// License along with this library; if not, see . #ifndef __LIBCPU_EXEC_H__ - #define __LIBCPU_EXEC_H__ #include + +#if defined(TARGET_I386) #include +#elif defined(TARGET_ARM) +#include +#else +#error unsupported target CPU +#endif + #include #include #include diff --git a/include/cpu/kvm.h b/include/cpu/kvm.h index f91492c..88d66ac 100644 --- a/include/cpu/kvm.h +++ b/include/cpu/kvm.h @@ -9,11 +9,22 @@ #include + #ifndef BIT #define BIT(n) (1 << (n)) #endif + +#include +#if defined(TARGET_I386)|| defined(TARGET_X86_64) #include +#elif defined(TARGET_ARM) +#include +#else +#error unsupported target CPU +#endif + + #include #define KVM_API_VERSION 12 @@ -313,7 +324,8 @@ struct kvm_run { /* KVM_EXIT_SYSTEM_EVENT */ struct { #define KVM_SYSTEM_EVENT_SHUTDOWN 1 -#define KVM_SYSTEM_EVENT_RESET 2 +#define KVM_SYSTEM_EVENT_RESET 2 +#define KVM_SYSTEM_EVENT_CRASH 3 __u32 type; __u64 flags; } system_event; @@ -933,6 +945,9 @@ struct kvm_dirty_tlb { #define KVM_REG_SIZE_U512 0x0060000000000000ULL #define KVM_REG_SIZE_U1024 0x0070000000000000ULL +#define KVM_REG_SIZE(id) \ + (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) + struct kvm_reg_list { __u64 n; /* number of regs */ __u64 reg[0]; diff --git a/include/cpu/memdbg.h b/include/cpu/memdbg.h index 28f272a..1ed5d56 100644 --- a/include/cpu/memdbg.h +++ b/include/cpu/memdbg.h @@ -19,7 +19,13 @@ #define __LIBCPU_MEMDBG_H__ #include +#if defined(TARGET_I386) #include +#elif defined(TARGET_ARM) +#include +#else +#error unsupported target CPU +#endif #include #ifdef __cplusplus diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index fca27c9..c751577 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,20 +1,33 @@ # Copyright 2016 - Cyberhaven # This work is licensed under the terms of the GNU LGPL, version 2.1 or later. 
-add_library( - cpu cpu-exec.c cpus.c exec.c exec-bp.c exec-log.c exec-memdbg.c exec-phys.c exec-phystb.c exec-ram.c exec-tb.c exec-tlb.c ioport.c memory.c timer.c translate-all.c - fpu/softfloat.c precise-pc.c - target-i386/cpuid.c target-i386/helper.c target-i386/op_helper.c target-i386/translate.c disas.c -) +# Make sure we include any source files in the target-specific directory +file(GLOB TARGET_SRC_FILES ${TARGET_DIR}/*.c) +add_library(cpu cpu-exec.c + cpus.c + exec.c + exec-bp.c + exec-log.c + exec-memdbg.c + exec-phys.c + exec-phystb.c + exec-ram.c + exec-tb.c + exec-tlb.c + ioport.c + memory.c + timer.c + translate-all.c + disas.c + fpu/softfloat.c + precise-pc.c + ${TARGET_SRC_FILES}) -target_include_directories (cpu PUBLIC - ${CMAKE_CURRENT_SOURCE_DIR} - ${CMAKE_CURRENT_SOURCE_DIR}/../include - ${CMAKE_CURRENT_SOURCE_DIR}/target-i386 - ${CMAKE_CURRENT_SOURCE_DIR}/../include/fpu - ${CMAKE_BINARY_DIR}/include -) +target_include_directories(cpu PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_SOURCE_DIR}/include + ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET_DIR} + ${CMAKE_BINARY_DIR}/include) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D__STDC_FORMAT_MACROS -D_GNU_SOURCE -DNEED_CPU_H -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -DTARGET_PHYS_ADDR_BITS=64") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -fPIC -Werror -fno-omit-frame-pointer") @@ -23,31 +36,30 @@ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fexceptions") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -fPIC -Wno-mismatched-tags -Werror -Wno-zero-length-array") +if(WITH_SYMBEX) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DCONFIG_SYMBEX") -if (WITH_SYMBEX) -set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DCONFIG_SYMBEX") + set(CARGS_LIST "${CMAKE_C_FLAGS}") + separate_arguments(CARGS_LIST) -set(CARGS_LIST "${CMAKE_C_FLAGS}") -separate_arguments(CARGS_LIST) + get_property(dirs TARGET cpu PROPERTY INCLUDE_DIRECTORIES) + foreach(dir ${dirs}) + message(STATUS "dir='${dir}'") + set(BC_INC_DIRS "${BC_INC_DIRS} -I${dir}") + endforeach() + separate_arguments(BC_INC_DIRS) -get_property(dirs TARGET cpu PROPERTY INCLUDE_DIRECTORIES) -foreach(dir ${dirs}) - message(STATUS "dir='${dir}'") - set(BC_INC_DIRS "${BC_INC_DIRS} -I${dir}") -endforeach() -separate_arguments(BC_INC_DIRS) + get_filename_component(LLVM_BIN_DIR ${CMAKE_C_COMPILER} DIRECTORY) -get_filename_component(LLVM_BIN_DIR ${CMAKE_C_COMPILER} DIRECTORY) + add_custom_target( + op_helper_bc ALL + ${CMAKE_C_COMPILER} ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET_DIR}/op_helper.c -c ${CARGS_LIST} -Wno-unused-function -O3 -DSYMBEX_LLVM_LIB -emit-llvm -o ${CMAKE_CURRENT_BINARY_DIR}/op_helper-org.bc ${BC_INC_DIRS} + COMMAND ${CMAKE_C_COMPILER} ${CMAKE_CURRENT_SOURCE_DIR}/fpu/softfloat.c -c ${CARGS_LIST} -Wno-unused-function -O3 -DSYMBEX_LLVM_LIB -emit-llvm -o ${CMAKE_CURRENT_BINARY_DIR}/softfloat.bc ${BC_INC_DIRS} + COMMAND ${LLVM_BIN_DIR}/llvm-link ${CMAKE_CURRENT_BINARY_DIR}/op_helper-org.bc ${CMAKE_CURRENT_BINARY_DIR}/softfloat.bc -o ${CMAKE_CURRENT_BINARY_DIR}/op_helper.bc + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET_DIR}/op_helper.c ${CMAKE_CURRENT_SOURCE_DIR}/fpu/softfloat.c + ) -add_custom_target( - op_helper_bc ALL - ${CMAKE_C_COMPILER} ${CMAKE_CURRENT_SOURCE_DIR}/target-i386/op_helper.c -c ${CARGS_LIST} -Wno-unused-function -O3 -DSYMBEX_LLVM_LIB -emit-llvm -o ${CMAKE_CURRENT_BINARY_DIR}/op_helper-org.bc ${BC_INC_DIRS} - COMMAND ${CMAKE_C_COMPILER} ${CMAKE_CURRENT_SOURCE_DIR}/fpu/softfloat.c -c ${CARGS_LIST} -Wno-unused-function -O3 -DSYMBEX_LLVM_LIB -emit-llvm -o ${CMAKE_CURRENT_BINARY_DIR}/softfloat.bc ${BC_INC_DIRS} - 
COMMAND ${LLVM_BIN_DIR}/llvm-link ${CMAKE_CURRENT_BINARY_DIR}/op_helper-org.bc ${CMAKE_CURRENT_BINARY_DIR}/softfloat.bc -o ${CMAKE_CURRENT_BINARY_DIR}/op_helper.bc - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/target-i386/op_helper.c ${CMAKE_CURRENT_SOURCE_DIR}/fpu/softfloat.c -) - -endif (WITH_SYMBEX) +endif(WITH_SYMBEX) MESSAGE( STATUS "CMAKE_C_FLAGS: " ${CMAKE_C_FLAGS} ) MESSAGE( STATUS "CMAKE_C_FLAGS_RELEASE: " ${CMAKE_C_FLAGS_RELEASE} ) diff --git a/src/bswap.h b/src/bswap.h index ab8c708..830c03f 100644 --- a/src/bswap.h +++ b/src/bswap.h @@ -19,10 +19,10 @@ #ifndef BSWAP_H #define BSWAP_H -#include - #include -#include "softfloat.h" + +#include +#include #ifdef CONFIG_MACHINE_BSWAP_H #include diff --git a/src/cpu-all.h b/src/cpu-all.h index a28e023..3176255 100644 --- a/src/cpu-all.h +++ b/src/cpu-all.h @@ -21,7 +21,15 @@ #include #include + +#if defined(TARGET_I386)|| defined(TARGET_X86_64) #include +#elif defined(TARGET_ARM) +#include +#else +#error unsupported target CPU +#endif + #include "bswap.h" #include "qemu-common.h" @@ -349,8 +357,5 @@ void run_on_cpu(CPUArchState *env, void (*func)(void *data), void *data); #define CPU_LOG_LLVM_IR (1 << 10) #define CPU_LOG_LLVM_ASM (1 << 11) -/* Get a list of mapped pages. */ -void list_mapped_pages(CPUX86State *env, unsigned rw_only, unsigned user_only, target_ulong **pages_addr, - size_t *pages_count); #endif /* CPU_ALL_H */ diff --git a/src/cpu-exec.c b/src/cpu-exec.c index 6315f5f..c277f52 100644 --- a/src/cpu-exec.c +++ b/src/cpu-exec.c @@ -87,12 +87,13 @@ static TranslationBlock *tb_find_slow(CPUArchState *env, target_ulong pc, target target_ulong virt_page2; tb_invalidated_flag = 0; - + DPRINTF(" find translated block using physical mappings \n"); /* find translated block using physical mappings */ phys_pc = get_page_addr_code(env, pc); phys_page1 = phys_pc & TARGET_PAGE_MASK; h = tb_phys_hash_func(phys_pc); ptb1 = &tb_phys_hash[h]; + for (;;) { tb = *ptb1; if (!tb) { @@ -127,6 +128,7 @@ static TranslationBlock *tb_find_slow(CPUArchState *env, target_ulong pc, target } not_found: /* if no translated code available, then translate it now */ + DPRINTF(" if no translated code available, then translate it now pc=0x%x, cs_base=0x%x, flags= 0x%lx\n", pc, cs_base, flags); tb = tb_gen_code(env, pc, cs_base, flags, 0); ++g_cpu_stats.tb_regens; @@ -206,28 +208,49 @@ static void cpu_handle_debug_exception(CPUArchState *env) { volatile sig_atomic_t exit_request; #ifdef TRACE_EXEC -static void dump_regs(CPUState *env, int isStart) { +static void dump_regs(CPUArchState *env, int isStart) { #if defined(CONFIG_SYMBEX) - target_ulong eax, ebx, ecx, edx, esi, edi, ebp, esp; - g_sqi.regs.read_concrete(offsetof(CPUState, regs[R_EAX]), (uint8_t *) &eax, sizeof(eax)); - g_sqi.regs.read_concrete(offsetof(CPUState, regs[R_EBX]), (uint8_t *) &ebx, sizeof(ebx)); - g_sqi.regs.read_concrete(offsetof(CPUState, regs[R_ECX]), (uint8_t *) &ecx, sizeof(ecx)); - g_sqi.regs.read_concrete(offsetof(CPUState, regs[R_EDX]), (uint8_t *) &edx, sizeof(edx)); - g_sqi.regs.read_concrete(offsetof(CPUState, regs[R_ESI]), (uint8_t *) &esi, sizeof(esi)); - g_sqi.regs.read_concrete(offsetof(CPUState, regs[R_EDI]), (uint8_t *) &edi, sizeof(edi)); - g_sqi.regs.read_concrete(offsetof(CPUState, regs[R_EBP]), (uint8_t *) &ebp, sizeof(ebp)); - g_sqi.regs.read_concrete(offsetof(CPUState, regs[R_ESP]), (uint8_t *) &esp, sizeof(esp)); - - fprintf(logfile, "%c cs:eip=%lx:%lx eax=%lx ebx=%lx ecx=%lx edx=%lx esi=%lx edi=%lx ebp=%lx ss:esp=%lx:%lx\n", - isStart ? 
's' : 'e', (uint64_t) env->segs[R_CS].selector, (uint64_t) env->eip, (uint64_t) eax,
-            (uint64_t) ebx, (uint64_t) ecx, (uint64_t) edx, (uint64_t) esi, (uint64_t) edi, (uint64_t) ebp,
-            (uint64_t) env->segs[R_SS].selector, (uint64_t) esp);
+    #if defined(TARGET_I386) || defined(TARGET_X86_64)
+    target_ulong eax, ebx, ecx, edx, esi, edi, ebp, esp;
+    g_sqi.regs.read_concrete(offsetof(CPUArchState, regs[R_EAX]), (uint8_t *) &eax, sizeof(eax));
+    g_sqi.regs.read_concrete(offsetof(CPUArchState, regs[R_EBX]), (uint8_t *) &ebx, sizeof(ebx));
+    g_sqi.regs.read_concrete(offsetof(CPUArchState, regs[R_ECX]), (uint8_t *) &ecx, sizeof(ecx));
+    g_sqi.regs.read_concrete(offsetof(CPUArchState, regs[R_EDX]), (uint8_t *) &edx, sizeof(edx));
+    g_sqi.regs.read_concrete(offsetof(CPUArchState, regs[R_ESI]), (uint8_t *) &esi, sizeof(esi));
+    g_sqi.regs.read_concrete(offsetof(CPUArchState, regs[R_EDI]), (uint8_t *) &edi, sizeof(edi));
+    g_sqi.regs.read_concrete(offsetof(CPUArchState, regs[R_EBP]), (uint8_t *) &ebp, sizeof(ebp));
+    g_sqi.regs.read_concrete(offsetof(CPUArchState, regs[R_ESP]), (uint8_t *) &esp, sizeof(esp));
+
+    fprintf(logfile, "%c cs:eip=%lx:%lx eax=%lx ebx=%lx ecx=%lx edx=%lx esi=%lx edi=%lx ebp=%lx ss:esp=%lx:%lx\n",
+            isStart ? 's' : 'e', (uint64_t) env->segs[R_CS].selector, (uint64_t) env->eip, (uint64_t) eax,
+            (uint64_t) ebx, (uint64_t) ecx, (uint64_t) edx, (uint64_t) esi, (uint64_t) edi, (uint64_t) ebp,
+            (uint64_t) env->segs[R_SS].selector, (uint64_t) esp);
+    #elif defined(TARGET_ARM)
+    uint32_t R0, R1, R2;
+    g_sqi.regs.read_concrete(offsetof(CPUArchState, regs[0]), (uint8_t *) &R0, sizeof(R0));
+    g_sqi.regs.read_concrete(offsetof(CPUArchState, regs[1]), (uint8_t *) &R1, sizeof(R1));
+    g_sqi.regs.read_concrete(offsetof(CPUArchState, regs[2]), (uint8_t *) &R2, sizeof(R2));
+
+    fprintf(logfile, "PC=%x R0=%x R1=%x R2=%x SP=%x LR=%x\n",
+            (uint32_t) env->regs[15], (uint32_t) R0, (uint32_t) R1,
+            (uint32_t) R2, (uint32_t) env->regs[13], (uint32_t) env->regs[14]);
+    #else
+    #error Unsupported target architecture
+    #endif
 #else
-    fprintf(logfile, "%c cs:eip=%lx:%lx eax=%lx ebx=%lx ecx=%lx edx=%lx esi=%lx edi=%lx ebp=%lx ss:esp=%lx:%lx\n",
-            isStart ? 's' : 'e', (uint64_t) env->segs[R_CS].selector, (uint64_t) env->eip, (uint64_t) env->regs[R_EAX],
-            (uint64_t) env->regs[R_EBX], (uint64_t) env->regs[R_ECX], (uint64_t) env->regs[R_EDX],
-            (uint64_t) env->regs[R_ESI], (uint64_t) env->regs[R_EDI], (uint64_t) env->regs[R_EBP],
-            (uint64_t) env->segs[R_SS].selector, (uint64_t) env->regs[R_ESP]);
+    #if defined(TARGET_I386) || defined(TARGET_X86_64)
+    fprintf(logfile, "%c cs:eip=%lx:%lx eax=%lx ebx=%lx ecx=%lx edx=%lx esi=%lx edi=%lx ebp=%lx ss:esp=%lx:%lx\n",
+            isStart ? 
's' : 'e', (uint64_t) env->segs[R_CS].selector, (uint64_t) env->eip, (uint64_t) env->regs[R_EAX], + (uint64_t) env->regs[R_EBX], (uint64_t) env->regs[R_ECX], (uint64_t) env->regs[R_EDX], + (uint64_t) env->regs[R_ESI], (uint64_t) env->regs[R_EDI], (uint64_t) env->regs[R_EBP], + (uint64_t) env->segs[R_SS].selector, (uint64_t) env->regs[R_ESP]); + #elif defined(TARGET_ARM) + fprintf(logfile, "PC=%x R0=%x R1=%x R2=%x SP=%x LR=%x\n", + (uint32_t) env->regs[15], (uint32_t) env->regs[0], (uint32_t) env->regs[1], + (uint32_t) env->regs[2], (uint32_t) env->regs[13], (uint32_t) env->regs[14]); + #else + #error Unsupported target architecture + #endif #endif } #endif @@ -237,11 +260,18 @@ static uintptr_t fetch_and_run_tb(TranslationBlock *prev_tb, int tb_exit_code, C uintptr_t last_tb; TranslationBlock *tb = tb_find_fast(env); - +#if defined(TARGET_I386) || defined(TARGET_X86_64) DPRINTF("fetch_and_run_tb cs:eip=%#lx:%#lx e=%#lx fl=%lx riw=%d\n", (uint64_t) env->segs[R_CS].selector, (uint64_t) env->eip, (uint64_t) env->eip + tb->size, (uint64_t) env->mflags, env->kvm_request_interrupt_window); +#elif defined(TARGET_ARM) + DPRINTF(" fetch_and_run_tb r15=0x%x sp=0x%x cpsr=0x%x \n", (uint32_t) env->regs[15], env->regs[13], env->uncached_cpsr); +#else +#error Unsupported target architecture +#endif + /* Note: we do it here to avoid a gcc bug on Mac OS X when + doing it in tb_find_slow */ if (tb_invalidated_flag) { prev_tb = NULL; tb_invalidated_flag = 0; @@ -317,9 +347,15 @@ static bool process_interrupt_request(CPUArchState *env) { } bool has_interrupt = false; - +#if defined(TARGET_I386) || defined(TARGET_X86_64) DPRINTF(" process_interrupt intrq=%#x mflags=%#lx hf1=%#x hf2=%#x\n", interrupt_request, (uint64_t) env->mflags, env->hflags, env->hflags2); +#elif defined(TARGET_ARM) + DPRINTF(" process_interrupt intrq=%#x \n", interrupt_request); +#else +#error Unsupported target architecture +#endif + if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) { /* Mask out external interrupts for this step. */ @@ -330,7 +366,15 @@ static bool process_interrupt_request(CPUArchState *env) { env->exception_index = EXCP_DEBUG; cpu_loop_exit(env); } - +#if defined(TARGET_ARM) + if (interrupt_request & CPU_INTERRUPT_HALT) { + env->interrupt_request &= ~CPU_INTERRUPT_HALT; + env->halted = 1; + env->exception_index = EXCP_HLT; + cpu_loop_exit(env); + } +#endif +#if defined(TARGET_I386) if (interrupt_request & CPU_INTERRUPT_INIT) { svm_check_intercept(env, SVM_EXIT_INIT); do_cpu_init(env); @@ -366,9 +410,8 @@ static bool process_interrupt_request(CPUArchState *env) { libcpu_log_mask(CPU_LOG_INT, "Servicing hardware INT=0x%02x\n", intno); if (intno >= 0) { #ifdef SE_KVM_DEBUG_IRQ - DPRINTF("Handling interrupt %d\n", intno); + DPRINTF(" Handling interrupt %d\n", intno); #endif - do_interrupt_x86_hardirq(env, intno, 1); } @@ -387,7 +430,28 @@ static bool process_interrupt_request(CPUArchState *env) { has_interrupt = true; } } - +#elif defined(TARGET_ARM) + if (interrupt_request & CPU_INTERRUPT_FIQ && !(env->uncached_cpsr & CPSR_F)) { + env->exception_index = EXCP_FIQ; + do_interrupt(env); + has_interrupt = true; + } + /* ARMv7-M interrupt return works by loading a magic value + into the PC. On real hardware the load causes the + return to occur. The qemu implementation performs the + jump normally, then does the exception return when the + CPU tries to execute code at the magic address. + This will cause the magic PC value to be pushed to + the stack if an interrupt occurred at the wrong time. 
+ We avoid this by disabling interrupts when + pc contains a magic address. */ + if (interrupt_request & CPU_INTERRUPT_HARD && + ((IS_M(env) && env->regs[15] < 0xfffffff0) || !(env->uncached_cpsr & CPSR_I))) { + env->exception_index = EXCP_IRQ; + do_interrupt(env); + has_interrupt = true; + } +#endif /* Don't use the cached interrupt_request value, do_interrupt may have updated the EXITTB flag. */ if (env->interrupt_request & CPU_INTERRUPT_EXITTB) { @@ -454,6 +518,8 @@ static bool execution_loop(CPUArchState *env) { // It's too heavy to log all cpu state, usually gp regs are enough // TODO: add an option to customize which regs to print log_cpu_state(env, X86_DUMP_GPREGS); +#else + log_cpu_state(env, 0); #endif } #endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */ @@ -464,17 +530,38 @@ static bool execution_loop(CPUArchState *env) { ltb = (TranslationBlock *) (last_tb & ~TB_EXIT_MASK); if (ltb) { +#if defined(TARGET_I386) || defined(TARGET_X86_64) DPRINTF("ltb s=%#lx e=%#lx fl=%lx exit_code=%x riw=%d\n", (uint64_t) ltb->pc, (uint64_t) ltb->pc + ltb->size, (uint64_t) env->mflags, last_tb_exit_code, env->kvm_request_interrupt_window); +#elif defined(TARGET_ARM) + DPRINTF("ltb s=%#lx e=%#lx exit_code=%x riw=%d\n", (uint64_t) ltb->pc, + (uint64_t) ltb->pc + ltb->size, last_tb_exit_code, + env->kvm_request_interrupt_window); +#else +#error Unsupported target architecture +#endif } if (last_tb_exit_code > TB_EXIT_IDXMAX) { +#if defined(TARGET_I386) || defined(TARGET_X86_64) env->eip = ltb->pc - ltb->cs_base; +#elif defined(TARGET_ARM) + env->regs[15] = ltb->pc; +#else +#error Unsupported target architecture +#endif ltb = NULL; } - if (env->kvm_request_interrupt_window && (env->mflags & IF_MASK)) { + if (env->kvm_request_interrupt_window && +#if defined(TARGET_I386) + (env->mflags & IF_MASK)) { +#elif defined(TARGET_ARM) + !(env->uncached_cpsr & CPSR_I)) { +#else +#error unsupported target CPU +#endif env->kvm_request_interrupt_window = 0; return true; } @@ -485,7 +572,6 @@ static bool execution_loop(CPUArchState *env) { int cpu_exec(CPUArchState *env) { int ret; - if (env->halted) { if (!cpu_has_work(env)) { return EXCP_HALTED; @@ -510,8 +596,6 @@ int cpu_exec(CPUArchState *env) { env->exception_index = -1; - DPRINTF("cpu_loop enter mflags=%#lx hf1=%#x hf2=%#x\n", (uint64_t) env->mflags, env->hflags, env->hflags2); - /* prepare setjmp context for exception handling */ for (;;) { if (setjmp(env->jmp_env) == 0) { @@ -521,9 +605,13 @@ int cpu_exec(CPUArchState *env) { * This usually happens when TB cache is flushed but current tb is not reset. 
*/ env->current_tb = NULL; - +#if defined(TARGET_I386) DPRINTF(" setjmp entered eip=%#lx\n", (uint64_t) env->eip); - +#elif defined(TARGET_ARM) + DPRINTF(" setjmp entered r15=%#x\n", (uint32_t) env->regs[15]); +#else +#error Unsupported target architecture +#endif #ifdef CONFIG_SYMBEX assert(env->exception_index != EXCP_SE); if (g_sqi.exec.finalize_tb_exec()) { @@ -536,7 +624,6 @@ int cpu_exec(CPUArchState *env) { continue; } #endif - ret = process_exceptions(env); if (ret) { if (ret == EXCP_HLT && env->interrupt_request) { @@ -546,7 +633,7 @@ int cpu_exec(CPUArchState *env) { } break; } - + DPRINTF(" execution_loop\n"); if (execution_loop(env)) { break; } @@ -565,7 +652,15 @@ int cpu_exec(CPUArchState *env) { env = cpu_single_env; } } /* for(;;) */ +#if defined(TARGET_I386) DPRINTF("cpu_loop exit ret=%#x eip=%#lx\n", ret, (uint64_t) env->eip); +#elif defined(TARGET_ARM) + DPRINTF("cpu_loop exit ret=%#x r15=%#x\n", ret, (uint32_t) env->regs[15]); +#else +#error Unsupported target architecture +#endif + + env->current_tb = NULL; diff --git a/src/cpus.c b/src/cpus.c index 883e322..a42b6c4 100644 --- a/src/cpus.c +++ b/src/cpus.c @@ -203,8 +203,10 @@ static void qemu_tcg_init_vcpu(void *_env) { void qemu_init_vcpu(void *_env) { CPUArchState *env = _env; +#if defined(TARGET_I386) env->cpuid.nr_cores = 1; env->cpuid.nr_threads = 1; +#endif if (tcg_enabled()) { qemu_tcg_init_vcpu(env); diff --git a/src/disas.c b/src/disas.c index 2f2cbe8..60859d3 100644 --- a/src/disas.c +++ b/src/disas.c @@ -18,7 +18,14 @@ #include #include +#if defined(TARGET_I386) #include +#elif defined(TARGET_ARM) +#include +#else +#error unsupported target CPU +#endif + #include #include diff --git a/src/exec-all.h b/src/exec-all.h index bf04223..f8dc357 100644 --- a/src/exec-all.h +++ b/src/exec-all.h @@ -68,12 +68,12 @@ void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb); #ifdef CONFIG_SYMBEX void cpu_gen_flush(void); void cpu_gen_init_opc(void); -void se_restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, target_ulong pc, int cc_op, target_ulong next_pc); +void se_restore_state_to_opc(CPUArchState *env, TranslationBlock *tb, target_ulong pc, int cc_op, target_ulong next_pc); #endif void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb, target_ulong *data); -int restore_state_to_next_pc(CPUX86State *env, TranslationBlock *tb); +int restore_state_to_next_pc(CPUArchState *env, TranslationBlock *tb); int cpu_gen_code(CPUArchState *env, TranslationBlock *tb); @@ -157,6 +157,9 @@ typedef void(CPUDebugExcpHandler)(CPUArchState *env); CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler); +/* vl.c */ +extern int singlestep; + /* cpu-exec.c */ extern volatile sig_atomic_t exit_request; diff --git a/src/exec-ram.h b/src/exec-ram.h index 11d098a..406867d 100644 --- a/src/exec-ram.h +++ b/src/exec-ram.h @@ -17,14 +17,25 @@ /// License along with this library; if not, see . 
#ifndef __EXEC_RAM_H__ - #define __EXEC_RAM_H__ +#include + #include +#include + +#if defined(TARGET_I386) #include +#elif defined(TARGET_ARM) +#include +#else +#error undefined target CPU +#endif + +#ifdef CONFIG_SYMBEX #include -#include -#include +#endif + #include "qqueue.h" #define RAM_ADDR_MAX UINTPTR_MAX diff --git a/src/exec-tb.c b/src/exec-tb.c index 79f2593..3814a99 100644 --- a/src/exec-tb.c +++ b/src/exec-tb.c @@ -402,7 +402,6 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, int current_tb_modified = 1; cpu_restore_state(env, env->mem_io_pc); cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, ¤t_flags); - // When an instruction modifies itself, advance pc to the next instruction // and abort the tb asap. int instr_size = tb_get_instruction_size(current_tb, current_tb->cs_base + env->eip); diff --git a/src/exec.c b/src/exec.c index e171782..5227ddc 100644 --- a/src/exec.c +++ b/src/exec.c @@ -44,8 +44,8 @@ //#define DEBUG_UNASSIGNED /* make various TB consistency checks */ -//#define DEBUG_TB_CHECK -//#define DEBUG_TLB_CHECK +#define DEBUG_TB_CHECK +#define DEBUG_TLB_CHECK //#define DEBUG_IOPORT //#define DEBUG_SUBPAGE diff --git a/src/exec.h b/src/exec.h index 924a59f..de61cb7 100644 --- a/src/exec.h +++ b/src/exec.h @@ -22,7 +22,8 @@ #include #include -#include "target-i386/cpu.h" + +#include "cpu.h" /* In system mode we want L1_MAP to be based on ram offsets, while in user mode we want it to be based on virtual addresses. */ diff --git a/src/fpu/softfloat.c b/src/fpu/softfloat.c index 520208b..e60f50b 100644 --- a/src/fpu/softfloat.c +++ b/src/fpu/softfloat.c @@ -40,14 +40,14 @@ these four paragraphs for those parts of this code that are retained. */ #include -#include "softfloat.h" +#include /*---------------------------------------------------------------------------- | Primitive arithmetic functions, including multi-word arithmetic, and | division and square root approximations. (Can be specialized to target if | desired.) *----------------------------------------------------------------------------*/ -#include "softfloat-macros.h" +#include /*---------------------------------------------------------------------------- | Functions and definitions to determine: (1) whether tininess for underflow @@ -57,7 +57,7 @@ these four paragraphs for those parts of this code that are retained. | are propagated from function inputs to output. These details are target- | specific. *----------------------------------------------------------------------------*/ -#include "softfloat-specialize.h" +#include void set_float_rounding_mode(int val STATUS_PARAM) { STATUS_W(float_rounding_mode, val); diff --git a/src/memory.c b/src/memory.c index 1019a39..8102716 100644 --- a/src/memory.c +++ b/src/memory.c @@ -19,6 +19,7 @@ #include #include #include + #include "exec-all.h" #include "exec-ram.h" diff --git a/src/target-arm/cpu.h b/src/target-arm/cpu.h new file mode 100644 index 0000000..a0854e1 --- /dev/null +++ b/src/target-arm/cpu.h @@ -0,0 +1,486 @@ +/// Copyright (C) 2003 Fabrice Bellard +/// Copyright (C) 2010 Dependable Systems Laboratory, EPFL +/// Copyright (C) 2017 Adrian Herrera +/// Copyrights of all contributions belong to their respective owners. +/// +/// This library is free software; you can redistribute it and/or +/// modify it under the terms of the GNU Library General Public +/// License as published by the Free Software Foundation; either +/// version 2 of the License, or (at your option) any later version. 
+/// +/// This library is distributed in the hope that it will be useful, +/// but WITHOUT ANY WARRANTY; without even the implied warranty of +/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +/// Library General Public License for more details. +/// +/// You should have received a copy of the GNU Library General Public +/// License along with this library; if not, see . + +#ifndef CPU_ARM_H +#define CPU_ARM_H + +#include + +#include +#include +#include +#include +#include + +#include "cpu-defs.h" +#include "nvic_interfaces.h" + +#ifdef CONFIG_SYMBEX +#include +#endif + +#include + +#define EXCP_UDEF 1 /* undefined instruction */ +#define EXCP_SWI 2 /* software interrupt */ +#define EXCP_PREFETCH_ABORT 3 +#define EXCP_DATA_ABORT 4 +#define EXCP_IRQ 5 +#define EXCP_FIQ 6 +#define EXCP_BKPT 7 +#define EXCP_EXCEPTION_EXIT 8 /* Return from v7M exception. */ +#define EXCP_KERNEL_TRAP 9 /* Jumped to kernel code page. */ +#define EXCP_STREX 10 + +#define ARMV7M_EXCP_RESET 1 +#define ARMV7M_EXCP_NMI 2 +#define ARMV7M_EXCP_HARD 3 +#define ARMV7M_EXCP_MEM 4 +#define ARMV7M_EXCP_BUS 5 +#define ARMV7M_EXCP_USAGE 6 +#define ARMV7M_EXCP_SVC 11 +#define ARMV7M_EXCP_DEBUG 12 +#define ARMV7M_EXCP_PENDSV 14 +#define ARMV7M_EXCP_SYSTICK 15 + +/* ARM-specific interrupt pending bits. */ +#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1 + +struct arm_boot_info; + +#if defined(CONFIG_SYMBEX) && !defined(SYMBEX_LLVM_LIB) + +/* uncomment this to compile assertions in */ +/* #define DO_SANITY_CHECK */ + +#ifdef DO_SANITY_CHECK +#define CHECK_ASSERT(x) assert(x) +#else +#define CHECK_ASSERT(x) +#endif + +/* Macros to access registers */ +static inline target_ulong __RR_env_raw(CPUARMState *cpuState, unsigned offset, unsigned size) { + if (likely(*g_sqi.mode.fast_concrete_invocation)) { + switch (size) { + case 1: + return *((uint8_t *) cpuState + offset); + case 2: + return *(uint16_t *) ((uint8_t *) cpuState + offset); + case 4: + return *(uint32_t *) ((uint8_t *) cpuState + offset); + case 8: + return *(uint64_t *) ((uint8_t *) cpuState + offset); + default: + assert(false); + return 0; + } + } else { + target_ulong result = 0; + g_sqi.regs.read_concrete(offset, (uint8_t *) &result, size); + + return result; + } +} + +static inline void __WR_env_raw(CPUARMState *cpuState, unsigned offset, target_ulong value, unsigned size) { + if (likely(*g_sqi.mode.fast_concrete_invocation)) { + switch (size) { + case 1: + *((uint8_t *) cpuState + offset) = value; + break; + case 2: + *(uint16_t *) ((uint8_t *) cpuState + offset) = value; + break; + case 4: + *(uint32_t *) ((uint8_t *) cpuState + offset) = value; + break; + case 8: + *(uint64_t *) ((uint8_t *) cpuState + offset) = value; + break; + default: + assert(false); + } + } else { + g_sqi.regs.write_concrete(offset, (uint8_t *) &value, size); + } +} + +#define RR_cpu(cpu, reg) ((__typeof__(cpu->reg)) __RR_env_raw(cpu, offsetof(CPUARMState, reg), sizeof(cpu->reg))) +#define WR_cpu(cpu, reg, value) __WR_env_raw(cpu, offsetof(CPUARMState, reg), (target_ulong) value, sizeof(cpu->reg)) +#else /* defined(CONFIG_SYMBEX) && !defined(SYMBEX_LLVM_LIB) */ +#define RR_cpu(cpu, reg) cpu->reg +#define WR_cpu(cpu, reg, value) cpu->reg = value +#endif /* defined(CONFIG_SYMBEX) && !defined(SYMBEX_LLVM_LIB) */ + +#ifdef ENABLE_PRECISE_EXCEPTION_DEBUGGING +#define WR_se_pc(cpu, value) cpu->preceise_pc = value +#else +#define WR_se_pc(cpu, value) +#endif + +CPUARMState *cpu_arm_init(const char *cpu_model); +void arm_translate_init(void); +int cpu_arm_exec(CPUARMState 
*s); +void do_interrupt(CPUARMState *); +void switch_mode(CPUARMState *, int); +uint32_t do_arm_semihosting(CPUARMState *env); + +/* you can call this signal handler from your SIGBUS and SIGSEGV + signal handlers to inform the virtual CPU of exceptions. non zero + is returned if the signal was handled by the virtual CPU. */ +int cpu_arm_signal_handler(int host_signum, void *pinfo, void *puc); +int cpu_arm_handle_mmu_fault(CPUARMState *env, target_ulong address, int rw, int mmu_idx); +#define cpu_handle_mmu_fault cpu_arm_handle_mmu_fault + +static inline void cpu_set_tls(CPUARMState *env, target_ulong newtls) { + env->cp15.c13_tls2 = newtls; +} + +#define CPSR_M (0x1f) +#define CPSR_T (1 << 5) +#define CPSR_F (1 << 6) +#define CPSR_I (1 << 7) +#define CPSR_A (1 << 8) +#define CPSR_E (1 << 9) +#define CPSR_IT_2_7 (0xfc00) +#define CPSR_GE (0xf << 16) +#define CPSR_RESERVED (0xf << 20) +#define CPSR_J (1 << 24) +#define CPSR_IT_0_1 (3 << 25) +#define CPSR_Q (1 << 27) +#define CPSR_V (1 << 28) +#define CPSR_C (1 << 29) +#define CPSR_Z (1 << 30) +#define CPSR_N (1 << 31) +#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V) + +#define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7) +#define CACHED_CPSR_BITS (CPSR_T | CPSR_GE | CPSR_IT | CPSR_Q | CPSR_NZCV) +/* Bits writable in user mode. */ +#define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE) +/* Execution state bits. MRS read as zero, MSR writes ignored. */ +#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J) + +/* Return the current CPSR value. */ +uint32_t cpsr_read(CPUARMState *env); +/* Set the CPSR. Note that some bits of mask must be all-set or all-clear. */ +void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask); + +/* Return the current xPSR value. */ +static inline uint32_t xpsr_read(CPUARMState *env) { + int ZF; + ZF = (RR_cpu(env, ZF) == 0); + return (RR_cpu(env, NF) & 0x80000000) | (ZF << 30) | (RR_cpu(env, CF) << 29) | + ((RR_cpu(env, VF) & 0x80000000) >> 3) | (env->QF << 27) | (env->thumb << 24) | + ((env->condexec_bits & 3) << 25) | ((env->condexec_bits & 0xfc) << 8) | env->v7m.exception; +} + +/* Set the xPSR. Note that some bits of mask must be all-set or all-clear. */ +static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) { + if (mask & CPSR_NZCV) { + WR_cpu(env, ZF, (~val) & CPSR_Z); + WR_cpu(env, NF, val); + WR_cpu(env, CF, (val >> 29) & 1); + WR_cpu(env, VF, (val << 3) & 0x80000000); + } + if (mask & CPSR_Q) + env->QF = ((val & CPSR_Q) != 0); + if (mask & (1 << 24)) + env->thumb = ((val & (1 << 24)) != 0); + if (mask & CPSR_IT_0_1) { + env->condexec_bits &= ~3; + env->condexec_bits |= (val >> 25) & 3; + } + if (mask & CPSR_IT_2_7) { + env->condexec_bits &= 3; + env->condexec_bits |= (val >> 8) & 0xfc; + } + if (mask & 0x1ff) { + env->v7m.exception = val & 0x1ff; + } +} + +/* Return the current FPSCR value. */ +uint32_t vfp_get_fpscr(CPUARMState *env); +void vfp_set_fpscr(CPUARMState *env, uint32_t val); + +enum arm_cpu_mode { + ARM_CPU_MODE_USR = 0x10, + ARM_CPU_MODE_FIQ = 0x11, + ARM_CPU_MODE_IRQ = 0x12, + ARM_CPU_MODE_SVC = 0x13, + ARM_CPU_MODE_ABT = 0x17, + ARM_CPU_MODE_UND = 0x1b, + ARM_CPU_MODE_SYS = 0x1f +}; + +/* VFP system registers. */ +#define ARM_VFP_FPSID 0 +#define ARM_VFP_FPSCR 1 +#define ARM_VFP_MVFR1 6 +#define ARM_VFP_MVFR0 7 +#define ARM_VFP_FPEXC 8 +#define ARM_VFP_FPINST 9 +#define ARM_VFP_FPINST2 10 + +/* iwMMXt coprocessor control registers. 
*/
+#define ARM_IWMMXT_wCID 0
+#define ARM_IWMMXT_wCon 1
+#define ARM_IWMMXT_wCSSF 2
+#define ARM_IWMMXT_wCASF 3
+#define ARM_IWMMXT_wCGR0 8
+#define ARM_IWMMXT_wCGR1 9
+#define ARM_IWMMXT_wCGR2 10
+#define ARM_IWMMXT_wCGR3 11
+
+enum arm_features {
+    ARM_FEATURE_VFP,
+    ARM_FEATURE_AUXCR, /* ARM1026 Auxiliary control register. */
+    ARM_FEATURE_XSCALE, /* Intel XScale extensions. */
+    ARM_FEATURE_IWMMXT, /* Intel iwMMXt extension. */
+    ARM_FEATURE_V6,
+    ARM_FEATURE_V6K,
+    ARM_FEATURE_V7,
+    ARM_FEATURE_THUMB2,
+    ARM_FEATURE_MPU, /* Only has Memory Protection Unit, not full MMU. */
+    ARM_FEATURE_VFP3,
+    ARM_FEATURE_VFP_FP16,
+    ARM_FEATURE_NEON,
+    ARM_FEATURE_THUMB_DIV, /* divide supported in Thumb encoding */
+    ARM_FEATURE_M, /* Microcontroller profile. */
+    ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. 
*/
+    ARM_FEATURE_THUMB2EE,
+    ARM_FEATURE_V7MP, /* v7 Multiprocessing Extensions */
+    ARM_FEATURE_V7VE, /* v7 Virtualization Extensions (non-EL2 parts) */
+    ARM_FEATURE_V4T,
+    ARM_FEATURE_V5,
+    ARM_FEATURE_STRONGARM,
+    ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */
+    ARM_FEATURE_ARM_DIV, /* divide supported in ARM encoding */
+    ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */
+    ARM_FEATURE_GENERIC_TIMER,
+    ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */
+    ARM_FEATURE_DUMMY_C15_REGS, /* RAZ/WI all of cp15 crn=15 */
+    ARM_FEATURE_CACHE_TEST_CLEAN, /* 926/1026 style test-and-clean ops */
+    ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */
+    ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */
+    ARM_FEATURE_MPIDR, /* has cp15 MPIDR */
+    ARM_FEATURE_PXN, /* has Privileged Execute Never bit */
+    ARM_FEATURE_LPAE, /* has Large Physical Address Extension */
+    ARM_FEATURE_V8,
+    ARM_FEATURE_AARCH64, /* supports 64 bit mode */
+    ARM_FEATURE_V8_AES, /* implements AES part of v8 Crypto Extensions */
+    ARM_FEATURE_CBAR, /* has cp15 CBAR */
+    ARM_FEATURE_CRC, /* ARMv8 CRC instructions */
+    ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */
+    ARM_FEATURE_EL2, /* has EL2 Virtualization support */
+    ARM_FEATURE_EL3, /* has EL3 Secure monitor support */
+    ARM_FEATURE_V8_SHA1, /* implements SHA1 part of v8 Crypto Extensions */
+    ARM_FEATURE_V8_SHA256, /* implements SHA256 part of v8 Crypto Extensions */
+    ARM_FEATURE_V8_PMULL, /* implements PMULL part of v8 Crypto Extensions */
+    ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */
+    ARM_FEATURE_PMU, /* has PMU support */
+    ARM_FEATURE_VBAR, /* has cp15 VBAR */
+    ARM_FEATURE_M_SECURITY, /* M profile Security Extension */
+    ARM_FEATURE_JAZELLE, /* has (trivial) Jazelle implementation */
+    ARM_FEATURE_SVE, /* has Scalable Vector Extension */
+    ARM_FEATURE_V8_SHA512, /* implements SHA512 part of v8 Crypto Extensions */
+    ARM_FEATURE_V8_SHA3, /* implements SHA3 part of v8 Crypto Extensions */
+    ARM_FEATURE_V8_SM3, /* implements SM3 part of v8 Crypto Extensions */
+    ARM_FEATURE_V8_SM4, /* implements SM4 part of v8 Crypto Extensions */
+    ARM_FEATURE_V8_ATOMICS, /* ARMv8.1-Atomics feature */
+    ARM_FEATURE_V8_RDM, /* implements v8.1 simd round multiply */
+    ARM_FEATURE_V8_DOTPROD, /* implements v8.2 simd dot product */
+    ARM_FEATURE_V8_FP16, /* implements v8.2 half-precision float */
+    ARM_FEATURE_V8_FCMA, /* has complex number part of v8.3 extensions. */
+    ARM_FEATURE_M_MAIN, /* M profile Main Extension */
+};
+static inline int arm_feature(CPUARMState *env, int feature) {
+    return (env->features & (1ULL << feature)) != 0;
+}
+
+void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+
+/* Interface between CPU and Interrupt controller. */
+/* void armv7m_nvic_set_pending(void *opaque, int irq, bool secure); */
+// int armv7m_nvic_acknowledge_irq(void *opaque);
+/* void armv7m_nvic_complete_irq(void *opaque, int irq, bool secure); */
+
+void cpu_arm_set_cp_io(CPUARMState *env, int cpnum, ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write, void *opaque);
+
+/* Does the core conform to the "MicroController" profile (e.g. Cortex-M3)?
+   Note that the M in older cores (e.g. ARM7TDMI) stands for Multiply; those
+   are conventional cores (i.e. Application or Realtime profile). 
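+   A small usage sketch follows (editor's addition, not imported code;
+   the helper name is ours). */
+static inline int sketch_has_hw_divide(CPUARMState *env, int is_thumb) {
+    /* SDIV/UDIV availability is tracked separately for the ARM and
+     * Thumb encodings, hence the two feature bits. */
+    return is_thumb ? arm_feature(env, ARM_FEATURE_THUMB_DIV)
+                    : arm_feature(env, ARM_FEATURE_ARM_DIV);
+}
+/* (imported source continues)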
*/ + +#define IS_M(env) arm_feature(env, ARM_FEATURE_M) +#define ARM_CPUID(env) (env->cp15.c0_cpuid) + +#define ARM_CPUID_ARM1026 0x4106a262 +#define ARM_CPUID_ARM926 0x41069265 +#define ARM_CPUID_ARM946 0x41059461 +#define ARM_CPUID_TI915T 0x54029152 +#define ARM_CPUID_TI925T 0x54029252 +#define ARM_CPUID_SA1100 0x4401A11B +#define ARM_CPUID_SA1110 0x6901B119 +#define ARM_CPUID_PXA250 0x69052100 +#define ARM_CPUID_PXA255 0x69052d00 +#define ARM_CPUID_PXA260 0x69052903 +#define ARM_CPUID_PXA261 0x69052d05 +#define ARM_CPUID_PXA262 0x69052d06 +#define ARM_CPUID_PXA270 0x69054110 +#define ARM_CPUID_PXA270_A0 0x69054110 +#define ARM_CPUID_PXA270_A1 0x69054111 +#define ARM_CPUID_PXA270_B0 0x69054112 +#define ARM_CPUID_PXA270_B1 0x69054113 +#define ARM_CPUID_PXA270_C0 0x69054114 +#define ARM_CPUID_PXA270_C5 0x69054117 +#define ARM_CPUID_ARM1136 0x4117b363 +#define ARM_CPUID_ARM1136_R2 0x4107b362 +#define ARM_CPUID_ARM1176 0x410fb767 +#define ARM_CPUID_ARM11MPCORE 0x410fb022 +#define ARM_CPUID_CORTEXA8 0x410fc080 +#define ARM_CPUID_CORTEXA9 0x410fc090 +#define ARM_CPUID_CORTEXA15 0x412fc0f1 +#define ARM_CPUID_CORTEXM3 0x410fc231 +#define ARM_CPUID_ANY 0xffffffff + +#define TARGET_PHYS_ADDR_SPACE_BITS 32 +#define TARGET_VIRT_ADDR_SPACE_BITS 32 + +#define cpu_init cpu_arm_init +#define cpu_exec cpu_arm_exec +#define cpu_gen_code cpu_arm_gen_code +#define cpu_signal_handler cpu_arm_signal_handler +#define cpu_list arm_cpu_list + +#define CPU_SAVE_VERSION 6 + +/* MMU modes definitions */ +#define MMU_MODE0_SUFFIX _kernel +#define MMU_MODE1_SUFFIX _user +#define MMU_USER_IDX 1 + +static inline int cpu_mmu_index(CPUARMState *env) { + return (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR ? 1 : 0; +} + +#include "cpu-all.h" + +/* Bit usage in the TB flags field: */ +#define ARM_TBFLAG_THUMB_SHIFT 0 +#define ARM_TBFLAG_THUMB_MASK (1 << ARM_TBFLAG_THUMB_SHIFT) +#define ARM_TBFLAG_VECLEN_SHIFT 1 +#define ARM_TBFLAG_VECLEN_MASK (0x7 << ARM_TBFLAG_VECLEN_SHIFT) +#define ARM_TBFLAG_VECSTRIDE_SHIFT 4 +#define ARM_TBFLAG_VECSTRIDE_MASK (0x3 << ARM_TBFLAG_VECSTRIDE_SHIFT) +#define ARM_TBFLAG_PRIV_SHIFT 6 +#define ARM_TBFLAG_PRIV_MASK (1 << ARM_TBFLAG_PRIV_SHIFT) +#define ARM_TBFLAG_VFPEN_SHIFT 7 +#define ARM_TBFLAG_VFPEN_MASK (1 << ARM_TBFLAG_VFPEN_SHIFT) +#define ARM_TBFLAG_CONDEXEC_SHIFT 8 +#define ARM_TBFLAG_CONDEXEC_MASK (0xff << ARM_TBFLAG_CONDEXEC_SHIFT) +#define ARM_TBFLAG_BSWAP_CODE_SHIFT 16 +#define ARM_TBFLAG_BSWAP_CODE_MASK (1 << ARM_TBFLAG_BSWAP_CODE_SHIFT) +/* Bits 31..17 are currently unused. 
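*/
+
+/* Editor's illustration -- not part of the imported source. The translator
+ * packs CPU state into tb->flags using the shifts and masks above; a field
+ * round-trips like this (helper name ours, values arbitrary):
+ */
+static inline int sketch_tbflags_roundtrip(void) {
+    int flags = (1 << ARM_TBFLAG_THUMB_SHIFT)          /* Thumb state */
+                | (0x2c << ARM_TBFLAG_CONDEXEC_SHIFT); /* IT-block state */
+    /* extracting a field undoes the packing: */
+    return (flags & ARM_TBFLAG_CONDEXEC_MASK) >> ARM_TBFLAG_CONDEXEC_SHIFT; /* == 0x2c */
+}
+
+/* (imported source continues)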
*/
+
+/* some convenience accessor macros */
+#define ARM_TBFLAG_THUMB(F) (((F) & ARM_TBFLAG_THUMB_MASK) >> ARM_TBFLAG_THUMB_SHIFT)
+#define ARM_TBFLAG_VECLEN(F) (((F) & ARM_TBFLAG_VECLEN_MASK) >> ARM_TBFLAG_VECLEN_SHIFT)
+#define ARM_TBFLAG_VECSTRIDE(F) (((F) & ARM_TBFLAG_VECSTRIDE_MASK) >> ARM_TBFLAG_VECSTRIDE_SHIFT)
+#define ARM_TBFLAG_PRIV(F) (((F) & ARM_TBFLAG_PRIV_MASK) >> ARM_TBFLAG_PRIV_SHIFT)
+#define ARM_TBFLAG_VFPEN(F) (((F) & ARM_TBFLAG_VFPEN_MASK) >> ARM_TBFLAG_VFPEN_SHIFT)
+#define ARM_TBFLAG_CONDEXEC(F) (((F) & ARM_TBFLAG_CONDEXEC_MASK) >> ARM_TBFLAG_CONDEXEC_SHIFT)
+#define ARM_TBFLAG_BSWAP_CODE(F) (((F) & ARM_TBFLAG_BSWAP_CODE_MASK) >> ARM_TBFLAG_BSWAP_CODE_SHIFT)
+
+static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, target_ulong *cs_base, int *flags) {
+    int privmode;
+    *pc = env->regs[15];
+    *cs_base = 0;
+    *flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT) | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT) |
+             (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT) | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT) |
+             (env->bswap_code << ARM_TBFLAG_BSWAP_CODE_SHIFT);
+    if (arm_feature(env, ARM_FEATURE_M)) {
+        privmode = !((env->v7m.exception == 0) && (env->v7m.control & 1));
+    } else {
+        privmode = (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR;
+    }
+    if (privmode) {
+        *flags |= ARM_TBFLAG_PRIV_MASK;
+    }
+    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
+        *flags |= ARM_TBFLAG_VFPEN_MASK;
+    }
+}
+
+static inline bool cpu_has_work(CPUARMState *env) {
+    return env->interrupt_request & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB);
+}
+
+#include "exec-all.h"
+
+static inline void cpu_pc_from_tb(CPUARMState *env, TranslationBlock *tb) {
+    env->regs[15] = tb->pc;
+}
+
+/* Load an instruction and return it in the standard little-endian order */
+static inline uint32_t arm_ldl_code(uint32_t addr, bool do_swap) {
+    uint32_t insn = ldl_raw(addr);
+    if (do_swap) {
+        return bswap32(insn);
+    }
+    return insn;
+}
+
+/* Ditto, for a halfword (Thumb) instruction */
+static inline uint16_t arm_lduw_code(uint32_t addr, bool do_swap) {
+    uint16_t insn = lduw_raw(addr);
+    if (do_swap) {
+        return bswap16(insn);
+    }
+    return insn;
+}
+
+#endif
diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c
new file mode 100644
index 0000000..13763a9
--- /dev/null
+++ b/src/target-arm/helper.c
@@ -0,0 +1,3026 @@
+/// Copyright (C) 2003 Fabrice Bellard
+/// Copyright (C) 2010 Dependable Systems Laboratory, EPFL
+/// Copyright (C) 2017 Adrian Herrera
+/// Copyrights of all contributions belong to their respective owners.
+///
+/// This library is free software; you can redistribute it and/or
+/// modify it under the terms of the GNU Library General Public
+/// License as published by the Free Software Foundation; either
+/// version 2 of the License, or (at your option) any later version.
+///
+/// This library is distributed in the hope that it will be useful,
+/// but WITHOUT ANY WARRANTY; without even the implied warranty of
+/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+/// Library General Public License for more details.
+///
+/// You should have received a copy of the GNU Library General Public
+/// License along with this library; if not, see <http://www.gnu.org/licenses/>.
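+
+/* Editor's note -- not part of the imported file. The code below fetches
+ * guest instructions via arm_ldl_code()/arm_lduw_code() from cpu.h: when
+ * env->bswap_code is set the fetched word is byte-reversed, e.g.
+ * bswap32(0x11223344) == 0x44332211, so the translator always consumes
+ * instruction words in little-endian order. */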
+ +#include + +#include "cpu.h" + +#include + +#include "helper.h" + +#include "host-utils.h" + +int semihosting_enabled = 0; +int smp_cpus = 1; + +static uint32_t cortexa15_cp15_c0_c1[8] = {0x00001131, 0x00011011, 0x02010555, 0x00000000, + 0x10201105, 0x20000000, 0x01240000, 0x02102211}; + +static uint32_t cortexa15_cp15_c0_c2[8] = {0x02101110, 0x13112111, 0x21232041, 0x11112131, 0x10011142, 0, 0, 0}; + +static uint32_t cortexa9_cp15_c0_c1[8] = {0x1031, 0x11, 0x000, 0, 0x00100103, 0x20000000, 0x01230000, 0x00002111}; + +static uint32_t cortexa9_cp15_c0_c2[8] = {0x00101111, 0x13112111, 0x21232041, 0x11112131, 0x00111142, 0, 0, 0}; + +static uint32_t cortexa8_cp15_c0_c1[8] = {0x1031, 0x11, 0x400, 0, 0x31100003, 0x20000000, 0x01202000, 0x11}; + +static uint32_t cortexa8_cp15_c0_c2[8] = {0x00101111, 0x12112111, 0x21232031, 0x11112131, 0x00111142, 0, 0, 0}; + +static uint32_t mpcore_cp15_c0_c1[8] = {0x111, 0x1, 0, 0x2, 0x01100103, 0x10020302, 0x01222000, 0}; + +static uint32_t mpcore_cp15_c0_c2[8] = {0x00100011, 0x12002111, 0x11221011, 0x01102131, 0x141, 0, 0, 0}; + +static uint32_t arm1136_cp15_c0_c1[8] = {0x111, 0x1, 0x2, 0x3, 0x01130003, 0x10030302, 0x01222110, 0}; + +static uint32_t arm1136_cp15_c0_c2[8] = {0x00140011, 0x12002111, 0x11231111, 0x01102131, 0x141, 0, 0, 0}; + +static uint32_t arm1176_cp15_c0_c1[8] = {0x111, 0x11, 0x33, 0, 0x01130003, 0x10030302, 0x01222100, 0}; + +static uint32_t arm1176_cp15_c0_c2[8] = {0x0140011, 0x12002111, 0x11231121, 0x01102131, 0x01141, 0, 0, 0}; + +static uint32_t cpu_arm_find_by_name(const char *name); + +static inline void set_feature(CPUARMState *env, int feature) { + env->features |= 1u << feature; +} + +static void cpu_reset_model_id(CPUARMState *env, uint32_t id) { + env->cp15.c0_cpuid = id; + switch (id) { + case ARM_CPUID_ARM926: + set_feature(env, ARM_FEATURE_V5); + set_feature(env, ARM_FEATURE_VFP); + env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090; + env->cp15.c0_cachetype = 0x1dd20d2; + env->cp15.c1_sys = 0x00090078; + break; + case ARM_CPUID_ARM946: + set_feature(env, ARM_FEATURE_V5); + set_feature(env, ARM_FEATURE_MPU); + env->cp15.c0_cachetype = 0x0f004006; + env->cp15.c1_sys = 0x00000078; + break; + case ARM_CPUID_ARM1026: + set_feature(env, ARM_FEATURE_V5); + set_feature(env, ARM_FEATURE_VFP); + set_feature(env, ARM_FEATURE_AUXCR); + env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0; + env->cp15.c0_cachetype = 0x1dd20d2; + env->cp15.c1_sys = 0x00090078; + break; + case ARM_CPUID_ARM1136: + /* This is the 1136 r1, which is a v6K core */ + set_feature(env, ARM_FEATURE_V6K); + /* Fall through */ + case ARM_CPUID_ARM1136_R2: + /* What qemu calls "arm1136_r2" is actually the 1136 r0p2, ie an + * older core than plain "arm1136". In particular this does not + * have the v6K features. + */ + set_feature(env, ARM_FEATURE_V6); + set_feature(env, ARM_FEATURE_VFP); + /* These ID register values are correct for 1136 but may be wrong + * for 1136_r2 (in particular r0p2 does not actually implement most + * of the ID registers). 
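+         * (Editor's note, not imported text: the two 8-entry tables copied
+         * just below back the CP15 c0 ID space -- MRC p15, 0, <Rd>, c0, c1,
+         * <op2> returns c0_c1[op2] and c0, c2 returns c0_c2[op2]; see
+         * get_cp15() later in this file.)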
+ */ + env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4; + env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111; + env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000; + memcpy(env->cp15.c0_c1, arm1136_cp15_c0_c1, 8 * sizeof(uint32_t)); + memcpy(env->cp15.c0_c2, arm1136_cp15_c0_c2, 8 * sizeof(uint32_t)); + env->cp15.c0_cachetype = 0x1dd20d2; + env->cp15.c1_sys = 0x00050078; + break; + case ARM_CPUID_ARM1176: + set_feature(env, ARM_FEATURE_V6K); + set_feature(env, ARM_FEATURE_VFP); + set_feature(env, ARM_FEATURE_VAPA); + env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b5; + env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111; + env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000; + memcpy(env->cp15.c0_c1, arm1176_cp15_c0_c1, 8 * sizeof(uint32_t)); + memcpy(env->cp15.c0_c2, arm1176_cp15_c0_c2, 8 * sizeof(uint32_t)); + env->cp15.c0_cachetype = 0x1dd20d2; + env->cp15.c1_sys = 0x00050078; + break; + case ARM_CPUID_ARM11MPCORE: + set_feature(env, ARM_FEATURE_V6K); + set_feature(env, ARM_FEATURE_VFP); + set_feature(env, ARM_FEATURE_VAPA); + env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4; + env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111; + env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000; + memcpy(env->cp15.c0_c1, mpcore_cp15_c0_c1, 8 * sizeof(uint32_t)); + memcpy(env->cp15.c0_c2, mpcore_cp15_c0_c2, 8 * sizeof(uint32_t)); + env->cp15.c0_cachetype = 0x1dd20d2; + break; + case ARM_CPUID_CORTEXA8: + set_feature(env, ARM_FEATURE_V7); + set_feature(env, ARM_FEATURE_VFP3); + set_feature(env, ARM_FEATURE_NEON); + set_feature(env, ARM_FEATURE_THUMB2EE); + env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c0; + env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222; + env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011100; + memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t)); + memcpy(env->cp15.c0_c2, cortexa8_cp15_c0_c2, 8 * sizeof(uint32_t)); + env->cp15.c0_cachetype = 0x82048004; + env->cp15.c0_clid = (1 << 27) | (2 << 24) | 3; + env->cp15.c0_ccsid[0] = 0xe007e01a; /* 16k L1 dcache. */ + env->cp15.c0_ccsid[1] = 0x2007e01a; /* 16k L1 icache. */ + env->cp15.c0_ccsid[2] = 0xf0000000; /* No L2 icache. */ + env->cp15.c1_sys = 0x00c50078; + break; + case ARM_CPUID_CORTEXA9: + set_feature(env, ARM_FEATURE_V7); + set_feature(env, ARM_FEATURE_VFP3); + set_feature(env, ARM_FEATURE_VFP_FP16); + set_feature(env, ARM_FEATURE_NEON); + set_feature(env, ARM_FEATURE_THUMB2EE); + /* Note that A9 supports the MP extensions even for + * A9UP and single-core A9MP (which are both different + * and valid configurations; we don't model A9UP). + */ + set_feature(env, ARM_FEATURE_V7MP); + env->vfp.xregs[ARM_VFP_FPSID] = 0x41033090; + env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222; + env->vfp.xregs[ARM_VFP_MVFR1] = 0x01111111; + memcpy(env->cp15.c0_c1, cortexa9_cp15_c0_c1, 8 * sizeof(uint32_t)); + memcpy(env->cp15.c0_c2, cortexa9_cp15_c0_c2, 8 * sizeof(uint32_t)); + env->cp15.c0_cachetype = 0x80038003; + env->cp15.c0_clid = (1 << 27) | (1 << 24) | 3; + env->cp15.c0_ccsid[0] = 0xe00fe015; /* 16k L1 dcache. */ + env->cp15.c0_ccsid[1] = 0x200fe015; /* 16k L1 icache. 
*/ + env->cp15.c1_sys = 0x00c50078; + break; + case ARM_CPUID_CORTEXA15: + set_feature(env, ARM_FEATURE_V7); + set_feature(env, ARM_FEATURE_VFP4); + set_feature(env, ARM_FEATURE_VFP_FP16); + set_feature(env, ARM_FEATURE_NEON); + set_feature(env, ARM_FEATURE_THUMB2EE); + set_feature(env, ARM_FEATURE_ARM_DIV); + set_feature(env, ARM_FEATURE_V7MP); + set_feature(env, ARM_FEATURE_GENERIC_TIMER); + env->vfp.xregs[ARM_VFP_FPSID] = 0x410430f0; + env->vfp.xregs[ARM_VFP_MVFR0] = 0x10110222; + env->vfp.xregs[ARM_VFP_MVFR1] = 0x11111111; + memcpy(env->cp15.c0_c1, cortexa15_cp15_c0_c1, 8 * sizeof(uint32_t)); + memcpy(env->cp15.c0_c2, cortexa15_cp15_c0_c2, 8 * sizeof(uint32_t)); + env->cp15.c0_cachetype = 0x8444c004; + env->cp15.c0_clid = 0x0a200023; + env->cp15.c0_ccsid[0] = 0x701fe00a; /* 32K L1 dcache */ + env->cp15.c0_ccsid[1] = 0x201fe00a; /* 32K L1 icache */ + env->cp15.c0_ccsid[2] = 0x711fe07a; /* 4096K L2 unified cache */ + env->cp15.c1_sys = 0x00c50078; + break; + case ARM_CPUID_CORTEXM3: + set_feature(env, ARM_FEATURE_V7); + set_feature(env, ARM_FEATURE_M); + break; + case ARM_CPUID_ANY: /* For userspace emulation. */ + set_feature(env, ARM_FEATURE_V7); + set_feature(env, ARM_FEATURE_VFP4); + set_feature(env, ARM_FEATURE_VFP_FP16); + set_feature(env, ARM_FEATURE_NEON); + set_feature(env, ARM_FEATURE_THUMB2EE); + set_feature(env, ARM_FEATURE_ARM_DIV); + set_feature(env, ARM_FEATURE_V7MP); + break; + case ARM_CPUID_TI915T: + case ARM_CPUID_TI925T: + set_feature(env, ARM_FEATURE_V4T); + set_feature(env, ARM_FEATURE_OMAPCP); + env->cp15.c0_cpuid = ARM_CPUID_TI925T; /* Depends on wiring. */ + env->cp15.c0_cachetype = 0x5109149; + env->cp15.c1_sys = 0x00000070; + env->cp15.c15_i_max = 0x000; + env->cp15.c15_i_min = 0xff0; + break; + case ARM_CPUID_PXA250: + case ARM_CPUID_PXA255: + case ARM_CPUID_PXA260: + case ARM_CPUID_PXA261: + case ARM_CPUID_PXA262: + set_feature(env, ARM_FEATURE_V5); + set_feature(env, ARM_FEATURE_XSCALE); + /* JTAG_ID is ((id << 28) | 0x09265013) */ + env->cp15.c0_cachetype = 0xd172172; + env->cp15.c1_sys = 0x00000078; + break; + case ARM_CPUID_PXA270_A0: + case ARM_CPUID_PXA270_A1: + case ARM_CPUID_PXA270_B0: + case ARM_CPUID_PXA270_B1: + case ARM_CPUID_PXA270_C0: + case ARM_CPUID_PXA270_C5: + set_feature(env, ARM_FEATURE_V5); + set_feature(env, ARM_FEATURE_XSCALE); + /* JTAG_ID is ((id << 28) | 0x09265013) */ + set_feature(env, ARM_FEATURE_IWMMXT); + env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q'; + env->cp15.c0_cachetype = 0xd172172; + env->cp15.c1_sys = 0x00000078; + break; + case ARM_CPUID_SA1100: + case ARM_CPUID_SA1110: + set_feature(env, ARM_FEATURE_STRONGARM); + env->cp15.c1_sys = 0x00000070; + break; + default: + cpu_abort(env, "Bad CPU ID: %x\n", id); + break; + } + + /* Some features automatically imply others: */ + if (arm_feature(env, ARM_FEATURE_V7)) { + set_feature(env, ARM_FEATURE_VAPA); + set_feature(env, ARM_FEATURE_THUMB2); + if (!arm_feature(env, ARM_FEATURE_M)) { + set_feature(env, ARM_FEATURE_V6K); + } else { + set_feature(env, ARM_FEATURE_V6); + } + } + if (arm_feature(env, ARM_FEATURE_V6K)) { + set_feature(env, ARM_FEATURE_V6); + set_feature(env, ARM_FEATURE_MVFR); + } + if (arm_feature(env, ARM_FEATURE_V6)) { + set_feature(env, ARM_FEATURE_V5); + if (!arm_feature(env, ARM_FEATURE_M)) { + set_feature(env, ARM_FEATURE_AUXCR); + } + } + if (arm_feature(env, ARM_FEATURE_V5)) { + set_feature(env, ARM_FEATURE_V4T); + } + if (arm_feature(env, ARM_FEATURE_M)) { + set_feature(env, ARM_FEATURE_THUMB_DIV); + } + if (arm_feature(env, ARM_FEATURE_ARM_DIV)) 
{ + set_feature(env, ARM_FEATURE_THUMB_DIV); + } + if (arm_feature(env, ARM_FEATURE_VFP4)) { + set_feature(env, ARM_FEATURE_VFP3); + } + if (arm_feature(env, ARM_FEATURE_VFP3)) { + set_feature(env, ARM_FEATURE_VFP); + } +} + +/* TODO Move contents into arm_cpu_reset() in cpu.c, + * once cpu_reset_model_id() is eliminated, + * and then forward to cpu_reset() here. + */ +void cpu_state_reset(CPUARMState *env) { + uint32_t id; + uint32_t tmp = 0; + + if (libcpu_loglevel_mask(CPU_LOG_RESET)) { + libcpu_log("CPU Reset (CPU %d)\n", env->cpu_index); + log_cpu_state(env, 0); + } + + id = env->cp15.c0_cpuid; + tmp = env->cp15.c15_config_base_address; + memset(env, 0, offsetof(CPUARMState, breakpoints)); + if (id) + cpu_reset_model_id(env, id); + env->cp15.c15_config_base_address = tmp; + /* SVC mode with interrupts disabled. */ + env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I; + /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is + clear at reset. Initial SP and PC are loaded from ROM. */ + if (IS_M(env)) { + env->uncached_cpsr &= ~CPSR_I; + } + env->vfp.xregs[ARM_VFP_FPEXC] = 0; + env->cp15.c2_base_mask = 0xffffc000u; + /* v7 performance monitor control register: same implementor + * field as main ID register, and we implement no event counters. + */ + env->cp15.c9_pmcr = (id & 0xff000000); + set_flush_to_zero(1, &env->vfp.standard_fp_status); + set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status); + set_default_nan_mode(1, &env->vfp.standard_fp_status); + set_float_detect_tininess(float_tininess_before_rounding, &env->vfp.fp_status); + set_float_detect_tininess(float_tininess_before_rounding, &env->vfp.standard_fp_status); + tlb_flush(env, 1); + /* Reset is a state change for some CPUARMState fields which we + * bake assumptions about into translated code, so we need to + * tb_flush(). + */ + tb_flush(env); + env->kvm_irq = -1; +} + +//static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg) { +// int nregs; +// +// /* VFP data registers are always little-endian. */ +// nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16; +// if (reg < nregs) { +// stfq_le_p(buf, env->vfp.regs[reg]); +// return 8; +// } +// if (arm_feature(env, ARM_FEATURE_NEON)) { +// /* Aliases for Q regs. */ +// nregs += 16; +// if (reg < nregs) { +// stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]); +// stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]); +// return 16; +// } +// } +// switch (reg - nregs) { +// case 0: +// stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); +// return 4; +// case 1: +// stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); +// return 4; +// case 2: +// stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); +// return 4; +// } +// return 0; +//} +// +//static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) { +// int nregs; +// +// nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 
32 : 16; +// if (reg < nregs) { +// env->vfp.regs[reg] = ldfq_le_p(buf); +// return 8; +// } +// if (arm_feature(env, ARM_FEATURE_NEON)) { +// nregs += 16; +// if (reg < nregs) { +// env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf); +// env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8); +// return 16; +// } +// } +// switch (reg - nregs) { +// case 0: +// env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); +// return 4; +// case 1: +// env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); +// return 4; +// case 2: +// env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); +// return 4; +// } +// return 0; +//} + +CPUARMState *cpu_arm_init(const char *cpu_model) { + + CPUARMState *env; + uint32_t id; + static int inited = 0; + + id = cpu_arm_find_by_name(cpu_model); + if (id == 0) + return NULL; + env = g_malloc0(sizeof(CPUARMState)); + cpu_exec_init(env); + if (tcg_enabled() && !inited) { + inited = 1; + arm_translate_init(); + } + + env->cp15.c0_cpuid = id; + cpu_state_reset(env); + +// if (arm_feature(env, ARM_FEATURE_NEON)) { +// gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg, 51, "arm-neon.xml", 0); +// } else if (arm_feature(env, ARM_FEATURE_VFP3)) { +// gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg, 35, "arm-vfp3.xml", 0); +// } else if (arm_feature(env, ARM_FEATURE_VFP)) { +// gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg, 19, "arm-vfp.xml", 0); +// } + + qemu_init_vcpu(env); + return env; +} + +struct arm_cpu_t { + uint32_t id; + const char *name; +}; + +static const struct arm_cpu_t arm_cpu_names[] = {{ARM_CPUID_ARM926, "arm926"}, + {ARM_CPUID_ARM946, "arm946"}, + {ARM_CPUID_ARM1026, "arm1026"}, + {ARM_CPUID_ARM1136, "arm1136"}, + {ARM_CPUID_ARM1136_R2, "arm1136-r2"}, + {ARM_CPUID_ARM1176, "arm1176"}, + {ARM_CPUID_ARM11MPCORE, "arm11mpcore"}, + {ARM_CPUID_CORTEXM3, "cortex-m3"}, + {ARM_CPUID_CORTEXA8, "cortex-a8"}, + {ARM_CPUID_CORTEXA9, "cortex-a9"}, + {ARM_CPUID_CORTEXA15, "cortex-a15"}, + {ARM_CPUID_TI925T, "ti925t"}, + {ARM_CPUID_PXA250, "pxa250"}, + {ARM_CPUID_SA1100, "sa1100"}, + {ARM_CPUID_SA1110, "sa1110"}, + {ARM_CPUID_PXA255, "pxa255"}, + {ARM_CPUID_PXA260, "pxa260"}, + {ARM_CPUID_PXA261, "pxa261"}, + {ARM_CPUID_PXA262, "pxa262"}, + {ARM_CPUID_PXA270, "pxa270"}, + {ARM_CPUID_PXA270_A0, "pxa270-a0"}, + {ARM_CPUID_PXA270_A1, "pxa270-a1"}, + {ARM_CPUID_PXA270_B0, "pxa270-b0"}, + {ARM_CPUID_PXA270_B1, "pxa270-b1"}, + {ARM_CPUID_PXA270_C0, "pxa270-c0"}, + {ARM_CPUID_PXA270_C5, "pxa270-c5"}, + {ARM_CPUID_ANY, "any"}, + {0, NULL}}; + +void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf) { + int i; + + (*cpu_fprintf)(f, "Available CPUs:\n"); + for (i = 0; arm_cpu_names[i].name; i++) { + (*cpu_fprintf)(f, " %s\n", arm_cpu_names[i].name); + } +} + +/* return 0 if not found */ +static uint32_t cpu_arm_find_by_name(const char *name) { + int i; + uint32_t id; + + id = 0; + for (i = 0; arm_cpu_names[i].name; i++) { + if (strcmp(name, arm_cpu_names[i].name) == 0) { + id = arm_cpu_names[i].id; + break; + } + } + return id; +} + +static int bad_mode_switch(CPUARMState *env, int mode) { + /* Return true if it is not valid for us to switch to + * this CPU mode (ie all the UNPREDICTABLE cases in + * the ARM ARM CPSRWriteByInstr pseudocode). 
+ */ + switch (mode) { + case ARM_CPU_MODE_USR: + case ARM_CPU_MODE_SYS: + case ARM_CPU_MODE_SVC: + case ARM_CPU_MODE_ABT: + case ARM_CPU_MODE_UND: + case ARM_CPU_MODE_IRQ: + case ARM_CPU_MODE_FIQ: + return 0; + default: + return 1; + } +} + +uint32_t cpsr_read(CPUARMState *env) { + int ZF; + ZF = (env->ZF == 0); + return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | + (env->QF << 27) | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) | ((env->condexec_bits & 0xfc) << 8) | + (env->GE << 16); +} + +void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) { + if (mask & CPSR_NZCV) { + env->ZF = (~val) & CPSR_Z; + env->NF = val; + env->CF = (val >> 29) & 1; + env->VF = (val << 3) & 0x80000000; + } + if (mask & CPSR_Q) + env->QF = ((val & CPSR_Q) != 0); + if (mask & CPSR_T) + env->thumb = ((val & CPSR_T) != 0); + if (mask & CPSR_IT_0_1) { + env->condexec_bits &= ~3; + env->condexec_bits |= (val >> 25) & 3; + } + if (mask & CPSR_IT_2_7) { + env->condexec_bits &= 3; + env->condexec_bits |= (val >> 8) & 0xfc; + } + if (mask & CPSR_GE) { + env->GE = (val >> 16) & 0xf; + } + + if ((env->uncached_cpsr ^ val) & mask & CPSR_M) { + if (bad_mode_switch(env, val & CPSR_M)) { + /* Attempt to switch to an invalid mode: this is UNPREDICTABLE. + * We choose to ignore the attempt and leave the CPSR M field + * untouched. + */ + mask &= ~CPSR_M; + } else { + switch_mode(env, val & CPSR_M); + } + } + mask &= ~CACHED_CPSR_BITS; + env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); +} + +/* Sign/zero extend */ +uint32_t HELPER(sxtb16)(uint32_t x) { + uint32_t res; + res = (uint16_t)(int8_t) x; + res |= (uint32_t)(int8_t)(x >> 16) << 16; + return res; +} + +uint32_t HELPER(uxtb16)(uint32_t x) { + uint32_t res; + res = (uint16_t)(uint8_t) x; + res |= (uint32_t)(uint8_t)(x >> 16) << 16; + return res; +} + +uint32_t HELPER(clz)(uint32_t x) { + uint32_t res; + res= (uint32_t)clz32(x); + return res; +} + +int32_t HELPER(sdiv)(int32_t num, int32_t den) { + if (den == 0) + return 0; + if (num == INT_MIN && den == -1) + return INT_MIN; + return num / den; +} + +uint32_t HELPER(udiv)(uint32_t num, uint32_t den) { + if (den == 0) + return 0; + return num / den; +} + +uint32_t HELPER(rbit)(uint32_t x) { + x = ((x & 0xff000000) >> 24) | ((x & 0x00ff0000) >> 8) | ((x & 0x0000ff00) << 8) | ((x & 0x000000ff) << 24); + x = ((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4); + x = ((x & 0x88888888) >> 3) | ((x & 0x44444444) >> 1) | ((x & 0x22222222) << 1) | ((x & 0x11111111) << 3); + return x; +} + +uint32_t HELPER(abs)(uint32_t x) { + return ((int32_t) x < 0) ? -x : x; +} + +/* Map CPU modes onto saved register banks. 
*/ +int bank_number(CPUARMState *env, int mode) { + switch (mode) { + case ARM_CPU_MODE_USR: + case ARM_CPU_MODE_SYS: + return 0; + case ARM_CPU_MODE_SVC: + return 1; + case ARM_CPU_MODE_ABT: + return 2; + case ARM_CPU_MODE_UND: + return 3; + case ARM_CPU_MODE_IRQ: + return 4; + case ARM_CPU_MODE_FIQ: + return 5; + } + cpu_abort(env, "Bad mode %x\n", mode); + return -1; +} + +void switch_mode(CPUARMState *env, int mode) { + int old_mode; + int i; + + old_mode = env->uncached_cpsr & CPSR_M; + if (mode == old_mode) + return; + + if (old_mode == ARM_CPU_MODE_FIQ) { + memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); + memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); + } else if (mode == ARM_CPU_MODE_FIQ) { + memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); + memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); + } + + i = bank_number(env, old_mode); + env->banked_r13[i] = env->regs[13]; + env->banked_r14[i] = env->regs[14]; + env->banked_spsr[i] = env->spsr; + + i = bank_number(env, mode); + env->regs[13] = env->banked_r13[i]; + env->regs[14] = env->banked_r14[i]; + env->spsr = env->banked_spsr[i]; +} + +static void v7m_push(CPUARMState *env, uint32_t val) { + env->regs[13] -= 4; + stl_phys(env->regs[13], val); +} + +static uint32_t v7m_pop(CPUARMState *env) { + uint32_t val; + val = ldl_phys(env->regs[13]); + env->regs[13] += 4; + return val; +} + +/* Switch to V7M main or process stack pointer. */ +static void switch_v7m_sp(CPUARMState *env, int process) { + uint32_t tmp; + if (env->v7m.current_sp != process) { + tmp = env->v7m.other_sp; + env->v7m.other_sp = env->regs[13]; + env->regs[13] = tmp; + env->v7m.current_sp = process; + } +} + +static void do_v7m_exception_exit(CPUARMState *env) { + uint32_t type; + uint32_t xpsr; + + type = env->regs[15]; + if (env->v7m.exception != 0) + armv7m_nvic_complete_irq(env->nvic, env->v7m.exception); + + /* Switch to the target stack. */ + switch_v7m_sp(env, (type & 4) != 0); + /* Pop registers. */ + env->regs[0] = v7m_pop(env); + env->regs[1] = v7m_pop(env); + env->regs[2] = v7m_pop(env); + env->regs[3] = v7m_pop(env); + env->regs[12] = v7m_pop(env); + env->regs[14] = v7m_pop(env); + env->regs[15] = v7m_pop(env); + xpsr = v7m_pop(env); + xpsr_write(env, xpsr, 0xfffffdff); + /* Undo stack alignment. */ + if (xpsr & 0x200) + env->regs[13] |= 4; + /* ??? The exception return type specifies Thread/Handler mode. However + this is also implied by the xPSR value. Not sure what to do + if there is a mismatch. */ + /* ??? Likewise for mismatches between the CONTROL register and the stack + pointer. */ +} + +static void do_interrupt_v7m(CPUARMState *env) { + uint32_t xpsr = xpsr_read(env); + uint32_t lr; + uint32_t addr; + + lr = 0xfffffff1; + if (env->v7m.current_sp) + lr |= 4; + if (env->v7m.exception == 0) + lr |= 8; + + /* For exceptions we just mark as pending on the NVIC, and let that + handle it. */ + /* TODO: Need to escalate if the current priority is higher than the + one we're raising. 
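+       (Editor's note, not imported text: for a taken interrupt, the code
+       after the switch stacks the v7-M hardware frame -- xPSR, PC, LR, r12,
+       r3, r2, r1, r0, with xPSR at the highest address -- the exact reverse
+       of the pops in do_v7m_exception_exit() above. EXC_RETURN is encoded
+       in lr: bit 2 selects the process stack and bit 3 thread mode.)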
*/ + switch (env->exception_index) { + case EXCP_UDEF: + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE); + return; + case EXCP_SWI: + env->regs[15] += 2; + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC); + return; + case EXCP_PREFETCH_ABORT: + case EXCP_DATA_ABORT: + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM); + return; + case EXCP_BKPT: + if (semihosting_enabled) { + int nr; + nr = arm_lduw_code(env->regs[15], env->bswap_code) & 0xff; + if (nr == 0xab) { + env->regs[15] += 2; + env->regs[0] = do_arm_semihosting(env); + return; + } + } + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG); + return; + case EXCP_IRQ: + env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic); + break; + case EXCP_EXCEPTION_EXIT: + do_v7m_exception_exit(env); + return; + default: + cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index); + return; /* Never happens. Keep compiler happy. */ + } + + /* Align stack pointer. */ + /* ??? Should only do this if Configuration Control Register + STACKALIGN bit is set. */ + if (env->regs[13] & 4) { + env->regs[13] -= 4; + xpsr |= 0x200; + } + /* Switch to the handler mode. */ + v7m_push(env, xpsr); + v7m_push(env, env->regs[15]); + v7m_push(env, env->regs[14]); + v7m_push(env, env->regs[12]); + v7m_push(env, env->regs[3]); + v7m_push(env, env->regs[2]); + v7m_push(env, env->regs[1]); + v7m_push(env, env->regs[0]); + switch_v7m_sp(env, 0); + /* Clear IT bits */ + env->condexec_bits = 0; + env->regs[14] = lr; + addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4); + env->regs[15] = addr & 0xfffffffe; + env->thumb = addr & 1; +} + +/* Handle a CPU exception. */ +void do_interrupt(CPUARMState *env) { + uint32_t addr; + uint32_t mask; + int new_mode; + uint32_t offset; + + if (IS_M(env)) { + do_interrupt_v7m(env); + return; + } + /* TODO: Vectored interrupt controller. */ + switch (env->exception_index) { + case EXCP_UDEF: + new_mode = ARM_CPU_MODE_UND; + addr = 0x04; + mask = CPSR_I; + if (env->thumb) + offset = 2; + else + offset = 4; + break; + case EXCP_SWI: + if (semihosting_enabled) { + /* Check for semihosting interrupt. */ + if (env->thumb) { + mask = arm_lduw_code(env->regs[15] - 2, env->bswap_code) & 0xff; + } else { + mask = arm_ldl_code(env->regs[15] - 4, env->bswap_code) & 0xffffff; + } + /* Only intercept calls from privileged modes, to provide some + semblance of security. */ + if (((mask == 0x123456 && !env->thumb) || (mask == 0xab && env->thumb)) && + (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) { + env->regs[0] = do_arm_semihosting(env); + return; + } + } + new_mode = ARM_CPU_MODE_SVC; + addr = 0x08; + mask = CPSR_I; + /* The PC already points to the next instruction. */ + offset = 0; + break; + case EXCP_BKPT: + /* See if this is a semihosting syscall. */ + if (env->thumb && semihosting_enabled) { + mask = arm_lduw_code(env->regs[15], env->bswap_code) & 0xff; + if (mask == 0xab && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) { + env->regs[15] += 2; + env->regs[0] = do_arm_semihosting(env); + return; + } + } + env->cp15.c5_insn = 2; + /* Fall through to prefetch abort. */ + case EXCP_PREFETCH_ABORT: + new_mode = ARM_CPU_MODE_ABT; + addr = 0x0c; + mask = CPSR_A | CPSR_I; + offset = 4; + break; + case EXCP_DATA_ABORT: + new_mode = ARM_CPU_MODE_ABT; + addr = 0x10; + mask = CPSR_A | CPSR_I; + offset = 8; + break; + case EXCP_IRQ: + new_mode = ARM_CPU_MODE_IRQ; + addr = 0x18; + /* Disable IRQ and imprecise data aborts. 
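+           (Editor's note, not imported text: each case selects its legacy
+           vector offset -- 0x04 undef, 0x08 SWI, 0x0c prefetch abort,
+           0x10 data abort, 0x18 IRQ, 0x1c FIQ -- and these are relocated to
+           0xffff0000 when SCTLR.V, tested below as bit 13 of c1_sys,
+           selects high vectors.)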
*/ + mask = CPSR_A | CPSR_I; + offset = 4; + break; + case EXCP_FIQ: + new_mode = ARM_CPU_MODE_FIQ; + addr = 0x1c; + /* Disable FIQ, IRQ and imprecise data aborts. */ + mask = CPSR_A | CPSR_I | CPSR_F; + offset = 4; + break; + default: + cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index); + return; /* Never happens. Keep compiler happy. */ + } + /* High vectors. */ + if (env->cp15.c1_sys & (1 << 13)) { + addr += 0xffff0000; + } + switch_mode(env, new_mode); + env->spsr = cpsr_read(env); + /* Clear IT bits. */ + env->condexec_bits = 0; + /* Switch to the new mode, and to the correct instruction set. */ + env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode; + env->uncached_cpsr |= mask; + /* this is a lie, as the was no c1_sys on V4T/V5, but who cares + * and we should just guard the thumb mode on V4 */ + if (arm_feature(env, ARM_FEATURE_V4T)) { + env->thumb = (env->cp15.c1_sys & (1 << 30)) != 0; + } + env->regs[14] = env->regs[15] + offset; + env->regs[15] = addr; + env->interrupt_request |= CPU_INTERRUPT_EXITTB; +} + +/* Check section/page access permissions. + Returns the page protection flags, or zero if the access is not + permitted. */ +static inline int check_ap(CPUARMState *env, int ap, int domain_prot, int access_type, int is_user) { + int prot_ro; + + if (domain_prot == 3) { + return PAGE_READ | PAGE_WRITE; + } + + if (access_type == 1) + prot_ro = 0; + else + prot_ro = PAGE_READ; + + switch (ap) { + case 0: + if (access_type == 1) + return 0; + switch ((env->cp15.c1_sys >> 8) & 3) { + case 1: + return is_user ? 0 : PAGE_READ; + case 2: + return PAGE_READ; + default: + return 0; + } + case 1: + return is_user ? 0 : PAGE_READ | PAGE_WRITE; + case 2: + if (is_user) + return prot_ro; + else + return PAGE_READ | PAGE_WRITE; + case 3: + return PAGE_READ | PAGE_WRITE; + case 4: /* Reserved. */ + return 0; + case 5: + return is_user ? 0 : prot_ro; + case 6: + return prot_ro; + case 7: + if (!arm_feature(env, ARM_FEATURE_V6K)) + return 0; + return prot_ro; + default: + abort(); + } +} + +static uint32_t get_level1_table_address(CPUARMState *env, uint32_t address) { + uint32_t table; + + if (address & env->cp15.c2_mask) + table = env->cp15.c2_base1 & 0xffffc000; + else + table = env->cp15.c2_base0 & env->cp15.c2_base_mask; + + table |= (address >> 18) & 0x3ffc; + return table; +} + +static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type, int is_user, uint32_t *phys_ptr, + int *prot, target_ulong *page_size) { + int code; + uint32_t table; + uint32_t desc; + int type; + int ap; + int domain; + int domain_prot; + uint32_t phys_addr; + + /* Pagetable walk. */ + /* Lookup l1 descriptor. */ + table = get_level1_table_address(env, address); + desc = ldl_phys(table); + type = (desc & 3); + domain = (desc >> 5) & 0x0f; + domain_prot = (env->cp15.c3 >> (domain * 2)) & 3; + if (type == 0) { + /* Section translation fault. */ + code = 5; + goto do_fault; + } + if (domain_prot == 0 || domain_prot == 2) { + if (type == 2) + code = 9; /* Section domain fault. */ + else + code = 11; /* Page domain fault. */ + goto do_fault; + } + if (type == 2) { + /* 1Mb section. */ + phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); + ap = (desc >> 10) & 3; + code = 13; + *page_size = 1024 * 1024; + } else { + /* Lookup l2 entry. */ + if (type == 1) { + /* Coarse pagetable. */ + table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); + } else { + /* Fine pagetable. 
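+            (Editor's note, not imported text: fine tables are the legacy
+            ARMv5 format -- a 4KB table indexed by VA bits [19:10] that can
+            map 1KB "tiny" pages -- while the coarse branch above uses a 1KB
+            table indexed by VA bits [19:12].)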
*/ + table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); + } + desc = ldl_phys(table); + switch (desc & 3) { + case 0: /* Page translation fault. */ + code = 7; + goto do_fault; + case 1: /* 64k page. */ + phys_addr = (desc & 0xffff0000) | (address & 0xffff); + ap = (desc >> (4 + ((address >> 13) & 6))) & 3; + *page_size = 0x10000; + break; + case 2: /* 4k page. */ + phys_addr = (desc & 0xfffff000) | (address & 0xfff); + ap = (desc >> (4 + ((address >> 13) & 6))) & 3; + *page_size = 0x1000; + break; + case 3: /* 1k page. */ + if (type == 1) { + if (arm_feature(env, ARM_FEATURE_XSCALE)) { + phys_addr = (desc & 0xfffff000) | (address & 0xfff); + } else { + /* Page translation fault. */ + code = 7; + goto do_fault; + } + } else { + phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); + } + ap = (desc >> 4) & 3; + *page_size = 0x400; + break; + default: + /* Never happens, but compiler isn't smart enough to tell. */ + abort(); + } + code = 15; + } + *prot = check_ap(env, ap, domain_prot, access_type, is_user); + if (!*prot) { + /* Access permission fault. */ + goto do_fault; + } + *prot |= PAGE_EXEC; + *phys_ptr = phys_addr; + return 0; +do_fault: + return code | (domain << 4); +} + +static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type, int is_user, uint32_t *phys_ptr, + int *prot, target_ulong *page_size) { + int code; + uint32_t table; + uint32_t desc; + uint32_t xn; + int type; + int ap; + int domain; + int domain_prot; + uint32_t phys_addr; + + /* Pagetable walk. */ + /* Lookup l1 descriptor. */ + table = get_level1_table_address(env, address); + desc = ldl_phys(table); + type = (desc & 3); + if (type == 0) { + /* Section translation fault. */ + code = 5; + domain = 0; + goto do_fault; + } else if (type == 2 && (desc & (1 << 18))) { + /* Supersection. */ + domain = 0; + } else { + /* Section or page. */ + domain = (desc >> 5) & 0x0f; + } + domain_prot = (env->cp15.c3 >> (domain * 2)) & 3; + if (domain_prot == 0 || domain_prot == 2) { + if (type == 2) + code = 9; /* Section domain fault. */ + else + code = 11; /* Page domain fault. */ + goto do_fault; + } + if (type == 2) { + if (desc & (1 << 18)) { + /* Supersection. */ + phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); + *page_size = 0x1000000; + } else { + /* Section. */ + phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); + *page_size = 0x100000; + } + ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); + xn = desc & (1 << 4); + code = 13; + } else { + /* Lookup l2 entry. */ + table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); + desc = ldl_phys(table); + ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); + switch (desc & 3) { + case 0: /* Page translation fault. */ + code = 7; + goto do_fault; + case 1: /* 64k page. */ + phys_addr = (desc & 0xffff0000) | (address & 0xffff); + xn = desc & (1 << 15); + *page_size = 0x10000; + break; + case 2: + case 3: /* 4k page. */ + phys_addr = (desc & 0xfffff000) | (address & 0xfff); + xn = desc & 1; + *page_size = 0x1000; + break; + default: + /* Never happens, but compiler isn't smart enough to tell. */ + abort(); + } + code = 15; + } + if (domain_prot == 3) { + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + } else { + if (xn && access_type == 2) + goto do_fault; + + /* The simplified model uses AP[0] as an access control bit. */ + if ((env->cp15.c1_sys & (1 << 29)) && (ap & 1) == 0) { + /* Access flag fault. */ + code = (code == 15) ? 
6 : 3; + goto do_fault; + } + *prot = check_ap(env, ap, domain_prot, access_type, is_user); + if (!*prot) { + /* Access permission fault. */ + goto do_fault; + } + if (!xn) { + *prot |= PAGE_EXEC; + } + } + *phys_ptr = phys_addr; + return 0; +do_fault: + return code | (domain << 4); +} + +static int get_phys_addr_mpu(CPUARMState *env, uint32_t address, int access_type, int is_user, uint32_t *phys_ptr, + int *prot) { + int n; + uint32_t mask; + uint32_t base; + + *phys_ptr = address; + for (n = 7; n >= 0; n--) { + base = env->cp15.c6_region[n]; + if ((base & 1) == 0) + continue; + mask = 1 << ((base >> 1) & 0x1f); + /* Keep this shift separate from the above to avoid an + (undefined) << 32. */ + mask = (mask << 1) - 1; + if (((base ^ address) & ~mask) == 0) + break; + } + if (n < 0) + return 2; + + if (access_type == 2) { + mask = env->cp15.c5_insn; + } else { + mask = env->cp15.c5_data; + } + mask = (mask >> (n * 4)) & 0xf; + switch (mask) { + case 0: + return 1; + case 1: + if (is_user) + return 1; + *prot = PAGE_READ | PAGE_WRITE; + break; + case 2: + *prot = PAGE_READ; + if (!is_user) + *prot |= PAGE_WRITE; + break; + case 3: + *prot = PAGE_READ | PAGE_WRITE; + break; + case 5: + if (is_user) + return 1; + *prot = PAGE_READ; + break; + case 6: + *prot = PAGE_READ; + break; + default: + /* Bad permission. */ + return 1; + } + *prot |= PAGE_EXEC; + return 0; +} + +static inline int get_phys_addr(CPUARMState *env, uint32_t address, int access_type, int is_user, uint32_t *phys_ptr, + int *prot, target_ulong *page_size) { + /* Fast Context Switch Extension. */ + if (address < 0x02000000) + address += env->cp15.c13_fcse; + + if ((env->cp15.c1_sys & 1) == 0) { + /* MMU/MPU disabled. */ + *phys_ptr = address; + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + *page_size = TARGET_PAGE_SIZE; + return 0; + } else if (arm_feature(env, ARM_FEATURE_MPU)) { + *page_size = TARGET_PAGE_SIZE; + return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr, prot); + } else if (env->cp15.c1_sys & (1 << 23)) { + return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr, prot, page_size); + } else { + return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr, prot, page_size); + } +} + +int cpu_arm_handle_mmu_fault(CPUARMState *env, target_ulong address, int access_type, int mmu_idx) { + uint32_t phys_addr; + target_ulong page_size; + int prot; + int ret, is_user; + + is_user = mmu_idx == MMU_USER_IDX; + ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot, &page_size); + if (ret == 0) { + /* Map a single [sub]page. 
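+        (Editor's note, not imported text: the ~0x3ff masking below aligns
+        both addresses to 1KB, the smallest page size the v5 format can map,
+        so one TLB entry never spans a translation boundary; the real
+        granule is still passed via page_size.)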
*/ + phys_addr &= ~(uint32_t) 0x3ff; + address &= ~(uint32_t) 0x3ff; + tlb_set_page(env, address, phys_addr, prot, mmu_idx, page_size); + return 0; + } + + if (access_type == 2) { + env->cp15.c5_insn = ret; + env->cp15.c6_insn = address; + env->exception_index = EXCP_PREFETCH_ABORT; + } else { + env->cp15.c5_data = ret; + if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6)) + env->cp15.c5_data |= (1 << 11); + env->cp15.c6_data = address; + env->exception_index = EXCP_DATA_ABORT; + } + return 1; +} + +target_phys_addr_t cpu_get_phys_page_debug(CPUARMState *env, target_ulong addr) { + uint32_t phys_addr; + target_ulong page_size; + int prot; + int ret; + + ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot, &page_size); + + if (ret != 0) + return -1; + + return phys_addr; +} + +void HELPER(set_cp)(CPUARMState *env, uint32_t insn, uint32_t val) { + int cp_num = (insn >> 8) & 0xf; + int cp_info = (insn >> 5) & 7; + int src = (insn >> 16) & 0xf; + int operand = insn & 0xf; + + if (env->cp[cp_num].cp_write) + env->cp[cp_num].cp_write(env->cp[cp_num].opaque, cp_info, src, operand, val); +} + +uint32_t HELPER(get_cp)(CPUARMState *env, uint32_t insn) { + int cp_num = (insn >> 8) & 0xf; + int cp_info = (insn >> 5) & 7; + int dest = (insn >> 16) & 0xf; + int operand = insn & 0xf; + + if (env->cp[cp_num].cp_read) + return env->cp[cp_num].cp_read(env->cp[cp_num].opaque, cp_info, dest, operand); + return 0; +} + +/* Return basic MPU access permission bits. */ +static uint32_t simple_mpu_ap_bits(uint32_t val) { + uint32_t ret; + uint32_t mask; + int i; + ret = 0; + mask = 3; + for (i = 0; i < 16; i += 2) { + ret |= (val >> i) & mask; + mask <<= 2; + } + return ret; +} + +/* Pad basic MPU access permission bits to extended format. */ +static uint32_t extended_mpu_ap_bits(uint32_t val) { + uint32_t ret; + uint32_t mask; + int i; + ret = 0; + mask = 3; + for (i = 0; i < 16; i += 2) { + ret |= (val & mask) << i; + mask <<= 2; + } + return ret; +} + +void HELPER(set_cp15)(CPUARMState *env, uint32_t insn, uint32_t val) { + int op1; + int op2; + int crm; + + op1 = (insn >> 21) & 7; + op2 = (insn >> 5) & 7; + crm = insn & 0xf; + switch ((insn >> 16) & 0xf) { + case 0: + /* ID codes. */ + if (arm_feature(env, ARM_FEATURE_XSCALE)) + break; + if (arm_feature(env, ARM_FEATURE_OMAPCP)) + break; + if (arm_feature(env, ARM_FEATURE_V7) && op1 == 2 && crm == 0 && op2 == 0) { + env->cp15.c0_cssel = val & 0xf; + break; + } + goto bad_reg; + case 1: /* System configuration. */ + if (arm_feature(env, ARM_FEATURE_V7) && op1 == 0 && crm == 1 && op2 == 0) { + env->cp15.c1_scr = val; + break; + } + if (arm_feature(env, ARM_FEATURE_OMAPCP)) + op2 = 0; + switch (op2) { + case 0: + if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0) + env->cp15.c1_sys = val; + /* ??? Lots of these bits are not implemented. */ + /* This may enable/disable the MMU, so do a TLB flush. */ + tlb_flush(env, 1); + break; + case 1: /* Auxiliary control register. */ + if (arm_feature(env, ARM_FEATURE_XSCALE)) { + env->cp15.c1_xscaleauxcr = val; + break; + } + /* Not implemented. */ + break; + case 2: + if (arm_feature(env, ARM_FEATURE_XSCALE)) + goto bad_reg; + if (env->cp15.c1_coproc != val) { + env->cp15.c1_coproc = val; + /* ??? Is this safe when called from within a TB? */ + tb_flush(env); + } + break; + default: + goto bad_reg; + } + break; + case 2: /* MMU Page table control / MPU cache control. 
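+        (Editor's note, not imported text: in the MMU branch below, op2 == 2
+        is TTBCR -- N = val chooses how many top VA bits steer the walk to
+        TTBR1, c2_mask keeps exactly those N bits, and c2_base_mask relaxes
+        the 16KB TTBR0 alignment as the TTBR0 table shrinks to 2^(14-N)
+        bytes; compare get_level1_table_address() above.)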
*/ + if (arm_feature(env, ARM_FEATURE_MPU)) { + switch (op2) { + case 0: + env->cp15.c2_data = val; + break; + case 1: + env->cp15.c2_insn = val; + break; + default: + goto bad_reg; + } + } else { + switch (op2) { + case 0: + env->cp15.c2_base0 = val; + break; + case 1: + env->cp15.c2_base1 = val; + break; + case 2: + val &= 7; + env->cp15.c2_control = val; + env->cp15.c2_mask = ~(((uint32_t) 0xffffffffu) >> val); + env->cp15.c2_base_mask = ~((uint32_t) 0x3fffu >> val); + break; + default: + goto bad_reg; + } + } + break; + case 3: /* MMU Domain access control / MPU write buffer control. */ + env->cp15.c3 = val; + tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */ + break; + case 4: /* Reserved. */ + goto bad_reg; + case 5: /* MMU Fault status / MPU access permission. */ + if (arm_feature(env, ARM_FEATURE_OMAPCP)) + op2 = 0; + switch (op2) { + case 0: + if (arm_feature(env, ARM_FEATURE_MPU)) + val = extended_mpu_ap_bits(val); + env->cp15.c5_data = val; + break; + case 1: + if (arm_feature(env, ARM_FEATURE_MPU)) + val = extended_mpu_ap_bits(val); + env->cp15.c5_insn = val; + break; + case 2: + if (!arm_feature(env, ARM_FEATURE_MPU)) + goto bad_reg; + env->cp15.c5_data = val; + break; + case 3: + if (!arm_feature(env, ARM_FEATURE_MPU)) + goto bad_reg; + env->cp15.c5_insn = val; + break; + default: + goto bad_reg; + } + break; + case 6: /* MMU Fault address / MPU base/size. */ + if (arm_feature(env, ARM_FEATURE_MPU)) { + if (crm >= 8) + goto bad_reg; + env->cp15.c6_region[crm] = val; + } else { + if (arm_feature(env, ARM_FEATURE_OMAPCP)) + op2 = 0; + switch (op2) { + case 0: + env->cp15.c6_data = val; + break; + case 1: /* ??? This is WFAR on armv6 */ + case 2: + env->cp15.c6_insn = val; + break; + default: + goto bad_reg; + } + } + break; + case 7: /* Cache control. */ + env->cp15.c15_i_max = 0x000; + env->cp15.c15_i_min = 0xff0; + if (op1 != 0) { + goto bad_reg; + } + /* No cache, so nothing to do except VA->PA translations. */ + if (arm_feature(env, ARM_FEATURE_VAPA)) { + switch (crm) { + case 4: + if (arm_feature(env, ARM_FEATURE_V7)) { + env->cp15.c7_par = val & 0xfffff6ff; + } else { + env->cp15.c7_par = val & 0xfffff1ff; + } + break; + case 8: { + uint32_t phys_addr; + target_ulong page_size; + int prot; + int ret, is_user = op2 & 2; + int access_type = op2 & 1; + + if (op2 & 4) { + /* Other states are only available with TrustZone */ + goto bad_reg; + } + ret = get_phys_addr(env, val, access_type, is_user, &phys_addr, &prot, &page_size); + if (ret == 0) { + /* We do not set any attribute bits in the PAR */ + if (page_size == (1 << 24) && arm_feature(env, ARM_FEATURE_V7)) { + env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1; + } else { + env->cp15.c7_par = phys_addr & 0xfffff000; + } + } else { + env->cp15.c7_par = + ((ret & (10 << 1)) >> 5) | ((ret & (12 << 1)) >> 6) | ((ret & 0xf) << 1) | 1; + } + break; + } + } + } + break; + case 8: /* MMU TLB control. 
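+        (Editor's note, not imported text: the emulated TLB is not tagged by
+        ASID, so the by-ASID operations below are approximations -- TLBIMVA
+        and TLBIMVAA both simply flush the page for every ASID.)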
*/ + switch (op2) { + case 0: /* Invalidate all (TLBIALL) */ + tlb_flush(env, 1); + break; + case 1: /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ + tlb_flush_page(env, val & TARGET_PAGE_MASK); + break; + case 2: /* Invalidate by ASID (TLBIASID) */ + tlb_flush(env, val == 0); + break; + case 3: /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ + tlb_flush_page(env, val & TARGET_PAGE_MASK); + break; + default: + goto bad_reg; + } + break; + case 9: + if (arm_feature(env, ARM_FEATURE_OMAPCP)) + break; + if (arm_feature(env, ARM_FEATURE_STRONGARM)) + break; /* Ignore ReadBuffer access */ + switch (crm) { + case 0: /* Cache lockdown. */ + switch (op1) { + case 0: /* L1 cache. */ + switch (op2) { + case 0: + env->cp15.c9_data = val; + break; + case 1: + env->cp15.c9_insn = val; + break; + default: + goto bad_reg; + } + break; + case 1: /* L2 cache. */ + /* Ignore writes to L2 lockdown/auxiliary registers. */ + break; + default: + goto bad_reg; + } + break; + case 1: /* TCM memory region registers. */ + /* Not implemented. */ + goto bad_reg; + case 12: /* Performance monitor control */ + /* Performance monitors are implementation defined in v7, + * but with an ARM recommended set of registers, which we + * follow (although we don't actually implement any counters) + */ + if (!arm_feature(env, ARM_FEATURE_V7)) { + goto bad_reg; + } + switch (op2) { + case 0: /* performance monitor control register */ + /* only the DP, X, D and E bits are writable */ + env->cp15.c9_pmcr &= ~0x39; + env->cp15.c9_pmcr |= (val & 0x39); + break; + case 1: /* Count enable set register */ + val &= (1 << 31); + env->cp15.c9_pmcnten |= val; + break; + case 2: /* Count enable clear */ + val &= (1 << 31); + env->cp15.c9_pmcnten &= ~val; + break; + case 3: /* Overflow flag status */ + env->cp15.c9_pmovsr &= ~val; + break; + case 4: /* Software increment */ + /* RAZ/WI since we don't implement the software-count event */ + break; + case 5: /* Event counter selection register */ + /* Since we don't implement any events, writing to this register + * is actually UNPREDICTABLE. So we choose to RAZ/WI. + */ + break; + default: + goto bad_reg; + } + break; + case 13: /* Performance counters */ + if (!arm_feature(env, ARM_FEATURE_V7)) { + goto bad_reg; + } + switch (op2) { + case 0: /* Cycle count register: not implemented, so RAZ/WI */ + break; + case 1: /* Event type select */ + env->cp15.c9_pmxevtyper = val & 0xff; + break; + case 2: /* Event count register */ + /* Unimplemented (we have no events), RAZ/WI */ + break; + default: + goto bad_reg; + } + break; + case 14: /* Performance monitor control */ + if (!arm_feature(env, ARM_FEATURE_V7)) { + goto bad_reg; + } + switch (op2) { + case 0: /* user enable */ + env->cp15.c9_pmuserenr = val & 1; + /* changes access rights for cp registers, so flush tbs */ + tb_flush(env); + break; + case 1: /* interrupt enable set */ + /* We have no event counters so only the C bit can be changed */ + val &= (1 << 31); + env->cp15.c9_pminten |= val; + break; + case 2: /* interrupt enable clear */ + val &= (1 << 31); + env->cp15.c9_pminten &= ~val; + break; + } + break; + default: + goto bad_reg; + } + break; + case 10: /* MMU TLB lockdown. */ + /* ??? TLB lockdown not implemented. */ + break; + case 12: /* Reserved. */ + goto bad_reg; + case 13: /* Process ID. */ + switch (op2) { + case 0: + /* Unlike real hardware the qemu TLB uses virtual addresses, + not modified virtual addresses, so this causes a TLB flush. 
+ */ + if (env->cp15.c13_fcse != val) + tlb_flush(env, 1); + env->cp15.c13_fcse = val; + break; + case 1: + /* This changes the ASID, so do a TLB flush. */ + if (env->cp15.c13_context != val && !arm_feature(env, ARM_FEATURE_MPU)) + tlb_flush(env, 0); + env->cp15.c13_context = val; + break; + default: + goto bad_reg; + } + break; + case 14: /* Generic timer */ + if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { + /* Dummy implementation: RAZ/WI for all */ + break; + } + goto bad_reg; + case 15: /* Implementation specific. */ + if (arm_feature(env, ARM_FEATURE_XSCALE)) { + if (op2 == 0 && crm == 1) { + if (env->cp15.c15_cpar != (val & 0x3fff)) { + /* Changes cp0 to cp13 behavior, so needs a TB flush. */ + tb_flush(env); + env->cp15.c15_cpar = val & 0x3fff; + } + break; + } + goto bad_reg; + } + if (arm_feature(env, ARM_FEATURE_OMAPCP)) { + switch (crm) { + case 0: + break; + case 1: /* Set TI925T configuration. */ + env->cp15.c15_ticonfig = val & 0xe7; + env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */ + ARM_CPUID_TI915T + : ARM_CPUID_TI925T; + break; + case 2: /* Set I_max. */ + env->cp15.c15_i_max = val; + break; + case 3: /* Set I_min. */ + env->cp15.c15_i_min = val; + break; + case 4: /* Set thread-ID. */ + env->cp15.c15_threadid = val & 0xffff; + break; + case 8: /* Wait-for-interrupt (deprecated). */ + cpu_interrupt(env, CPU_INTERRUPT_HALT); + break; + default: + goto bad_reg; + } + } + if (ARM_CPUID(env) == ARM_CPUID_CORTEXA9) { + switch (crm) { + case 0: + if ((op1 == 0) && (op2 == 0)) { + env->cp15.c15_power_control = val; + } else if ((op1 == 0) && (op2 == 1)) { + env->cp15.c15_diagnostic = val; + } else if ((op1 == 0) && (op2 == 2)) { + env->cp15.c15_power_diagnostic = val; + } + default: + break; + } + } + break; + } + return; +bad_reg: + /* ??? For debugging only. Should raise illegal instruction exception. */ + cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n", (insn >> 16) & 0xf, crm, op1, op2); +} + +uint32_t HELPER(get_cp15)(CPUARMState *env, uint32_t insn) { + int op1; + int op2; + int crm; + + op1 = (insn >> 21) & 7; + op2 = (insn >> 5) & 7; + crm = insn & 0xf; + switch ((insn >> 16) & 0xf) { + case 0: /* ID codes. */ + switch (op1) { + case 0: + switch (crm) { + case 0: + switch (op2) { + case 0: /* Device ID. */ + return env->cp15.c0_cpuid; + case 1: /* Cache Type. */ + return env->cp15.c0_cachetype; + case 2: /* TCM status. */ + return 0; + case 3: /* TLB type register. */ + return 0; /* No lockable TLB entries. */ + case 5: /* MPIDR */ + /* The MPIDR was standardised in v7; prior to + * this it was implemented only in the 11MPCore. + * For all other pre-v7 cores it does not exist. + */ + if (arm_feature(env, ARM_FEATURE_V7) || ARM_CPUID(env) == ARM_CPUID_ARM11MPCORE) { + int mpidr = env->cpu_index; + /* We don't support setting cluster ID ([8..11]) + * so these bits always RAZ. + */ + if (arm_feature(env, ARM_FEATURE_V7MP)) { + mpidr |= (1 << 31); + /* Cores which are uniprocessor (non-coherent) + * but still implement the MP extensions set + * bit 30. (For instance, A9UP.) However we do + * not currently model any of those cores. 
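+                             (Editor's note, not imported text: so the value
+                             returned below is just cpu_index, with bit 31
+                             set whenever the MP extensions are present.)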
+ */ + } + return mpidr; + } + /* otherwise fall through to the unimplemented-reg case */ + default: + goto bad_reg; + } + case 1: + if (!arm_feature(env, ARM_FEATURE_V6)) + goto bad_reg; + return env->cp15.c0_c1[op2]; + case 2: + if (!arm_feature(env, ARM_FEATURE_V6)) + goto bad_reg; + return env->cp15.c0_c2[op2]; + case 3: + case 4: + case 5: + case 6: + case 7: + return 0; + default: + goto bad_reg; + } + case 1: + /* These registers aren't documented on arm11 cores. However + Linux looks at them anyway. */ + if (!arm_feature(env, ARM_FEATURE_V6)) + goto bad_reg; + if (crm != 0) + goto bad_reg; + if (!arm_feature(env, ARM_FEATURE_V7)) + return 0; + + switch (op2) { + case 0: + return env->cp15.c0_ccsid[env->cp15.c0_cssel]; + case 1: + return env->cp15.c0_clid; + case 7: + return 0; + } + goto bad_reg; + case 2: + if (op2 != 0 || crm != 0) + goto bad_reg; + return env->cp15.c0_cssel; + default: + goto bad_reg; + } + case 1: /* System configuration. */ + if (arm_feature(env, ARM_FEATURE_V7) && op1 == 0 && crm == 1 && op2 == 0) { + return env->cp15.c1_scr; + } + if (arm_feature(env, ARM_FEATURE_OMAPCP)) + op2 = 0; + switch (op2) { + case 0: /* Control register. */ + return env->cp15.c1_sys; + case 1: /* Auxiliary control register. */ + if (arm_feature(env, ARM_FEATURE_XSCALE)) + return env->cp15.c1_xscaleauxcr; + if (!arm_feature(env, ARM_FEATURE_AUXCR)) + goto bad_reg; + switch (ARM_CPUID(env)) { + case ARM_CPUID_ARM1026: + return 1; + case ARM_CPUID_ARM1136: + case ARM_CPUID_ARM1136_R2: + case ARM_CPUID_ARM1176: + return 7; + case ARM_CPUID_ARM11MPCORE: + return 1; + case ARM_CPUID_CORTEXA8: + return 2; + case ARM_CPUID_CORTEXA9: + case ARM_CPUID_CORTEXA15: + return 0; + default: + goto bad_reg; + } + case 2: /* Coprocessor access register. */ + if (arm_feature(env, ARM_FEATURE_XSCALE)) + goto bad_reg; + return env->cp15.c1_coproc; + default: + goto bad_reg; + } + case 2: /* MMU Page table control / MPU cache control. */ + if (arm_feature(env, ARM_FEATURE_MPU)) { + switch (op2) { + case 0: + return env->cp15.c2_data; + break; + case 1: + return env->cp15.c2_insn; + break; + default: + goto bad_reg; + } + } else { + switch (op2) { + case 0: + return env->cp15.c2_base0; + case 1: + return env->cp15.c2_base1; + case 2: + return env->cp15.c2_control; + default: + goto bad_reg; + } + } + case 3: /* MMU Domain access control / MPU write buffer control. */ + return env->cp15.c3; + case 4: /* Reserved. */ + goto bad_reg; + case 5: /* MMU Fault status / MPU access permission. */ + if (arm_feature(env, ARM_FEATURE_OMAPCP)) + op2 = 0; + switch (op2) { + case 0: + if (arm_feature(env, ARM_FEATURE_MPU)) + return simple_mpu_ap_bits(env->cp15.c5_data); + return env->cp15.c5_data; + case 1: + if (arm_feature(env, ARM_FEATURE_MPU)) + return simple_mpu_ap_bits(env->cp15.c5_insn); + return env->cp15.c5_insn; + case 2: + if (!arm_feature(env, ARM_FEATURE_MPU)) + goto bad_reg; + return env->cp15.c5_data; + case 3: + if (!arm_feature(env, ARM_FEATURE_MPU)) + goto bad_reg; + return env->cp15.c5_insn; + default: + goto bad_reg; + } + case 6: /* MMU Fault address. */ + if (arm_feature(env, ARM_FEATURE_MPU)) { + if (crm >= 8) + goto bad_reg; + return env->cp15.c6_region[crm]; + } else { + if (arm_feature(env, ARM_FEATURE_OMAPCP)) + op2 = 0; + switch (op2) { + case 0: + return env->cp15.c6_data; + case 1: + if (arm_feature(env, ARM_FEATURE_V6)) { + /* Watchpoint Fault Adrress. */ + return 0; /* Not implemented. */ + } else { + /* Instruction Fault Adrress. 
*/ + /* Arm9 doesn't have an IFAR, but implementing it anyway + shouldn't do any harm. */ + return env->cp15.c6_insn; + } + case 2: + if (arm_feature(env, ARM_FEATURE_V6)) { + /* Instruction Fault Adrress. */ + return env->cp15.c6_insn; + } else { + goto bad_reg; + } + default: + goto bad_reg; + } + } + case 7: /* Cache control. */ + if (crm == 4 && op1 == 0 && op2 == 0) { + return env->cp15.c7_par; + } + /* FIXME: Should only clear Z flag if destination is r15. */ + env->ZF = 0; + return 0; + case 8: /* MMU TLB control. */ + goto bad_reg; + case 9: + switch (crm) { + case 0: /* Cache lockdown */ + switch (op1) { + case 0: /* L1 cache. */ + if (arm_feature(env, ARM_FEATURE_OMAPCP)) { + return 0; + } + switch (op2) { + case 0: + return env->cp15.c9_data; + case 1: + return env->cp15.c9_insn; + default: + goto bad_reg; + } + case 1: /* L2 cache */ + /* L2 Lockdown and Auxiliary control. */ + switch (op2) { + case 0: + /* L2 cache lockdown (A8 only) */ + return 0; + case 2: + /* L2 cache auxiliary control (A8) or control (A15) */ + if (ARM_CPUID(env) == ARM_CPUID_CORTEXA15) { + /* Linux wants the number of processors from here. + * Might as well set the interrupt-controller bit too. + */ + return ((smp_cpus - 1) << 24) | (1 << 23); + } + return 0; + case 3: + /* L2 cache extended control (A15) */ + return 0; + default: + goto bad_reg; + } + default: + goto bad_reg; + } + break; + case 12: /* Performance monitor control */ + if (!arm_feature(env, ARM_FEATURE_V7)) { + goto bad_reg; + } + switch (op2) { + case 0: /* performance monitor control register */ + return env->cp15.c9_pmcr; + case 1: /* count enable set */ + case 2: /* count enable clear */ + return env->cp15.c9_pmcnten; + case 3: /* overflow flag status */ + return env->cp15.c9_pmovsr; + case 4: /* software increment */ + case 5: /* event counter selection register */ + return 0; /* Unimplemented, RAZ/WI */ + default: + goto bad_reg; + } + case 13: /* Performance counters */ + if (!arm_feature(env, ARM_FEATURE_V7)) { + goto bad_reg; + } + switch (op2) { + case 1: /* Event type select */ + return env->cp15.c9_pmxevtyper; + case 0: /* Cycle count register */ + case 2: /* Event count register */ + /* Unimplemented, so RAZ/WI */ + return 0; + default: + goto bad_reg; + } + case 14: /* Performance monitor control */ + if (!arm_feature(env, ARM_FEATURE_V7)) { + goto bad_reg; + } + switch (op2) { + case 0: /* user enable */ + return env->cp15.c9_pmuserenr; + case 1: /* interrupt enable set */ + case 2: /* interrupt enable clear */ + return env->cp15.c9_pminten; + default: + goto bad_reg; + } + default: + goto bad_reg; + } + break; + case 10: /* MMU TLB lockdown. */ + /* ??? TLB lockdown not implemented. */ + return 0; + case 11: /* TCM DMA control. */ + case 12: /* Reserved. */ + goto bad_reg; + case 13: /* Process ID. */ + switch (op2) { + case 0: + return env->cp15.c13_fcse; + case 1: + return env->cp15.c13_context; + default: + goto bad_reg; + } + case 14: /* Generic timer */ + if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { + /* Dummy implementation: RAZ/WI for all */ + return 0; + } + goto bad_reg; + case 15: /* Implementation specific. */ + if (arm_feature(env, ARM_FEATURE_XSCALE)) { + if (op2 == 0 && crm == 1) + return env->cp15.c15_cpar; + + goto bad_reg; + } + if (arm_feature(env, ARM_FEATURE_OMAPCP)) { + switch (crm) { + case 0: + return 0; + case 1: /* Read TI925T configuration. */ + return env->cp15.c15_ticonfig; + case 2: /* Read I_max. */ + return env->cp15.c15_i_max; + case 3: /* Read I_min. 
*/ + return env->cp15.c15_i_min; + case 4: /* Read thread-ID. */ + return env->cp15.c15_threadid; + case 8: /* TI925T_status */ + return 0; + } + /* TODO: Peripheral port remap register: + * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt + * controller base address at $rn & ~0xfff and map size of + * 0x200 << ($rn & 0xfff), when MMU is off. */ + goto bad_reg; + } + if (ARM_CPUID(env) == ARM_CPUID_CORTEXA9) { + switch (crm) { + case 0: + if ((op1 == 4) && (op2 == 0)) { + /* The config_base_address should hold the value of + * the peripheral base. ARM should get this from a CPU + * object property, but that support isn't available in + * December 2011. Default to 0 for now and board models + * that care can set it by a private hook */ + return env->cp15.c15_config_base_address; + } else if ((op1 == 0) && (op2 == 0)) { + /* power_control should be set to maximum latency. Again, + default to 0 and set by private hook */ + return env->cp15.c15_power_control; + } else if ((op1 == 0) && (op2 == 1)) { + return env->cp15.c15_diagnostic; + } else if ((op1 == 0) && (op2 == 2)) { + return env->cp15.c15_power_diagnostic; + } + break; + case 1: /* NEON Busy */ + return 0; + case 5: /* tlb lockdown */ + case 6: + case 7: + if ((op1 == 5) && (op2 == 2)) { + return 0; + } + break; + default: + break; + } + goto bad_reg; + } + return 0; + } +bad_reg: + /* ??? For debugging only. Should raise illegal instruction exception. */ + cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n", (insn >> 16) & 0xf, crm, op1, op2); + return 0; +} + +void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val) { + if ((env->uncached_cpsr & CPSR_M) == mode) { + env->regs[13] = val; + } else { + env->banked_r13[bank_number(env, mode)] = val; + } +} + +uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode) { + if ((env->uncached_cpsr & CPSR_M) == mode) { + return env->regs[13]; + } else { + return env->banked_r13[bank_number(env, mode)]; + } +} + +uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) { + switch (reg) { + case 0: /* APSR */ + return xpsr_read(env) & 0xf8000000; + case 1: /* IAPSR */ + return xpsr_read(env) & 0xf80001ff; + case 2: /* EAPSR */ + return xpsr_read(env) & 0xff00fc00; + case 3: /* xPSR */ + return xpsr_read(env) & 0xff00fdff; + case 5: /* IPSR */ + return xpsr_read(env) & 0x000001ff; + case 6: /* EPSR */ + return xpsr_read(env) & 0x0700fc00; + case 7: /* IEPSR */ + return xpsr_read(env) & 0x0700edff; + case 8: /* MSP */ + return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13]; + case 9: /* PSP */ + return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp; + case 16: /* PRIMASK */ + return (env->uncached_cpsr & CPSR_I) != 0; + case 17: /* BASEPRI */ + case 18: /* BASEPRI_MAX */ + return env->v7m.basepri; + case 19: /* FAULTMASK */ + return (env->uncached_cpsr & CPSR_F) != 0; + case 20: /* CONTROL */ + return env->v7m.control; + default: + /* ??? For debugging only. */ + cpu_abort(env, "Unimplemented system register read (%d)\n", reg); + return 0; + } +} + +void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) { + switch (reg) { + case 0: /* APSR */ + xpsr_write(env, val, 0xf8000000); + break; + case 1: /* IAPSR */ + xpsr_write(env, val, 0xf8000000); + break; + case 2: /* EAPSR */ + xpsr_write(env, val, 0xfe00fc00); + break; + case 3: /* xPSR */ + xpsr_write(env, val, 0xfe00fc00); + break; + case 5: /* IPSR */ + /* IPSR bits are readonly. 
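           Writes to it are simply dropped just below. For reference,
           the xPSR field grouping assumed by the read masks in
           v7m_mrs above is:

               APSR  N,Z,C,V,Q flags     bits [31:27]
               EPSR  ICI/IT and T bits   bits [26:24] and [15:10]
               IPSR  exception number    bits [8:0]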
*/ + break; + case 6: /* EPSR */ + xpsr_write(env, val, 0x0600fc00); + break; + case 7: /* IEPSR */ + xpsr_write(env, val, 0x0600fc00); + break; + case 8: /* MSP */ + if (env->v7m.current_sp) + env->v7m.other_sp = val; + else + env->regs[13] = val; + break; + case 9: /* PSP */ + if (env->v7m.current_sp) + env->regs[13] = val; + else + env->v7m.other_sp = val; + break; + case 16: /* PRIMASK */ + if (val & 1) + env->uncached_cpsr |= CPSR_I; + else + env->uncached_cpsr &= ~CPSR_I; + break; + case 17: /* BASEPRI */ + env->v7m.basepri = val & 0xff; + break; + case 18: /* BASEPRI_MAX */ + val &= 0xff; + if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0)) + env->v7m.basepri = val; + break; + case 19: /* FAULTMASK */ + if (val & 1) + env->uncached_cpsr |= CPSR_F; + else + env->uncached_cpsr &= ~CPSR_F; + break; + case 20: /* CONTROL */ + env->v7m.control = val & 3; + switch_v7m_sp(env, (val & 2) != 0); + break; + default: + /* ??? For debugging only. */ + cpu_abort(env, "Unimplemented system register write (%d)\n", reg); + return; + } +} + +void cpu_arm_set_cp_io(CPUARMState *env, int cpnum, ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write, void *opaque) { + if (cpnum < 0 || cpnum > 14) { + cpu_abort(env, "Bad coprocessor number: %i\n", cpnum); + return; + } + + env->cp[cpnum].cp_read = cp_read; + env->cp[cpnum].cp_write = cp_write; + env->cp[cpnum].opaque = opaque; +} + +/* Note that signed overflow is undefined in C. The following routines are + careful to use unsigned types where modulo arithmetic is required. + Failure to do so _will_ break on newer gcc. */ + +/* Signed saturating arithmetic. */ + +/* Perform 16-bit signed saturating addition. */ +static inline uint16_t add16_sat(uint16_t a, uint16_t b) { + uint16_t res; + + res = a + b; + if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { + if (a & 0x8000) + res = 0x8000; + else + res = 0x7fff; + } + return res; +} + +/* Perform 8-bit signed saturating addition. */ +static inline uint8_t add8_sat(uint8_t a, uint8_t b) { + uint8_t res; + + res = a + b; + if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { + if (a & 0x80) + res = 0x80; + else + res = 0x7f; + } + return res; +} + +/* Perform 16-bit signed saturating subtraction. */ +static inline uint16_t sub16_sat(uint16_t a, uint16_t b) { + uint16_t res; + + res = a - b; + if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { + if (a & 0x8000) + res = 0x8000; + else + res = 0x7fff; + } + return res; +} + +/* Perform 8-bit signed saturating subtraction. */ +static inline uint8_t sub8_sat(uint8_t a, uint8_t b) { + uint8_t res; + + res = a - b; + if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { + if (a & 0x80) + res = 0x80; + else + res = 0x7f; + } + return res; +} + +#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); +#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); +#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); +#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); +#define PFX q + +#include "op_addsub.h" + +/* Unsigned saturating arithmetic. 
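   These helpers clamp instead of wrapping; because the arithmetic is
   done in unsigned types, wraparound is detected portably with a plain
   "res < a" test. Two illustrative values (hypothetical checks, not
   part of the file):

       add16_usat(0xfff0, 0x0020) == 0xffff   // sum wrapped, clamp high
       sub16_usat(0x0010, 0x0020) == 0x0000   // would go negative, clamp at 0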
*/ +static inline uint16_t add16_usat(uint16_t a, uint16_t b) { + uint16_t res; + res = a + b; + if (res < a) + res = 0xffff; + return res; +} + +static inline uint16_t sub16_usat(uint16_t a, uint16_t b) { + if (a > b) + return a - b; + else + return 0; +} + +static inline uint8_t add8_usat(uint8_t a, uint8_t b) { + uint8_t res; + res = a + b; + if (res < a) + res = 0xff; + return res; +} + +static inline uint8_t sub8_usat(uint8_t a, uint8_t b) { + if (a > b) + return a - b; + else + return 0; +} + +#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); +#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); +#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); +#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); +#define PFX uq + +#include "op_addsub.h" + +/* Signed modulo arithmetic. */ +#define SARITH16(a, b, n, op) \ + do { \ + int32_t sum; \ + sum = (int32_t)(int16_t)(a) op(int32_t)(int16_t)(b); \ + RESULT(sum, n, 16); \ + if (sum >= 0) \ + ge |= 3 << (n * 2); \ + } while (0) + +#define SARITH8(a, b, n, op) \ + do { \ + int32_t sum; \ + sum = (int32_t)(int8_t)(a) op(int32_t)(int8_t)(b); \ + RESULT(sum, n, 8); \ + if (sum >= 0) \ + ge |= 1 << n; \ + } while (0) + +#define ADD16(a, b, n) SARITH16(a, b, n, +) +#define SUB16(a, b, n) SARITH16(a, b, n, -) +#define ADD8(a, b, n) SARITH8(a, b, n, +) +#define SUB8(a, b, n) SARITH8(a, b, n, -) +#define PFX s +#define ARITH_GE + +#include "op_addsub.h" + +/* Unsigned modulo arithmetic. */ +#define ADD16(a, b, n) \ + do { \ + uint32_t sum; \ + sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \ + RESULT(sum, n, 16); \ + if ((sum >> 16) == 1) \ + ge |= 3 << (n * 2); \ + } while (0) + +#define ADD8(a, b, n) \ + do { \ + uint32_t sum; \ + sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \ + RESULT(sum, n, 8); \ + if ((sum >> 8) == 1) \ + ge |= 1 << n; \ + } while (0) + +#define SUB16(a, b, n) \ + do { \ + uint32_t sum; \ + sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \ + RESULT(sum, n, 16); \ + if ((sum >> 16) == 0) \ + ge |= 3 << (n * 2); \ + } while (0) + +#define SUB8(a, b, n) \ + do { \ + uint32_t sum; \ + sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \ + RESULT(sum, n, 8); \ + if ((sum >> 8) == 0) \ + ge |= 1 << n; \ + } while (0) + +#define PFX u +#define ARITH_GE + +#include "op_addsub.h" + +/* Halved signed arithmetic. */ +#define ADD16(a, b, n) RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16) +#define SUB16(a, b, n) RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16) +#define ADD8(a, b, n) RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8) +#define SUB8(a, b, n) RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8) +#define PFX sh + +#include "op_addsub.h" + +/* Halved unsigned arithmetic. */ +#define ADD16(a, b, n) RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16) +#define SUB16(a, b, n) RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16) +#define ADD8(a, b, n) RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8) +#define SUB8(a, b, n) RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8) +#define PFX uh + +#include "op_addsub.h" + +static inline uint8_t do_usad(uint8_t a, uint8_t b) { + if (a > b) + return a - b; + else + return b - a; +} + +/* Unsigned sum of absolute byte differences. 
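   Worked example, taking byte lanes from the least significant byte up:

       usad8(0x01020304, 0x04030201)
           == |0x04 - 0x01| + |0x03 - 0x02| + |0x02 - 0x03| + |0x01 - 0x04|
           == 3 + 1 + 1 + 3 == 8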
*/
+uint32_t HELPER(usad8)(uint32_t a, uint32_t b) {
+    uint32_t sum;
+    sum = do_usad(a, b);
+    sum += do_usad(a >> 8, b >> 8);
+    sum += do_usad(a >> 16, b >> 16);
+    sum += do_usad(a >> 24, b >> 24);
+    return sum;
+}
+
+/* For ARMv6 SEL instruction. */
+uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b) {
+    uint32_t mask;
+
+    mask = 0;
+    if (flags & 1)
+        mask |= 0xff;
+    if (flags & 2)
+        mask |= 0xff00;
+    if (flags & 4)
+        mask |= 0xff0000;
+    if (flags & 8)
+        mask |= 0xff000000;
+    return (a & mask) | (b & ~mask);
+}
+
+uint32_t HELPER(logicq_cc)(uint64_t val) {
+    return (val >> 32) | (val != 0);
+}
+
+/* VFP support. We follow the convention used for VFP instructions:
+   single-precision routines have an "s" suffix, double-precision
+   ones a "d" suffix. */
+
+/* Convert host exception flags to vfp form. */
+static inline int vfp_exceptbits_from_host(int host_bits) {
+    int target_bits = 0;
+
+    if (host_bits & float_flag_invalid)
+        target_bits |= 1;
+    if (host_bits & float_flag_divbyzero)
+        target_bits |= 2;
+    if (host_bits & float_flag_overflow)
+        target_bits |= 4;
+    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
+        target_bits |= 8;
+    if (host_bits & float_flag_inexact)
+        target_bits |= 0x10;
+    if (host_bits & float_flag_input_denormal)
+        target_bits |= 0x80;
+    return target_bits;
+}
+
+uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env) {
+    int i;
+    uint32_t fpscr;
+
+    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff) | (env->vfp.vec_len << 16) | (env->vfp.vec_stride << 20);
+    i = get_float_exception_flags(&env->vfp.fp_status);
+    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
+    fpscr |= vfp_exceptbits_from_host(i);
+    return fpscr;
+}
+
+uint32_t vfp_get_fpscr(CPUARMState *env) {
+    return HELPER(vfp_get_fpscr)(env);
+}
+
+/* Convert vfp exception flags to host form.
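   The FPSCR cumulative exception bits involved are, for reference:

       bit 0  IOC  invalid operation    bit 3  UFC  underflow
       bit 1  DZC  divide by zero       bit 4  IXC  inexact
       bit 2  OFC  overflow             bit 7  IDC  input denormal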
*/ +static inline int vfp_exceptbits_to_host(int target_bits) { + int host_bits = 0; + + if (target_bits & 1) + host_bits |= float_flag_invalid; + if (target_bits & 2) + host_bits |= float_flag_divbyzero; + if (target_bits & 4) + host_bits |= float_flag_overflow; + if (target_bits & 8) + host_bits |= float_flag_underflow; + if (target_bits & 0x10) + host_bits |= float_flag_inexact; + if (target_bits & 0x80) + host_bits |= float_flag_input_denormal; + return host_bits; +} + +void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val) { + int i; + uint32_t changed; + + changed = env->vfp.xregs[ARM_VFP_FPSCR]; + env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff); + env->vfp.vec_len = (val >> 16) & 7; + env->vfp.vec_stride = (val >> 20) & 3; + + changed ^= val; + if (changed & (3 << 22)) { + i = (val >> 22) & 3; + switch (i) { + case 0: + i = float_round_nearest_even; + break; + case 1: + i = float_round_up; + break; + case 2: + i = float_round_down; + break; + case 3: + i = float_round_to_zero; + break; + } + set_float_rounding_mode(i, &env->vfp.fp_status); + } + if (changed & (1 << 24)) { + set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status); + set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status); + } + if (changed & (1 << 25)) + set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status); + + i = vfp_exceptbits_to_host(val); + set_float_exception_flags(i, &env->vfp.fp_status); + set_float_exception_flags(0, &env->vfp.standard_fp_status); +} + +void vfp_set_fpscr(CPUARMState *env, uint32_t val) { + HELPER(vfp_set_fpscr)(env, val); +} + +#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_, name), p)) + +#define VFP_BINOP(name) \ + float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) { \ + float_status *fpst = fpstp; \ + return float32_##name(a, b, fpst); \ + } \ + float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) { \ + float_status *fpst = fpstp; \ + return float64_##name(a, b, fpst); \ + } +VFP_BINOP(add) +VFP_BINOP(sub) +VFP_BINOP(mul) +VFP_BINOP(div) +#undef VFP_BINOP + +float32 VFP_HELPER(neg, s)(float32 a) { + return float32_chs(a); +} + +float64 VFP_HELPER(neg, d)(float64 a) { + return float64_chs(a); +} + +float32 VFP_HELPER(abs, s)(float32 a) { + return float32_abs(a); +} + +float64 VFP_HELPER(abs, d)(float64 a) { + return float64_abs(a); +} + +float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env) { + return float32_sqrt(a, &env->vfp.fp_status); +} + +float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env) { + return float64_sqrt(a, &env->vfp.fp_status); +} + +/* XXX: check quiet/signaling case */ +#define DO_VFP_cmp(p, type) \ + void VFP_HELPER(cmp, p)(type a, type b, CPUARMState * env) { \ + uint32_t flags; \ + switch (type##_compare_quiet(a, b, &env->vfp.fp_status)) { \ + case 0: \ + flags = 0x6; \ + break; \ + case -1: \ + flags = 0x8; \ + break; \ + case 1: \ + flags = 0x2; \ + break; \ + default: \ + case 2: \ + flags = 0x3; \ + break; \ + } \ + env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \ + } \ + void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState * env) { \ + uint32_t flags; \ + switch (type##_compare(a, b, &env->vfp.fp_status)) { \ + case 0: \ + flags = 0x6; \ + break; \ + case -1: \ + flags = 0x8; \ + break; \ + case 1: \ + flags = 0x2; \ + break; \ + default: \ + case 2: \ + flags = 0x3; \ + break; \ + } \ + env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \ + } +DO_VFP_cmp(s, float32) DO_VFP_cmp(d, float64) +#undef 
DO_VFP_cmp
+
+/* Integer to float and float to integer conversions */
+
+#define CONV_ITOF(name, fsz, sign) \
+    float##fsz HELPER(name)(uint32_t x, void *fpstp) { \
+        float_status *fpst = fpstp; \
+        return sign##int32_to_##float##fsz((sign##int32_t) x, fpst); \
+    }
+
+#define CONV_FTOI(name, fsz, sign, round) \
+    uint32_t HELPER(name)(float##fsz x, void *fpstp) { \
+        float_status *fpst = fpstp; \
+        if (float##fsz##_is_any_nan(x)) { \
+            float_raise(float_flag_invalid, fpst); \
+            return 0; \
+        } \
+        return float##fsz##_to_##sign##int32##round(x, fpst); \
+    }
+
+#define FLOAT_CONVS(name, p, fsz, sign) \
+    CONV_ITOF(vfp_##name##to##p, fsz, sign) \
+    CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
+    CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
+
+FLOAT_CONVS(si, s, 32, )
+FLOAT_CONVS(si, d, 64, )
+FLOAT_CONVS(ui, s, 32, u)
+FLOAT_CONVS(ui, d, 64, u)
+
+#undef CONV_ITOF
+#undef CONV_FTOI
+#undef FLOAT_CONVS
+
+/* floating point conversion */
+float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env) {
+    float64 r = float32_to_float64(x, &env->vfp.fp_status);
+    /* ARM requires that S<->D conversion of any kind of NaN generates
+     * a quiet NaN by forcing the most significant frac bit to 1.
+     */
+    return float64_maybe_silence_nan(r);
+}
+
+float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env) {
+    float32 r = float64_to_float32(x, &env->vfp.fp_status);
+    /* ARM requires that S<->D conversion of any kind of NaN generates
+     * a quiet NaN by forcing the most significant frac bit to 1.
+     */
+    return float32_maybe_silence_nan(r);
+}
+
+/* VFP3 fixed point conversion. */
+#define VFP_CONV_FIX(name, p, fsz, itype, sign) \
+    float##fsz HELPER(vfp_##name##to##p)(uint##fsz##_t x, uint32_t shift, void *fpstp) { \
+        float_status *fpst = fpstp; \
+        float##fsz tmp; \
+        tmp = sign##int32_to_##float##fsz((itype##_t) x, fpst); \
+        return float##fsz##_scalbn(tmp, -(int) shift, fpst); \
+    } \
+    uint##fsz##_t HELPER(vfp_to##name##p)(float##fsz x, uint32_t shift, void *fpstp) { \
+        float_status *fpst = fpstp; \
+        float##fsz tmp; \
+        if (float##fsz##_is_any_nan(x)) { \
+            float_raise(float_flag_invalid, fpst); \
+            return 0; \
+        } \
+        tmp = float##fsz##_scalbn(x, shift, fpst); \
+        return float##fsz##_to_##itype##_round_to_zero(tmp, fpst); \
+    }
+
+VFP_CONV_FIX(sh, d, 64, int16, )
+VFP_CONV_FIX(sl, d, 64, int32, )
+VFP_CONV_FIX(uh, d, 64, uint16, u)
+VFP_CONV_FIX(ul, d, 64, uint32, u)
+VFP_CONV_FIX(sh, s, 32, int16, )
+VFP_CONV_FIX(sl, s, 32, int32, )
+VFP_CONV_FIX(uh, s, 32, uint16, u)
+VFP_CONV_FIX(ul, s, 32, uint32, u)
+#undef VFP_CONV_FIX
+
+/* Half precision conversions.
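   FPSCR bit 26 is the AHP (alternative half-precision) control: when
   it is clear the IEEE 754 half format applies, hence the test below,
   effectively

       ieee = (fpscr & (1 << 26)) == 0;

   The alternative format has no infinities or NaNs, so the
   NaN-silencing fixup is only needed in the IEEE case.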
*/ +static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s) { + int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0; + float32 r = float16_to_float32(make_float16(a), ieee, s); + if (ieee) { + return float32_maybe_silence_nan(r); + } + return r; +} + +static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s) { + int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0; + float16 r = float32_to_float16(a, ieee, s); + if (ieee) { + r = float16_maybe_silence_nan(r); + } + return float16_val(r); +} + +float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env) { + return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status); +} + +uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env) { + return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status); +} + +float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env) { + return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status); +} + +uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env) { + return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status); +} + +#define float32_two make_float32(0x40000000) +#define float32_three make_float32(0x40400000) +#define float32_one_point_five make_float32(0x3fc00000) + +float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env) { + float_status *s = &env->vfp.standard_fp_status; + if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || + (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { + if (!(float32_is_zero(a) || float32_is_zero(b))) { + float_raise(float_flag_input_denormal, s); + } + return float32_two; + } + return float32_sub(float32_two, float32_mul(a, b, s), s); +} + +float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env) { + float_status *s = &env->vfp.standard_fp_status; + float32 product; + if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || + (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { + if (!(float32_is_zero(a) || float32_is_zero(b))) { + float_raise(float_flag_input_denormal, s); + } + return float32_one_point_five; + } + product = float32_mul(a, b, s); + return float32_div(float32_sub(float32_three, product, s), float32_two, s); +} + +/* NEON helpers. */ + +/* Constants 256 and 512 are used in some helpers; we avoid relying on + * int->float conversions at run-time. */ +#define float64_256 make_float64(0x4070000000000000LL) +#define float64_512 make_float64(0x4080000000000000LL) + +/* The algorithm that must be used to calculate the estimate + * is specified by the ARM ARM. + */ +static float64 recip_estimate(float64 a, CPUARMState *env) { + /* These calculations mustn't set any fp exception flags, + * so we use a local copy of the fp_status. 
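 *
 * A worked instance of the quantisation below, for a = 0.75:
 * q = (int)(0.75 * 512) = 384, r = 512 / 384.5 ~= 1.33160, and
 * s = (int)(256 * r + 0.5) = 341, so the estimate returned is
 * 341 / 256 ~= 1.33203 against a true reciprocal of 1.33333.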
+ */ + float_status dummy_status = env->vfp.standard_fp_status; + float_status *s = &dummy_status; + /* q = (int)(a * 512.0) */ + float64 q = float64_mul(float64_512, a, s); + int64_t q_int = float64_to_int64_round_to_zero(q, s); + + /* r = 1.0 / (((double)q + 0.5) / 512.0) */ + q = int64_to_float64(q_int, s); + q = float64_add(q, float64_half, s); + q = float64_div(q, float64_512, s); + q = float64_div(float64_one, q, s); + + /* s = (int)(256.0 * r + 0.5) */ + q = float64_mul(q, float64_256, s); + q = float64_add(q, float64_half, s); + q_int = float64_to_int64_round_to_zero(q, s); + + /* return (double)s / 256.0 */ + return float64_div(int64_to_float64(q_int, s), float64_256, s); +} + +float32 HELPER(recpe_f32)(float32 a, CPUARMState *env) { + float_status *s = &env->vfp.standard_fp_status; + float64 f64; + uint32_t val32 = float32_val(a); + + int result_exp; + int a_exp = (val32 & 0x7f800000) >> 23; + int sign = val32 & 0x80000000; + + if (float32_is_any_nan(a)) { + if (float32_is_signaling_nan(a)) { + float_raise(float_flag_invalid, s); + } + return float32_default_nan; + } else if (float32_is_infinity(a)) { + return float32_set_sign(float32_zero, float32_is_neg(a)); + } else if (float32_is_zero_or_denormal(a)) { + if (!float32_is_zero(a)) { + float_raise(float_flag_input_denormal, s); + } + float_raise(float_flag_divbyzero, s); + return float32_set_sign(float32_infinity, float32_is_neg(a)); + } else if (a_exp >= 253) { + float_raise(float_flag_underflow, s); + return float32_set_sign(float32_zero, float32_is_neg(a)); + } + + f64 = make_float64((0x3feULL << 52) | ((int64_t)(val32 & 0x7fffff) << 29)); + + result_exp = 253 - a_exp; + + f64 = recip_estimate(f64, env); + + val32 = sign | ((result_exp & 0xff) << 23) | ((float64_val(f64) >> 29) & 0x7fffff); + return make_float32(val32); +} + +/* The algorithm that must be used to calculate the estimate + * is specified by the ARM ARM. + */ +static float64 recip_sqrt_estimate(float64 a, CPUARMState *env) { + /* These calculations mustn't set any fp exception flags, + * so we use a local copy of the fp_status. 
+ */ + float_status dummy_status = env->vfp.standard_fp_status; + float_status *s = &dummy_status; + float64 q; + int64_t q_int; + + if (float64_lt(a, float64_half, s)) { + /* range 0.25 <= a < 0.5 */ + + /* a in units of 1/512 rounded down */ + /* q0 = (int)(a * 512.0); */ + q = float64_mul(float64_512, a, s); + q_int = float64_to_int64_round_to_zero(q, s); + + /* reciprocal root r */ + /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0); */ + q = int64_to_float64(q_int, s); + q = float64_add(q, float64_half, s); + q = float64_div(q, float64_512, s); + q = float64_sqrt(q, s); + q = float64_div(float64_one, q, s); + } else { + /* range 0.5 <= a < 1.0 */ + + /* a in units of 1/256 rounded down */ + /* q1 = (int)(a * 256.0); */ + q = float64_mul(float64_256, a, s); + int64_t q_int = float64_to_int64_round_to_zero(q, s); + + /* reciprocal root r */ + /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */ + q = int64_to_float64(q_int, s); + q = float64_add(q, float64_half, s); + q = float64_div(q, float64_256, s); + q = float64_sqrt(q, s); + q = float64_div(float64_one, q, s); + } + /* r in units of 1/256 rounded to nearest */ + /* s = (int)(256.0 * r + 0.5); */ + + q = float64_mul(q, float64_256, s); + q = float64_add(q, float64_half, s); + q_int = float64_to_int64_round_to_zero(q, s); + + /* return (double)s / 256.0;*/ + return float64_div(int64_to_float64(q_int, s), float64_256, s); +} + +float32 HELPER(rsqrte_f32)(float32 a, CPUARMState *env) { + float_status *s = &env->vfp.standard_fp_status; + int result_exp; + float64 f64; + uint32_t val; + uint64_t val64; + + val = float32_val(a); + + if (float32_is_any_nan(a)) { + if (float32_is_signaling_nan(a)) { + float_raise(float_flag_invalid, s); + } + return float32_default_nan; + } else if (float32_is_zero_or_denormal(a)) { + if (!float32_is_zero(a)) { + float_raise(float_flag_input_denormal, s); + } + float_raise(float_flag_divbyzero, s); + return float32_set_sign(float32_infinity, float32_is_neg(a)); + } else if (float32_is_neg(a)) { + float_raise(float_flag_invalid, s); + return float32_default_nan; + } else if (float32_is_infinity(a)) { + return float32_zero; + } + + /* Normalize to a double-precision value between 0.25 and 1.0, + * preserving the parity of the exponent. 
*/ + if ((val & 0x800000) == 0) { + f64 = + make_float64(((uint64_t)(val & 0x80000000) << 32) | (0x3feULL << 52) | ((uint64_t)(val & 0x7fffff) << 29)); + } else { + f64 = + make_float64(((uint64_t)(val & 0x80000000) << 32) | (0x3fdULL << 52) | ((uint64_t)(val & 0x7fffff) << 29)); + } + + result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2; + + f64 = recip_sqrt_estimate(f64, env); + + val64 = float64_val(f64); + + val = ((result_exp & 0xff) << 23) | ((val64 >> 29) & 0x7fffff); + return make_float32(val); +} + +uint32_t HELPER(recpe_u32)(uint32_t a, CPUARMState *env) { + float64 f64; + + if ((a & 0x80000000) == 0) { + return 0xffffffff; + } + + f64 = make_float64((0x3feULL << 52) | ((int64_t)(a & 0x7fffffff) << 21)); + + f64 = recip_estimate(f64, env); + + return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff); +} + +uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUARMState *env) { + float64 f64; + + if ((a & 0xc0000000) == 0) { + return 0xffffffff; + } + + if (a & 0x80000000) { + f64 = make_float64((0x3feULL << 52) | ((uint64_t)(a & 0x7fffffff) << 21)); + } else { /* bits 31-30 == '01' */ + f64 = make_float64((0x3fdULL << 52) | ((uint64_t)(a & 0x3fffffff) << 22)); + } + + f64 = recip_sqrt_estimate(f64, env); + + return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff); +} + +/* VFPv4 fused multiply-accumulate */ +float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp) { + float_status *fpst = fpstp; + return float32_muladd(a, b, c, 0, fpst); +} + +float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp) { + float_status *fpst = fpstp; + return float64_muladd(a, b, c, 0, fpst); +} + +void HELPER(set_teecr)(CPUARMState *env, uint32_t val) { + val &= 1; + if (env->teecr != val) { + env->teecr = val; + tb_flush(env); + } +} diff --git a/src/target-arm/helper.h b/src/target-arm/helper.h new file mode 100644 index 0000000..10458c3 --- /dev/null +++ b/src/target-arm/helper.h @@ -0,0 +1,506 @@ +/// Copyright (C) 2003 Fabrice Bellard +/// Copyright (C) 2010 Dependable Systems Laboratory, EPFL +/// Copyright (C) 2016 Cyberhaven +/// Copyrights of all contributions belong to their respective owners. +/// +/// This library is free software; you can redistribute it and/or +/// modify it under the terms of the GNU Library General Public +/// License as published by the Free Software Foundation; either +/// version 2 of the License, or (at your option) any later version. +/// +/// This library is distributed in the hope that it will be useful, +/// but WITHOUT ANY WARRANTY; without even the implied warranty of +/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +/// Library General Public License for more details. +/// +/// You should have received a copy of the GNU Library General Public +/// License along with this library; if not, see . 
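
A note on the declaration style in this header: def-helper.h is included
once here to expand the DEF_HELPER_n lines, and once more at the bottom
of the file to undefine its macros again, in the usual QEMU fashion. The
_M variants with the extra mask arguments appear to be libcpu-specific
additions carrying register-access metadata. As a sketch of the stock
convention, the prototype pass turns a declaration such as

    DEF_HELPER_2(usad8, i32, i32, i32)

into an ordinary C prototype along the lines of

    uint32_t helper_usad8(uint32_t a, uint32_t b);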
+ +#include "def-helper.h" + +#include + + +#define _RM_EXCP (_M_CF | _M_VF | _M_NF | _M_ZF) +#define _WM_EXCP (_M_CF | _M_VF | _M_NF | _M_ZF) +#define _AM_EXCP 0 + +DEF_HELPER_2_M(cpsr_write, void, i32, i32, -1, -1, 1) +DEF_HELPER_0_M(cpsr_read, i32, -1, -1, 1) + +DEF_HELPER_1_M(get_user_reg, i32, i32, -1, -1, 1) +DEF_HELPER_2_M(set_user_reg, void, i32, i32, -1, -1, 1) + +DEF_HELPER_2_M(add_cc, i32, i32, i32, -1, -1, 1) +DEF_HELPER_2_M(adc_cc, i32, i32, i32, -1, -1, 1) +DEF_HELPER_2_M(sub_cc, i32, i32, i32, -1, -1, 1) +DEF_HELPER_2_M(sbc_cc, i32, i32, i32, -1, -1, 1) +DEF_HELPER_2_M(shl_cc, i32, i32, i32, -1, -1, 1) +DEF_HELPER_2_M(shr_cc, i32, i32, i32, -1, -1, 1) +DEF_HELPER_2_M(sar_cc, i32, i32, i32, -1, -1, 1) +DEF_HELPER_2_M(ror_cc, i32, i32, i32, -1, -1, 1) + +DEF_HELPER_1(clz, i32, i32) +DEF_HELPER_1(sxtb16, i32, i32) +DEF_HELPER_1(uxtb16, i32, i32) + +DEF_HELPER_2(add_setq, i32, i32, i32) +DEF_HELPER_2(add_saturate, i32, i32, i32) +DEF_HELPER_2(sub_saturate, i32, i32, i32) +DEF_HELPER_2(add_usaturate, i32, i32, i32) +DEF_HELPER_2(sub_usaturate, i32, i32, i32) +DEF_HELPER_1(double_saturate, i32, s32) +DEF_HELPER_2(sdiv, s32, s32, s32) +DEF_HELPER_2(udiv, i32, i32, i32) +DEF_HELPER_1(rbit, i32, i32) +DEF_HELPER_1(abs, i32, i32) + +#define PAS_OP(pfx) \ + DEF_HELPER_3(pfx##add8, i32, i32, i32, ptr) \ + DEF_HELPER_3(pfx##sub8, i32, i32, i32, ptr) \ + DEF_HELPER_3(pfx##sub16, i32, i32, i32, ptr) \ + DEF_HELPER_3(pfx##add16, i32, i32, i32, ptr) \ + DEF_HELPER_3(pfx##addsubx, i32, i32, i32, ptr) \ + DEF_HELPER_3(pfx##subaddx, i32, i32, i32, ptr) + +PAS_OP(s) +PAS_OP(u) +#undef PAS_OP + +#define PAS_OP(pfx) \ + DEF_HELPER_2(pfx##add8, i32, i32, i32) \ + DEF_HELPER_2(pfx##sub8, i32, i32, i32) \ + DEF_HELPER_2(pfx##sub16, i32, i32, i32) \ + DEF_HELPER_2(pfx##add16, i32, i32, i32) \ + DEF_HELPER_2(pfx##addsubx, i32, i32, i32) \ + DEF_HELPER_2(pfx##subaddx, i32, i32, i32) +PAS_OP(q) +PAS_OP(sh) +PAS_OP(uq) +PAS_OP(uh) +#undef PAS_OP + +DEF_HELPER_2(ssat, i32, i32, i32) +DEF_HELPER_2(usat, i32, i32, i32) +DEF_HELPER_2(ssat16, i32, i32, i32) +DEF_HELPER_2(usat16, i32, i32, i32) + +DEF_HELPER_2(usad8, i32, i32, i32) + +DEF_HELPER_1(logicq_cc, i32, i64) + +DEF_HELPER_3(sel_flags, i32, i32, i32, i32) +DEF_HELPER_1(exception, void, i32) +DEF_HELPER_0(wfi, void) + +DEF_HELPER_2(get_r13_banked, i32, env, i32) +DEF_HELPER_3(set_r13_banked, void, env, i32, i32) + +//DEF_HELPER_2(get_r14_banked, i32, env, i32) +//DEF_HELPER_3(set_r14_banked, void, env, i32, i32) + +//DEF_HELPER_2(get_spsr_banked, i32, env, i32) +//DEF_HELPER_3(set_spsr_banked, void, env, i32, i32) + +DEF_HELPER_3(v7m_msr, void, env, i32, i32) +DEF_HELPER_2(v7m_mrs, i32, env, i32) + +DEF_HELPER_3(set_cp15, void, env, i32, i32) +DEF_HELPER_2(get_cp15, i32, env, i32) + +DEF_HELPER_3(set_cp, void, env, i32, i32) +DEF_HELPER_2(get_cp, i32, env, i32) + +DEF_HELPER_1(vfp_get_fpscr, i32, env) +DEF_HELPER_2(vfp_set_fpscr, void, env, i32) + +DEF_HELPER_3(vfp_adds, f32, f32, f32, ptr) +DEF_HELPER_3(vfp_addd, f64, f64, f64, ptr) +DEF_HELPER_3(vfp_subs, f32, f32, f32, ptr) +DEF_HELPER_3(vfp_subd, f64, f64, f64, ptr) +DEF_HELPER_3(vfp_muls, f32, f32, f32, ptr) +DEF_HELPER_3(vfp_muld, f64, f64, f64, ptr) +DEF_HELPER_3(vfp_divs, f32, f32, f32, ptr) +DEF_HELPER_3(vfp_divd, f64, f64, f64, ptr) +DEF_HELPER_1(vfp_negs, f32, f32) +DEF_HELPER_1(vfp_negd, f64, f64) +DEF_HELPER_1(vfp_abss, f32, f32) +DEF_HELPER_1(vfp_absd, f64, f64) +DEF_HELPER_2(vfp_sqrts, f32, f32, env) +DEF_HELPER_2(vfp_sqrtd, f64, f64, env) +DEF_HELPER_3(vfp_cmps, void, f32, f32, env) 
+DEF_HELPER_3(vfp_cmpd, void, f64, f64, env) +DEF_HELPER_3(vfp_cmpes, void, f32, f32, env) +DEF_HELPER_3(vfp_cmped, void, f64, f64, env) + +DEF_HELPER_2(vfp_fcvtds, f64, f32, env) +DEF_HELPER_2(vfp_fcvtsd, f32, f64, env) + +DEF_HELPER_2(vfp_uitos, f32, i32, ptr) +DEF_HELPER_2(vfp_uitod, f64, i32, ptr) +DEF_HELPER_2(vfp_sitos, f32, i32, ptr) +DEF_HELPER_2(vfp_sitod, f64, i32, ptr) + +DEF_HELPER_2(vfp_touis, i32, f32, ptr) +DEF_HELPER_2(vfp_touid, i32, f64, ptr) +DEF_HELPER_2(vfp_touizs, i32, f32, ptr) +DEF_HELPER_2(vfp_touizd, i32, f64, ptr) +DEF_HELPER_2(vfp_tosis, i32, f32, ptr) +DEF_HELPER_2(vfp_tosid, i32, f64, ptr) +DEF_HELPER_2(vfp_tosizs, i32, f32, ptr) +DEF_HELPER_2(vfp_tosizd, i32, f64, ptr) + +DEF_HELPER_3(vfp_toshs, i32, f32, i32, ptr) +DEF_HELPER_3(vfp_tosls, i32, f32, i32, ptr) +DEF_HELPER_3(vfp_touhs, i32, f32, i32, ptr) +DEF_HELPER_3(vfp_touls, i32, f32, i32, ptr) +DEF_HELPER_3(vfp_toshd, i64, f64, i32, ptr) +DEF_HELPER_3(vfp_tosld, i64, f64, i32, ptr) +DEF_HELPER_3(vfp_touhd, i64, f64, i32, ptr) +DEF_HELPER_3(vfp_tould, i64, f64, i32, ptr) +DEF_HELPER_3(vfp_shtos, f32, i32, i32, ptr) +DEF_HELPER_3(vfp_sltos, f32, i32, i32, ptr) +DEF_HELPER_3(vfp_uhtos, f32, i32, i32, ptr) +DEF_HELPER_3(vfp_ultos, f32, i32, i32, ptr) +DEF_HELPER_3(vfp_shtod, f64, i64, i32, ptr) +DEF_HELPER_3(vfp_sltod, f64, i64, i32, ptr) +DEF_HELPER_3(vfp_uhtod, f64, i64, i32, ptr) +DEF_HELPER_3(vfp_ultod, f64, i64, i32, ptr) + +DEF_HELPER_2(vfp_fcvt_f16_to_f32, f32, i32, env) +DEF_HELPER_2(vfp_fcvt_f32_to_f16, i32, f32, env) +DEF_HELPER_2(neon_fcvt_f16_to_f32, f32, i32, env) +DEF_HELPER_2(neon_fcvt_f32_to_f16, i32, f32, env) + +DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, ptr) +DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, ptr) + +DEF_HELPER_3(recps_f32, f32, f32, f32, env) +DEF_HELPER_3(rsqrts_f32, f32, f32, f32, env) +DEF_HELPER_2(recpe_f32, f32, f32, env) +DEF_HELPER_2(rsqrte_f32, f32, f32, env) +DEF_HELPER_2(recpe_u32, i32, i32, env) +DEF_HELPER_2(rsqrte_u32, i32, i32, env) +DEF_HELPER_4(neon_tbl, i32, i32, i32, i32, i32) + +DEF_HELPER_2(shl, i32, i32, i32) +DEF_HELPER_2(shr, i32, i32, i32) +DEF_HELPER_2(sar, i32, i32, i32) + +/* neon_helper.c */ +DEF_HELPER_3(neon_qadd_u8, i32, env, i32, i32) +DEF_HELPER_3(neon_qadd_s8, i32, env, i32, i32) +DEF_HELPER_3(neon_qadd_u16, i32, env, i32, i32) +DEF_HELPER_3(neon_qadd_s16, i32, env, i32, i32) +DEF_HELPER_3(neon_qadd_u32, i32, env, i32, i32) +DEF_HELPER_3(neon_qadd_s32, i32, env, i32, i32) +DEF_HELPER_3(neon_qsub_u8, i32, env, i32, i32) +DEF_HELPER_3(neon_qsub_s8, i32, env, i32, i32) +DEF_HELPER_3(neon_qsub_u16, i32, env, i32, i32) +DEF_HELPER_3(neon_qsub_s16, i32, env, i32, i32) +DEF_HELPER_3(neon_qsub_u32, i32, env, i32, i32) +DEF_HELPER_3(neon_qsub_s32, i32, env, i32, i32) +DEF_HELPER_3(neon_qadd_u64, i64, env, i64, i64) +DEF_HELPER_3(neon_qadd_s64, i64, env, i64, i64) +DEF_HELPER_3(neon_qsub_u64, i64, env, i64, i64) +DEF_HELPER_3(neon_qsub_s64, i64, env, i64, i64) + +DEF_HELPER_2(neon_hadd_s8, i32, i32, i32) +DEF_HELPER_2(neon_hadd_u8, i32, i32, i32) +DEF_HELPER_2(neon_hadd_s16, i32, i32, i32) +DEF_HELPER_2(neon_hadd_u16, i32, i32, i32) +DEF_HELPER_2(neon_hadd_s32, s32, s32, s32) +DEF_HELPER_2(neon_hadd_u32, i32, i32, i32) +DEF_HELPER_2(neon_rhadd_s8, i32, i32, i32) +DEF_HELPER_2(neon_rhadd_u8, i32, i32, i32) +DEF_HELPER_2(neon_rhadd_s16, i32, i32, i32) +DEF_HELPER_2(neon_rhadd_u16, i32, i32, i32) +DEF_HELPER_2(neon_rhadd_s32, s32, s32, s32) +DEF_HELPER_2(neon_rhadd_u32, i32, i32, i32) +DEF_HELPER_2(neon_hsub_s8, i32, i32, i32) +DEF_HELPER_2(neon_hsub_u8, 
i32, i32, i32) +DEF_HELPER_2(neon_hsub_s16, i32, i32, i32) +DEF_HELPER_2(neon_hsub_u16, i32, i32, i32) +DEF_HELPER_2(neon_hsub_s32, s32, s32, s32) +DEF_HELPER_2(neon_hsub_u32, i32, i32, i32) + +DEF_HELPER_2(neon_cgt_u8, i32, i32, i32) +DEF_HELPER_2(neon_cgt_s8, i32, i32, i32) +DEF_HELPER_2(neon_cgt_u16, i32, i32, i32) +DEF_HELPER_2(neon_cgt_s16, i32, i32, i32) +DEF_HELPER_2(neon_cgt_u32, i32, i32, i32) +DEF_HELPER_2(neon_cgt_s32, i32, i32, i32) +DEF_HELPER_2(neon_cge_u8, i32, i32, i32) +DEF_HELPER_2(neon_cge_s8, i32, i32, i32) +DEF_HELPER_2(neon_cge_u16, i32, i32, i32) +DEF_HELPER_2(neon_cge_s16, i32, i32, i32) +DEF_HELPER_2(neon_cge_u32, i32, i32, i32) +DEF_HELPER_2(neon_cge_s32, i32, i32, i32) + +DEF_HELPER_2(neon_min_u8, i32, i32, i32) +DEF_HELPER_2(neon_min_s8, i32, i32, i32) +DEF_HELPER_2(neon_min_u16, i32, i32, i32) +DEF_HELPER_2(neon_min_s16, i32, i32, i32) +DEF_HELPER_2(neon_min_u32, i32, i32, i32) +DEF_HELPER_2(neon_min_s32, i32, i32, i32) +DEF_HELPER_2(neon_max_u8, i32, i32, i32) +DEF_HELPER_2(neon_max_s8, i32, i32, i32) +DEF_HELPER_2(neon_max_u16, i32, i32, i32) +DEF_HELPER_2(neon_max_s16, i32, i32, i32) +DEF_HELPER_2(neon_max_u32, i32, i32, i32) +DEF_HELPER_2(neon_max_s32, i32, i32, i32) +DEF_HELPER_2(neon_pmin_u8, i32, i32, i32) +DEF_HELPER_2(neon_pmin_s8, i32, i32, i32) +DEF_HELPER_2(neon_pmin_u16, i32, i32, i32) +DEF_HELPER_2(neon_pmin_s16, i32, i32, i32) +DEF_HELPER_2(neon_pmax_u8, i32, i32, i32) +DEF_HELPER_2(neon_pmax_s8, i32, i32, i32) +DEF_HELPER_2(neon_pmax_u16, i32, i32, i32) +DEF_HELPER_2(neon_pmax_s16, i32, i32, i32) + +DEF_HELPER_2(neon_abd_u8, i32, i32, i32) +DEF_HELPER_2(neon_abd_s8, i32, i32, i32) +DEF_HELPER_2(neon_abd_u16, i32, i32, i32) +DEF_HELPER_2(neon_abd_s16, i32, i32, i32) +DEF_HELPER_2(neon_abd_u32, i32, i32, i32) +DEF_HELPER_2(neon_abd_s32, i32, i32, i32) + +DEF_HELPER_2(neon_shl_u8, i32, i32, i32) +DEF_HELPER_2(neon_shl_s8, i32, i32, i32) +DEF_HELPER_2(neon_shl_u16, i32, i32, i32) +DEF_HELPER_2(neon_shl_s16, i32, i32, i32) +DEF_HELPER_2(neon_shl_u32, i32, i32, i32) +DEF_HELPER_2(neon_shl_s32, i32, i32, i32) +DEF_HELPER_2(neon_shl_u64, i64, i64, i64) +DEF_HELPER_2(neon_shl_s64, i64, i64, i64) +DEF_HELPER_2(neon_rshl_u8, i32, i32, i32) +DEF_HELPER_2(neon_rshl_s8, i32, i32, i32) +DEF_HELPER_2(neon_rshl_u16, i32, i32, i32) +DEF_HELPER_2(neon_rshl_s16, i32, i32, i32) +DEF_HELPER_2(neon_rshl_u32, i32, i32, i32) +DEF_HELPER_2(neon_rshl_s32, i32, i32, i32) +DEF_HELPER_2(neon_rshl_u64, i64, i64, i64) +DEF_HELPER_2(neon_rshl_s64, i64, i64, i64) +DEF_HELPER_3(neon_qshl_u8, i32, env, i32, i32) +DEF_HELPER_3(neon_qshl_s8, i32, env, i32, i32) +DEF_HELPER_3(neon_qshl_u16, i32, env, i32, i32) +DEF_HELPER_3(neon_qshl_s16, i32, env, i32, i32) +DEF_HELPER_3(neon_qshl_u32, i32, env, i32, i32) +DEF_HELPER_3(neon_qshl_s32, i32, env, i32, i32) +DEF_HELPER_3(neon_qshl_u64, i64, env, i64, i64) +DEF_HELPER_3(neon_qshl_s64, i64, env, i64, i64) +DEF_HELPER_3(neon_qshlu_s8, i32, env, i32, i32); +DEF_HELPER_3(neon_qshlu_s16, i32, env, i32, i32); +DEF_HELPER_3(neon_qshlu_s32, i32, env, i32, i32); +DEF_HELPER_3(neon_qshlu_s64, i64, env, i64, i64); +DEF_HELPER_3(neon_qrshl_u8, i32, env, i32, i32) +//DEF_HELPER_3(neon_qrshl_s8, i32, env, i32, i32) +DEF_HELPER_3(neon_qrshl_u16, i32, env, i32, i32) +//DEF_HELPER_3(neon_qrshl_s16, i32, env, i32, i32) +DEF_HELPER_3(neon_qrshl_u32, i32, env, i32, i32) +DEF_HELPER_3(neon_qrshl_s32, i32, env, i32, i32) +DEF_HELPER_3(neon_qrshl_u64, i64, env, i64, i64) +DEF_HELPER_3(neon_qrshl_s64, i64, env, i64, i64) + +DEF_HELPER_2(neon_add_u8, i32, i32, 
i32) +DEF_HELPER_2(neon_add_u16, i32, i32, i32) +DEF_HELPER_2(neon_padd_u8, i32, i32, i32) +DEF_HELPER_2(neon_padd_u16, i32, i32, i32) +DEF_HELPER_2(neon_sub_u8, i32, i32, i32) +DEF_HELPER_2(neon_sub_u16, i32, i32, i32) +DEF_HELPER_2(neon_mul_u8, i32, i32, i32) +DEF_HELPER_2(neon_mul_u16, i32, i32, i32) +DEF_HELPER_2(neon_mul_p8, i32, i32, i32) +DEF_HELPER_2(neon_mull_p8, i64, i32, i32) + +DEF_HELPER_2(neon_tst_u8, i32, i32, i32) +DEF_HELPER_2(neon_tst_u16, i32, i32, i32) +DEF_HELPER_2(neon_tst_u32, i32, i32, i32) +DEF_HELPER_2(neon_ceq_u8, i32, i32, i32) +DEF_HELPER_2(neon_ceq_u16, i32, i32, i32) +DEF_HELPER_2(neon_ceq_u32, i32, i32, i32) + +DEF_HELPER_1(neon_abs_s8, i32, i32) +DEF_HELPER_1(neon_abs_s16, i32, i32) +DEF_HELPER_1(neon_clz_u8, i32, i32) +DEF_HELPER_1(neon_clz_u16, i32, i32) +DEF_HELPER_1(neon_cls_s8, i32, i32) +DEF_HELPER_1(neon_cls_s16, i32, i32) +DEF_HELPER_1(neon_cls_s32, i32, i32) +DEF_HELPER_1(neon_cnt_u8, i32, i32) + +DEF_HELPER_3(neon_qdmulh_s16, i32, env, i32, i32) +DEF_HELPER_3(neon_qrdmulh_s16, i32, env, i32, i32) +DEF_HELPER_3(neon_qdmulh_s32, i32, env, i32, i32) +DEF_HELPER_3(neon_qrdmulh_s32, i32, env, i32, i32) + +DEF_HELPER_1(neon_narrow_u8, i32, i64) +DEF_HELPER_1(neon_narrow_u16, i32, i64) +DEF_HELPER_2(neon_unarrow_sat8, i32, env, i64) +DEF_HELPER_2(neon_narrow_sat_u8, i32, env, i64) +DEF_HELPER_2(neon_narrow_sat_s8, i32, env, i64) +DEF_HELPER_2(neon_unarrow_sat16, i32, env, i64) +DEF_HELPER_2(neon_narrow_sat_u16, i32, env, i64) +DEF_HELPER_2(neon_narrow_sat_s16, i32, env, i64) +DEF_HELPER_2(neon_unarrow_sat32, i32, env, i64) +DEF_HELPER_2(neon_narrow_sat_u32, i32, env, i64) +DEF_HELPER_2(neon_narrow_sat_s32, i32, env, i64) +DEF_HELPER_1(neon_narrow_high_u8, i32, i64) +DEF_HELPER_1(neon_narrow_high_u16, i32, i64) +DEF_HELPER_1(neon_narrow_round_high_u8, i32, i64) +DEF_HELPER_1(neon_narrow_round_high_u16, i32, i64) +DEF_HELPER_1(neon_widen_u8, i64, i32) +DEF_HELPER_1(neon_widen_s8, i64, i32) +DEF_HELPER_1(neon_widen_u16, i64, i32) +DEF_HELPER_1(neon_widen_s16, i64, i32) + +DEF_HELPER_2(neon_addl_u16, i64, i64, i64) +DEF_HELPER_2(neon_addl_u32, i64, i64, i64) +DEF_HELPER_2(neon_paddl_u16, i64, i64, i64) +DEF_HELPER_2(neon_paddl_u32, i64, i64, i64) +DEF_HELPER_2(neon_subl_u16, i64, i64, i64) +DEF_HELPER_2(neon_subl_u32, i64, i64, i64) +DEF_HELPER_3(neon_addl_saturate_s32, i64, env, i64, i64) +DEF_HELPER_3(neon_addl_saturate_s64, i64, env, i64, i64) +DEF_HELPER_2(neon_abdl_u16, i64, i32, i32) +DEF_HELPER_2(neon_abdl_s16, i64, i32, i32) +DEF_HELPER_2(neon_abdl_u32, i64, i32, i32) +DEF_HELPER_2(neon_abdl_s32, i64, i32, i32) +DEF_HELPER_2(neon_abdl_u64, i64, i32, i32) +DEF_HELPER_2(neon_abdl_s64, i64, i32, i32) +DEF_HELPER_2(neon_mull_u8, i64, i32, i32) +DEF_HELPER_2(neon_mull_s8, i64, i32, i32) +DEF_HELPER_2(neon_mull_u16, i64, i32, i32) +DEF_HELPER_2(neon_mull_s16, i64, i32, i32) + +DEF_HELPER_1(neon_negl_u16, i64, i64) +DEF_HELPER_1(neon_negl_u32, i64, i64) +DEF_HELPER_1(neon_negl_u64, i64, i64) + +DEF_HELPER_2(neon_qabs_s8, i32, env, i32) +DEF_HELPER_2(neon_qabs_s16, i32, env, i32) +DEF_HELPER_2(neon_qabs_s32, i32, env, i32) +DEF_HELPER_2(neon_qneg_s8, i32, env, i32) +DEF_HELPER_2(neon_qneg_s16, i32, env, i32) +DEF_HELPER_2(neon_qneg_s32, i32, env, i32) + +DEF_HELPER_3(neon_min_f32, i32, i32, i32, ptr) +DEF_HELPER_3(neon_max_f32, i32, i32, i32, ptr) +DEF_HELPER_3(neon_abd_f32, i32, i32, i32, ptr) +DEF_HELPER_3(neon_ceq_f32, i32, i32, i32, ptr) +DEF_HELPER_3(neon_cge_f32, i32, i32, i32, ptr) +DEF_HELPER_3(neon_cgt_f32, i32, i32, i32, ptr) 
+DEF_HELPER_3(neon_acge_f32, i32, i32, i32, ptr) +DEF_HELPER_3(neon_acgt_f32, i32, i32, i32, ptr) + +/* iwmmxt_helper.c */ +DEF_HELPER_2(iwmmxt_maddsq, i64, i64, i64) +DEF_HELPER_2(iwmmxt_madduq, i64, i64, i64) +DEF_HELPER_2(iwmmxt_sadb, i64, i64, i64) +DEF_HELPER_2(iwmmxt_sadw, i64, i64, i64) +DEF_HELPER_2(iwmmxt_mulslw, i64, i64, i64) +DEF_HELPER_2(iwmmxt_mulshw, i64, i64, i64) +DEF_HELPER_2(iwmmxt_mululw, i64, i64, i64) +DEF_HELPER_2(iwmmxt_muluhw, i64, i64, i64) +DEF_HELPER_2(iwmmxt_macsw, i64, i64, i64) +DEF_HELPER_2(iwmmxt_macuw, i64, i64, i64) +DEF_HELPER_1(iwmmxt_setpsr_nz, i32, i64) + +#define DEF_IWMMXT_HELPER_SIZE_ENV(name) \ + DEF_HELPER_3(iwmmxt_##name##b, i64, env, i64, i64) \ + DEF_HELPER_3(iwmmxt_##name##w, i64, env, i64, i64) \ + DEF_HELPER_3(iwmmxt_##name##l, i64, env, i64, i64) + +DEF_IWMMXT_HELPER_SIZE_ENV(unpackl) +DEF_IWMMXT_HELPER_SIZE_ENV(unpackh) + +DEF_HELPER_2(iwmmxt_unpacklub, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackluw, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpacklul, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackhub, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackhuw, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackhul, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpacklsb, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpacklsw, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpacklsl, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackhsb, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackhsw, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackhsl, i64, env, i64) + +DEF_IWMMXT_HELPER_SIZE_ENV(cmpeq) +DEF_IWMMXT_HELPER_SIZE_ENV(cmpgtu) +DEF_IWMMXT_HELPER_SIZE_ENV(cmpgts) + +DEF_IWMMXT_HELPER_SIZE_ENV(mins) +DEF_IWMMXT_HELPER_SIZE_ENV(minu) +DEF_IWMMXT_HELPER_SIZE_ENV(maxs) +DEF_IWMMXT_HELPER_SIZE_ENV(maxu) + +DEF_IWMMXT_HELPER_SIZE_ENV(subn) +DEF_IWMMXT_HELPER_SIZE_ENV(addn) +DEF_IWMMXT_HELPER_SIZE_ENV(subu) +DEF_IWMMXT_HELPER_SIZE_ENV(addu) +DEF_IWMMXT_HELPER_SIZE_ENV(subs) +DEF_IWMMXT_HELPER_SIZE_ENV(adds) + +DEF_HELPER_3(iwmmxt_avgb0, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_avgb1, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_avgw0, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_avgw1, i64, env, i64, i64) + +DEF_HELPER_2(iwmmxt_msadb, i64, i64, i64) + +DEF_HELPER_3(iwmmxt_align, i64, i64, i64, i32) +DEF_HELPER_4(iwmmxt_insr, i64, i64, i32, i32, i32) + +DEF_HELPER_1(iwmmxt_bcstb, i64, i32) +DEF_HELPER_1(iwmmxt_bcstw, i64, i32) +DEF_HELPER_1(iwmmxt_bcstl, i64, i32) + +DEF_HELPER_1(iwmmxt_addcb, i64, i64) +DEF_HELPER_1(iwmmxt_addcw, i64, i64) +DEF_HELPER_1(iwmmxt_addcl, i64, i64) + +DEF_HELPER_1(iwmmxt_msbb, i32, i64) +DEF_HELPER_1(iwmmxt_msbw, i32, i64) +DEF_HELPER_1(iwmmxt_msbl, i32, i64) + +DEF_HELPER_3(iwmmxt_srlw, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_srll, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_srlq, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_sllw, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_slll, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_sllq, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_sraw, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_sral, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_sraq, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_rorw, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_rorl, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_rorq, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_shufh, i64, env, i64, i32) + +DEF_HELPER_3(iwmmxt_packuw, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_packul, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_packuq, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_packsw, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_packsl, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_packsq, i64, env, i64, i64) + +DEF_HELPER_3(iwmmxt_muladdsl, i64, i64, i32, i32) 
+DEF_HELPER_3(iwmmxt_muladdsw, i64, i64, i32, i32) +DEF_HELPER_3(iwmmxt_muladdswl, i64, i64, i32, i32) + +DEF_HELPER_2(set_teecr, void, env, i32) + +DEF_HELPER_3(neon_unzip8, void, env, i32, i32) +DEF_HELPER_3(neon_unzip16, void, env, i32, i32) +DEF_HELPER_3(neon_qunzip8, void, env, i32, i32) +DEF_HELPER_3(neon_qunzip16, void, env, i32, i32) +DEF_HELPER_3(neon_qunzip32, void, env, i32, i32) +DEF_HELPER_3(neon_zip8, void, env, i32, i32) +DEF_HELPER_3(neon_zip16, void, env, i32, i32) +DEF_HELPER_3(neon_qzip8, void, env, i32, i32) +DEF_HELPER_3(neon_qzip16, void, env, i32, i32) +DEF_HELPER_3(neon_qzip32, void, env, i32, i32) + +#include "def-helper.h" diff --git a/src/target-arm/iwmmxt_helper.c b/src/target-arm/iwmmxt_helper.c new file mode 100644 index 0000000..1dd8d1a --- /dev/null +++ b/src/target-arm/iwmmxt_helper.c @@ -0,0 +1,681 @@ +/* + * iwMMXt micro operations for XScale. + * + * Copyright (c) 2007 OpenedHand, Ltd. + * Written by Andrzej Zaborowski + * Copyright (c) 2008 CodeSourcery + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include +#include + +#include "cpu.h" +#include "exec-all.h" +#include "helper.h" + +/* iwMMXt macros extracted from GNU gdb. */ + +/* Set the SIMD wCASF flags for 8, 16, 32 or 64-bit operations. */ +#define SIMD8_SET( v, n, b) ((v != 0) << ((((b) + 1) * 4) + (n))) +#define SIMD16_SET(v, n, h) ((v != 0) << ((((h) + 1) * 8) + (n))) +#define SIMD32_SET(v, n, w) ((v != 0) << ((((w) + 1) * 16) + (n))) +#define SIMD64_SET(v, n) ((v != 0) << (32 + (n))) +/* Flags to pass as "n" above. */ +#define SIMD_NBIT -1 +#define SIMD_ZBIT -2 +#define SIMD_CBIT -3 +#define SIMD_VBIT -4 +/* Various status bit macros. */ +#define NBIT8(x) ((x) & 0x80) +#define NBIT16(x) ((x) & 0x8000) +#define NBIT32(x) ((x) & 0x80000000) +#define NBIT64(x) ((x) & 0x8000000000000000ULL) +#define ZBIT8(x) (((x) & 0xff) == 0) +#define ZBIT16(x) (((x) & 0xffff) == 0) +#define ZBIT32(x) (((x) & 0xffffffff) == 0) +#define ZBIT64(x) (x == 0) +/* Sign extension macros. 
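   (On the flag macros above: wCASF holds one N/Z/C/V nibble per SIMD
   lane; for byte ops, lane b's nibble occupies bits [4*b+3 : 4*b], so
   for example SIMD8_SET(v, SIMD_NBIT, 0) puts lane 0's N flag at bit 3.)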
*/ +#define EXTEND8H(a) ((uint16_t) (int8_t) (a)) +#define EXTEND8(a) ((uint32_t) (int8_t) (a)) +#define EXTEND16(a) ((uint32_t) (int16_t) (a)) +#define EXTEND16S(a) ((int32_t) (int16_t) (a)) +#define EXTEND32(a) ((uint64_t) (int32_t) (a)) + +uint64_t HELPER(iwmmxt_maddsq)(uint64_t a, uint64_t b) +{ + a = (( + EXTEND16S((a >> 0) & 0xffff) * EXTEND16S((b >> 0) & 0xffff) + + EXTEND16S((a >> 16) & 0xffff) * EXTEND16S((b >> 16) & 0xffff) + ) & 0xffffffff) | ((uint64_t) ( + EXTEND16S((a >> 32) & 0xffff) * EXTEND16S((b >> 32) & 0xffff) + + EXTEND16S((a >> 48) & 0xffff) * EXTEND16S((b >> 48) & 0xffff) + ) << 32); + return a; +} + +uint64_t HELPER(iwmmxt_madduq)(uint64_t a, uint64_t b) +{ + a = (( + ((a >> 0) & 0xffff) * ((b >> 0) & 0xffff) + + ((a >> 16) & 0xffff) * ((b >> 16) & 0xffff) + ) & 0xffffffff) | (( + ((a >> 32) & 0xffff) * ((b >> 32) & 0xffff) + + ((a >> 48) & 0xffff) * ((b >> 48) & 0xffff) + ) << 32); + return a; +} + +uint64_t HELPER(iwmmxt_sadb)(uint64_t a, uint64_t b) +{ +#define abs(x) (((x) >= 0) ? x : -x) +#define SADB(SHR) abs((int) ((a >> SHR) & 0xff) - (int) ((b >> SHR) & 0xff)) + return + SADB(0) + SADB(8) + SADB(16) + SADB(24) + + SADB(32) + SADB(40) + SADB(48) + SADB(56); +#undef SADB +} + +uint64_t HELPER(iwmmxt_sadw)(uint64_t a, uint64_t b) +{ +#define SADW(SHR) \ + abs((int) ((a >> SHR) & 0xffff) - (int) ((b >> SHR) & 0xffff)) + return SADW(0) + SADW(16) + SADW(32) + SADW(48); +#undef SADW +} + +uint64_t HELPER(iwmmxt_mulslw)(uint64_t a, uint64_t b) +{ +#define MULS(SHR) ((uint64_t) ((( \ + EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff) \ + ) >> 0) & 0xffff) << SHR) + return MULS(0) | MULS(16) | MULS(32) | MULS(48); +#undef MULS +} + +uint64_t HELPER(iwmmxt_mulshw)(uint64_t a, uint64_t b) +{ +#define MULS(SHR) ((uint64_t) ((( \ + EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff) \ + ) >> 16) & 0xffff) << SHR) + return MULS(0) | MULS(16) | MULS(32) | MULS(48); +#undef MULS +} + +uint64_t HELPER(iwmmxt_mululw)(uint64_t a, uint64_t b) +{ +#define MULU(SHR) ((uint64_t) ((( \ + ((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff) \ + ) >> 0) & 0xffff) << SHR) + return MULU(0) | MULU(16) | MULU(32) | MULU(48); +#undef MULU +} + +uint64_t HELPER(iwmmxt_muluhw)(uint64_t a, uint64_t b) +{ +#define MULU(SHR) ((uint64_t) ((( \ + ((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff) \ + ) >> 16) & 0xffff) << SHR) + return MULU(0) | MULU(16) | MULU(32) | MULU(48); +#undef MULU +} + +uint64_t HELPER(iwmmxt_macsw)(uint64_t a, uint64_t b) +{ +#define MACS(SHR) ( \ + EXTEND16((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff)) + return (int64_t) (MACS(0) + MACS(16) + MACS(32) + MACS(48)); +#undef MACS +} + +uint64_t HELPER(iwmmxt_macuw)(uint64_t a, uint64_t b) +{ +#define MACU(SHR) ( \ + (uint32_t) ((a >> SHR) & 0xffff) * \ + (uint32_t) ((b >> SHR) & 0xffff)) + return MACU(0) + MACU(16) + MACU(32) + MACU(48); +#undef MACU +} + +#define NZBIT8(x, i) \ + SIMD8_SET(NBIT8((x) & 0xff), SIMD_NBIT, i) | \ + SIMD8_SET(ZBIT8((x) & 0xff), SIMD_ZBIT, i) +#define NZBIT16(x, i) \ + SIMD16_SET(NBIT16((x) & 0xffff), SIMD_NBIT, i) | \ + SIMD16_SET(ZBIT16((x) & 0xffff), SIMD_ZBIT, i) +#define NZBIT32(x, i) \ + SIMD32_SET(NBIT32((x) & 0xffffffff), SIMD_NBIT, i) | \ + SIMD32_SET(ZBIT32((x) & 0xffffffff), SIMD_ZBIT, i) +#define NZBIT64(x) \ + SIMD64_SET(NBIT64(x), SIMD_NBIT) | \ + SIMD64_SET(ZBIT64(x), SIMD_ZBIT) +#define IWMMXT_OP_UNPACK(S, SH0, SH1, SH2, SH3) \ +uint64_t HELPER(glue(iwmmxt_unpack, glue(S, b)))(CPUARMState *env, \ + uint64_t a, uint64_t b) \ +{ \ + a = \ + (((a >> SH0) 
& 0xff) << 0) | (((b >> SH0) & 0xff) << 8) | \ + (((a >> SH1) & 0xff) << 16) | (((b >> SH1) & 0xff) << 24) | \ + (((a >> SH2) & 0xff) << 32) | (((b >> SH2) & 0xff) << 40) | \ + (((a >> SH3) & 0xff) << 48) | (((b >> SH3) & 0xff) << 56); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \ + NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \ + NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \ + NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \ + return a; \ +} \ +uint64_t HELPER(glue(iwmmxt_unpack, glue(S, w)))(CPUARMState *env, \ + uint64_t a, uint64_t b) \ +{ \ + a = \ + (((a >> SH0) & 0xffff) << 0) | \ + (((b >> SH0) & 0xffff) << 16) | \ + (((a >> SH2) & 0xffff) << 32) | \ + (((b >> SH2) & 0xffff) << 48); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT8(a >> 0, 0) | NZBIT8(a >> 16, 1) | \ + NZBIT8(a >> 32, 2) | NZBIT8(a >> 48, 3); \ + return a; \ +} \ +uint64_t HELPER(glue(iwmmxt_unpack, glue(S, l)))(CPUARMState *env, \ + uint64_t a, uint64_t b) \ +{ \ + a = \ + (((a >> SH0) & 0xffffffff) << 0) | \ + (((b >> SH0) & 0xffffffff) << 32); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \ + return a; \ +} \ +uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ub)))(CPUARMState *env, \ + uint64_t x) \ +{ \ + x = \ + (((x >> SH0) & 0xff) << 0) | \ + (((x >> SH1) & 0xff) << 16) | \ + (((x >> SH2) & 0xff) << 32) | \ + (((x >> SH3) & 0xff) << 48); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \ + NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \ + return x; \ +} \ +uint64_t HELPER(glue(iwmmxt_unpack, glue(S, uw)))(CPUARMState *env, \ + uint64_t x) \ +{ \ + x = \ + (((x >> SH0) & 0xffff) << 0) | \ + (((x >> SH2) & 0xffff) << 32); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \ + return x; \ +} \ +uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ul)))(CPUARMState *env, \ + uint64_t x) \ +{ \ + x = (((x >> SH0) & 0xffffffff) << 0); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \ + return x; \ +} \ +uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sb)))(CPUARMState *env, \ + uint64_t x) \ +{ \ + x = \ + ((uint64_t) EXTEND8H((x >> SH0) & 0xff) << 0) | \ + ((uint64_t) EXTEND8H((x >> SH1) & 0xff) << 16) | \ + ((uint64_t) EXTEND8H((x >> SH2) & 0xff) << 32) | \ + ((uint64_t) EXTEND8H((x >> SH3) & 0xff) << 48); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \ + NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \ + return x; \ +} \ +uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sw)))(CPUARMState *env, \ + uint64_t x) \ +{ \ + x = \ + ((uint64_t) EXTEND16((x >> SH0) & 0xffff) << 0) | \ + ((uint64_t) EXTEND16((x >> SH2) & 0xffff) << 32); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \ + return x; \ +} \ +uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sl)))(CPUARMState *env, \ + uint64_t x) \ +{ \ + x = EXTEND32((x >> SH0) & 0xffffffff); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \ + return x; \ +} +IWMMXT_OP_UNPACK(l, 0, 8, 16, 24) +IWMMXT_OP_UNPACK(h, 32, 40, 48, 56) + +#define IWMMXT_OP_CMP(SUFF, Tb, Tw, Tl, O) \ +uint64_t HELPER(glue(iwmmxt_, glue(SUFF, b)))(CPUARMState *env, \ + uint64_t a, uint64_t b) \ +{ \ + a = \ + CMP(0, Tb, O, 0xff) | CMP(8, Tb, O, 0xff) | \ + CMP(16, Tb, O, 0xff) | CMP(24, Tb, O, 0xff) | \ + CMP(32, Tb, O, 0xff) | CMP(40, Tb, O, 0xff) | \ + CMP(48, Tb, O, 0xff) | CMP(56, Tb, O, 0xff); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 
1) | \ + NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \ + NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \ + NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \ + return a; \ +} \ +uint64_t HELPER(glue(iwmmxt_, glue(SUFF, w)))(CPUARMState *env, \ + uint64_t a, uint64_t b) \ +{ \ + a = CMP(0, Tw, O, 0xffff) | CMP(16, Tw, O, 0xffff) | \ + CMP(32, Tw, O, 0xffff) | CMP(48, Tw, O, 0xffff); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | \ + NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); \ + return a; \ +} \ +uint64_t HELPER(glue(iwmmxt_, glue(SUFF, l)))(CPUARMState *env, \ + uint64_t a, uint64_t b) \ +{ \ + a = CMP(0, Tl, O, 0xffffffff) | \ + CMP(32, Tl, O, 0xffffffff); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \ + return a; \ +} +#define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \ + (TYPE) ((b >> SHR) & MASK)) ? (uint64_t) MASK : 0) << SHR) +IWMMXT_OP_CMP(cmpeq, uint8_t, uint16_t, uint32_t, ==) +IWMMXT_OP_CMP(cmpgts, int8_t, int16_t, int32_t, >) +IWMMXT_OP_CMP(cmpgtu, uint8_t, uint16_t, uint32_t, >) +#undef CMP +#define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \ + (TYPE) ((b >> SHR) & MASK)) ? a : b) & ((uint64_t) MASK << SHR)) +IWMMXT_OP_CMP(mins, int8_t, int16_t, int32_t, <) +IWMMXT_OP_CMP(minu, uint8_t, uint16_t, uint32_t, <) +IWMMXT_OP_CMP(maxs, int8_t, int16_t, int32_t, >) +IWMMXT_OP_CMP(maxu, uint8_t, uint16_t, uint32_t, >) +#undef CMP +#define CMP(SHR, TYPE, OPER, MASK) ((uint64_t) (((TYPE) ((a >> SHR) & MASK) \ + OPER (TYPE) ((b >> SHR) & MASK)) & MASK) << SHR) +IWMMXT_OP_CMP(subn, uint8_t, uint16_t, uint32_t, -) +IWMMXT_OP_CMP(addn, uint8_t, uint16_t, uint32_t, +) +#undef CMP +/* TODO Signed- and Unsigned-Saturation */ +#define CMP(SHR, TYPE, OPER, MASK) ((uint64_t) (((TYPE) ((a >> SHR) & MASK) \ + OPER (TYPE) ((b >> SHR) & MASK)) & MASK) << SHR) +IWMMXT_OP_CMP(subu, uint8_t, uint16_t, uint32_t, -) +IWMMXT_OP_CMP(addu, uint8_t, uint16_t, uint32_t, +) +IWMMXT_OP_CMP(subs, int8_t, int16_t, int32_t, -) +IWMMXT_OP_CMP(adds, int8_t, int16_t, int32_t, +) +#undef CMP +#undef IWMMXT_OP_CMP + +#define AVGB(SHR) ((( \ + ((a >> SHR) & 0xff) + ((b >> SHR) & 0xff) + round) >> 1) << SHR) +#define IWMMXT_OP_AVGB(r) \ +uint64_t HELPER(iwmmxt_avgb##r)(CPUARMState *env, uint64_t a, uint64_t b) \ +{ \ + const int round = r; \ + a = AVGB(0) | AVGB(8) | AVGB(16) | AVGB(24) | \ + AVGB(32) | AVGB(40) | AVGB(48) | AVGB(56); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + SIMD8_SET(ZBIT8((a >> 0) & 0xff), SIMD_ZBIT, 0) | \ + SIMD8_SET(ZBIT8((a >> 8) & 0xff), SIMD_ZBIT, 1) | \ + SIMD8_SET(ZBIT8((a >> 16) & 0xff), SIMD_ZBIT, 2) | \ + SIMD8_SET(ZBIT8((a >> 24) & 0xff), SIMD_ZBIT, 3) | \ + SIMD8_SET(ZBIT8((a >> 32) & 0xff), SIMD_ZBIT, 4) | \ + SIMD8_SET(ZBIT8((a >> 40) & 0xff), SIMD_ZBIT, 5) | \ + SIMD8_SET(ZBIT8((a >> 48) & 0xff), SIMD_ZBIT, 6) | \ + SIMD8_SET(ZBIT8((a >> 56) & 0xff), SIMD_ZBIT, 7); \ + return a; \ +} +IWMMXT_OP_AVGB(0) +IWMMXT_OP_AVGB(1) +#undef IWMMXT_OP_AVGB +#undef AVGB + +#define AVGW(SHR) ((( \ + ((a >> SHR) & 0xffff) + ((b >> SHR) & 0xffff) + round) >> 1) << SHR) +#define IWMMXT_OP_AVGW(r) \ +uint64_t HELPER(iwmmxt_avgw##r)(CPUARMState *env, uint64_t a, uint64_t b) \ +{ \ + const int round = r; \ + a = AVGW(0) | AVGW(16) | AVGW(32) | AVGW(48); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + SIMD16_SET(ZBIT16((a >> 0) & 0xffff), SIMD_ZBIT, 0) | \ + SIMD16_SET(ZBIT16((a >> 16) & 0xffff), SIMD_ZBIT, 1) | \ + SIMD16_SET(ZBIT16((a >> 32) & 0xffff), SIMD_ZBIT, 2) | \ + SIMD16_SET(ZBIT16((a >> 
48) & 0xffff), SIMD_ZBIT, 3); \ + return a; \ +} +IWMMXT_OP_AVGW(0) +IWMMXT_OP_AVGW(1) +#undef IWMMXT_OP_AVGW +#undef AVGW + +uint64_t HELPER(iwmmxt_msadb)(uint64_t a, uint64_t b) +{ + a = ((((a >> 0 ) & 0xffff) * ((b >> 0) & 0xffff) + + ((a >> 16) & 0xffff) * ((b >> 16) & 0xffff)) & 0xffffffff) | + ((((a >> 32) & 0xffff) * ((b >> 32) & 0xffff) + + ((a >> 48) & 0xffff) * ((b >> 48) & 0xffff)) << 32); + return a; +} + +uint64_t HELPER(iwmmxt_align)(uint64_t a, uint64_t b, uint32_t n) +{ + a >>= n << 3; + a |= b << (64 - (n << 3)); + return a; +} + +uint64_t HELPER(iwmmxt_insr)(uint64_t x, uint32_t a, uint32_t b, uint32_t n) +{ + x &= ~((uint64_t) b << n); + x |= (uint64_t) (a & b) << n; + return x; +} + +uint32_t HELPER(iwmmxt_setpsr_nz)(uint64_t x) +{ + return SIMD64_SET((x == 0), SIMD_ZBIT) | + SIMD64_SET((x & (1ULL << 63)), SIMD_NBIT); +} + +uint64_t HELPER(iwmmxt_bcstb)(uint32_t arg) +{ + arg &= 0xff; + return + ((uint64_t) arg << 0 ) | ((uint64_t) arg << 8 ) | + ((uint64_t) arg << 16) | ((uint64_t) arg << 24) | + ((uint64_t) arg << 32) | ((uint64_t) arg << 40) | + ((uint64_t) arg << 48) | ((uint64_t) arg << 56); +} + +uint64_t HELPER(iwmmxt_bcstw)(uint32_t arg) +{ + arg &= 0xffff; + return + ((uint64_t) arg << 0 ) | ((uint64_t) arg << 16) | + ((uint64_t) arg << 32) | ((uint64_t) arg << 48); +} + +uint64_t HELPER(iwmmxt_bcstl)(uint32_t arg) +{ + return arg | ((uint64_t) arg << 32); +} + +uint64_t HELPER(iwmmxt_addcb)(uint64_t x) +{ + return + ((x >> 0) & 0xff) + ((x >> 8) & 0xff) + + ((x >> 16) & 0xff) + ((x >> 24) & 0xff) + + ((x >> 32) & 0xff) + ((x >> 40) & 0xff) + + ((x >> 48) & 0xff) + ((x >> 56) & 0xff); +} + +uint64_t HELPER(iwmmxt_addcw)(uint64_t x) +{ + return + ((x >> 0) & 0xffff) + ((x >> 16) & 0xffff) + + ((x >> 32) & 0xffff) + ((x >> 48) & 0xffff); +} + +uint64_t HELPER(iwmmxt_addcl)(uint64_t x) +{ + return (x & 0xffffffff) + (x >> 32); +} + +uint32_t HELPER(iwmmxt_msbb)(uint64_t x) +{ + return + ((x >> 7) & 0x01) | ((x >> 14) & 0x02) | + ((x >> 21) & 0x04) | ((x >> 28) & 0x08) | + ((x >> 35) & 0x10) | ((x >> 42) & 0x20) | + ((x >> 49) & 0x40) | ((x >> 56) & 0x80); +} + +uint32_t HELPER(iwmmxt_msbw)(uint64_t x) +{ + return + ((x >> 15) & 0x01) | ((x >> 30) & 0x02) | + ((x >> 45) & 0x04) | ((x >> 52) & 0x08); +} + +uint32_t HELPER(iwmmxt_msbl)(uint64_t x) +{ + return ((x >> 31) & 0x01) | ((x >> 62) & 0x02); +} + +/* FIXME: Split wCASF setting into a separate op to avoid env use. 
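+
+   Editor's note: the shift helpers below keep the packed lanes of one
+   uint64_t independent by masking each lane both before and after the
+   shift, so bits shifted out of one lane cannot leak into its
+   neighbour. A minimal stand-alone sketch of the same masking idea for
+   two 32-bit lanes (srl_2x32 is a hypothetical name, not part of this
+   patch):
+
+       static uint64_t srl_2x32(uint64_t x, unsigned n)
+       {
+           uint64_t lo = ((x & 0x00000000ffffffffull) >> n) & 0x00000000ffffffffull;
+           uint64_t hi = ((x & 0xffffffff00000000ull) >> n) & 0xffffffff00000000ull;
+           return lo | hi;
+       }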
*/ +uint64_t HELPER(iwmmxt_srlw)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = (((x & (0xffffll << 0)) >> n) & (0xffffll << 0)) | + (((x & (0xffffll << 16)) >> n) & (0xffffll << 16)) | + (((x & (0xffffll << 32)) >> n) & (0xffffll << 32)) | + (((x & (0xffffll << 48)) >> n) & (0xffffll << 48)); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | + NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); + return x; +} + +uint64_t HELPER(iwmmxt_srll)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = ((x & (0xffffffffll << 0)) >> n) | + ((x >> n) & (0xffffffffll << 32)); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); + return x; +} + +uint64_t HELPER(iwmmxt_srlq)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x >>= n; + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x); + return x; +} + +uint64_t HELPER(iwmmxt_sllw)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = (((x & (0xffffll << 0)) << n) & (0xffffll << 0)) | + (((x & (0xffffll << 16)) << n) & (0xffffll << 16)) | + (((x & (0xffffll << 32)) << n) & (0xffffll << 32)) | + (((x & (0xffffll << 48)) << n) & (0xffffll << 48)); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | + NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); + return x; +} + +uint64_t HELPER(iwmmxt_slll)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = ((x << n) & (0xffffffffll << 0)) | + ((x & (0xffffffffll << 32)) << n); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); + return x; +} + +uint64_t HELPER(iwmmxt_sllq)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x <<= n; + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x); + return x; +} + +uint64_t HELPER(iwmmxt_sraw)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = ((uint64_t) ((EXTEND16(x >> 0) >> n) & 0xffff) << 0) | + ((uint64_t) ((EXTEND16(x >> 16) >> n) & 0xffff) << 16) | + ((uint64_t) ((EXTEND16(x >> 32) >> n) & 0xffff) << 32) | + ((uint64_t) ((EXTEND16(x >> 48) >> n) & 0xffff) << 48); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | + NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); + return x; +} + +uint64_t HELPER(iwmmxt_sral)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = (((EXTEND32(x >> 0) >> n) & 0xffffffff) << 0) | + (((EXTEND32(x >> 32) >> n) & 0xffffffff) << 32); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); + return x; +} + +uint64_t HELPER(iwmmxt_sraq)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = (int64_t) x >> n; + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x); + return x; +} + +uint64_t HELPER(iwmmxt_rorw)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = ((((x & (0xffffll << 0)) >> n) | + ((x & (0xffffll << 0)) << (16 - n))) & (0xffffll << 0)) | + ((((x & (0xffffll << 16)) >> n) | + ((x & (0xffffll << 16)) << (16 - n))) & (0xffffll << 16)) | + ((((x & (0xffffll << 32)) >> n) | + ((x & (0xffffll << 32)) << (16 - n))) & (0xffffll << 32)) | + ((((x & (0xffffll << 48)) >> n) | + ((x & (0xffffll << 48)) << (16 - n))) & (0xffffll << 48)); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | + NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); + return x; +} + +uint64_t HELPER(iwmmxt_rorl)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = ((x & (0xffffffffll << 0)) >> n) | + ((x >> n) & (0xffffffffll << 32)) | + ((x << (32 - n)) & (0xffffffffll << 0)) | + ((x & (0xffffffffll << 32)) << (32 - n)); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); 
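+    /* Editor's note: each 32-bit lane above is rotated by combining a
+     * right shift by n with a compensating left shift by (32 - n) and
+     * masking both halves back into their own lane; e.g. rotating the
+     * lane value 0x12345678 by n == 8 yields 0x78123456. Like the other
+     * rotate helpers, this assumes n stays within the lane width. */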
+ return x; +} + +uint64_t HELPER(iwmmxt_rorq)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = (x >> n) | (x << (64 - n)); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x); + return x; +} + +uint64_t HELPER(iwmmxt_shufh)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = (((x >> ((n << 4) & 0x30)) & 0xffff) << 0) | + (((x >> ((n << 2) & 0x30)) & 0xffff) << 16) | + (((x >> ((n << 0) & 0x30)) & 0xffff) << 32) | + (((x >> ((n >> 2) & 0x30)) & 0xffff) << 48); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | + NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); + return x; +} + +/* TODO: Unsigned-Saturation */ +uint64_t HELPER(iwmmxt_packuw)(CPUARMState *env, uint64_t a, uint64_t b) +{ + a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) | + (((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) | + (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) | + (((b >> 32) & 0xff) << 48) | (((b >> 48) & 0xff) << 56); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | + NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | + NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | + NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); + return a; +} + +uint64_t HELPER(iwmmxt_packul)(CPUARMState *env, uint64_t a, uint64_t b) +{ + a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) | + (((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | + NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); + return a; +} + +uint64_t HELPER(iwmmxt_packuq)(CPUARMState *env, uint64_t a, uint64_t b) +{ + a = (a & 0xffffffff) | ((b & 0xffffffff) << 32); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); + return a; +} + +/* TODO: Signed-Saturation */ +uint64_t HELPER(iwmmxt_packsw)(CPUARMState *env, uint64_t a, uint64_t b) +{ + a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) | + (((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) | + (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) | + (((b >> 32) & 0xff) << 48) | (((b >> 48) & 0xff) << 56); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | + NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | + NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | + NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); + return a; +} + +uint64_t HELPER(iwmmxt_packsl)(CPUARMState *env, uint64_t a, uint64_t b) +{ + a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) | + (((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | + NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); + return a; +} + +uint64_t HELPER(iwmmxt_packsq)(CPUARMState *env, uint64_t a, uint64_t b) +{ + a = (a & 0xffffffff) | ((b & 0xffffffff) << 32); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); + return a; +} + +uint64_t HELPER(iwmmxt_muladdsl)(uint64_t c, uint32_t a, uint32_t b) +{ + return c + ((int32_t) EXTEND32(a) * (int32_t) EXTEND32(b)); +} + +uint64_t HELPER(iwmmxt_muladdsw)(uint64_t c, uint32_t a, uint32_t b) +{ + c += EXTEND32(EXTEND16S((a >> 0) & 0xffff) * + EXTEND16S((b >> 0) & 0xffff)); + c += EXTEND32(EXTEND16S((a >> 16) & 0xffff) * + EXTEND16S((b >> 16) & 0xffff)); + return c; +} + +uint64_t HELPER(iwmmxt_muladdswl)(uint64_t c, uint32_t a, uint32_t b) +{ + return c + (EXTEND32(EXTEND16S(a & 0xffff) * + EXTEND16S(b & 0xffff))); +} diff --git a/src/target-arm/neon_helper.c b/src/target-arm/neon_helper.c new file 
mode 100644
index 0000000..76d8fa6
--- /dev/null
+++ b/src/target-arm/neon_helper.c
@@ -0,0 +1,2020 @@
+/*
+ * ARM NEON vector operations.
+ *
+ * Copyright (c) 2007, 2008 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GNU GPL v2.
+ */
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "cpu.h"
+#include "exec-all.h"
+#include "helper.h"
+
+#define SIGNBIT (uint32_t)0x80000000
+#define SIGNBIT64 ((uint64_t)1 << 63)
+
+#define SET_QC() env->vfp.xregs[ARM_VFP_FPSCR] = CPSR_Q
+
+#define NEON_TYPE1(name, type) \
+typedef struct \
+{ \
+    type v1; \
+} neon_##name;
+#ifdef HOST_WORDS_BIGENDIAN
+#define NEON_TYPE2(name, type) \
+typedef struct \
+{ \
+    type v2; \
+    type v1; \
+} neon_##name;
+#define NEON_TYPE4(name, type) \
+typedef struct \
+{ \
+    type v4; \
+    type v3; \
+    type v2; \
+    type v1; \
+} neon_##name;
+#else
+#define NEON_TYPE2(name, type) \
+typedef struct \
+{ \
+    type v1; \
+    type v2; \
+} neon_##name;
+#define NEON_TYPE4(name, type) \
+typedef struct \
+{ \
+    type v1; \
+    type v2; \
+    type v3; \
+    type v4; \
+} neon_##name;
+#endif
+
+NEON_TYPE4(s8, int8_t)
+NEON_TYPE4(u8, uint8_t)
+NEON_TYPE2(s16, int16_t)
+NEON_TYPE2(u16, uint16_t)
+NEON_TYPE1(s32, int32_t)
+NEON_TYPE1(u32, uint32_t)
+#undef NEON_TYPE4
+#undef NEON_TYPE2
+#undef NEON_TYPE1
+
+/* Copy from a uint32_t to a vector structure type. */
+#define NEON_UNPACK(vtype, dest, val) do { \
+    union { \
+        vtype v; \
+        uint32_t i; \
+    } conv_u; \
+    conv_u.i = (val); \
+    dest = conv_u.v; \
+    } while(0)
+
+/* Copy from a vector structure type to a uint32_t. */
+#define NEON_PACK(vtype, dest, val) do { \
+    union { \
+        vtype v; \
+        uint32_t i; \
+    } conv_u; \
+    conv_u.v = (val); \
+    dest = conv_u.i; \
+    } while(0)
+
+#define NEON_DO1 \
+    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1);
+#define NEON_DO2 \
+    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
+    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2);
+#define NEON_DO4 \
+    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
+    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \
+    NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \
+    NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4);
+
+#define NEON_VOP_BODY(vtype, n) \
+{ \
+    uint32_t res; \
+    vtype vsrc1; \
+    vtype vsrc2; \
+    vtype vdest; \
+    NEON_UNPACK(vtype, vsrc1, arg1); \
+    NEON_UNPACK(vtype, vsrc2, arg2); \
+    NEON_DO##n; \
+    NEON_PACK(vtype, res, vdest); \
+    return res; \
+}
+
+#define NEON_VOP(name, vtype, n) \
+uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
+NEON_VOP_BODY(vtype, n)
+
+#define NEON_VOP_ENV(name, vtype, n) \
+uint32_t HELPER(glue(neon_,name))(CPUARMState *env, uint32_t arg1, uint32_t arg2) \
+NEON_VOP_BODY(vtype, n)
+
+/* Pairwise operations. */
+/* For 32-bit elements each segment only contains a single element, so
+   the elementwise and pairwise operations are the same. */
+#define NEON_PDO2 \
+    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
+    NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2);
+#define NEON_PDO4 \
+    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
+    NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \
+    NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \
+    NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4); \
+
+#define NEON_POP(name, vtype, n) \
+uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
+{ \
+    uint32_t res; \
+    vtype vsrc1; \
+    vtype vsrc2; \
+    vtype vdest; \
+    NEON_UNPACK(vtype, vsrc1, arg1); \
+    NEON_UNPACK(vtype, vsrc2, arg2); \
+    NEON_PDO##n; \
+    NEON_PACK(vtype, res, vdest); \
+    return res; \
+}
+
+/* Unary operators.
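+
+   Editor's note: like the binary NEON_VOP wrappers above, the unary
+   wrapper below relies on the union-based NEON_UNPACK/NEON_PACK copies
+   to reinterpret one uint32_t as a small vector of lanes without
+   violating C aliasing rules. A minimal sketch of the same pattern,
+   reusing the neon_u8 type defined above (per_byte_not is a
+   hypothetical example, not part of this patch):
+
+       static uint32_t per_byte_not(uint32_t arg)
+       {
+           neon_u8 vec;
+           NEON_UNPACK(neon_u8, vec, arg);
+           vec.v1 = ~vec.v1;
+           vec.v2 = ~vec.v2;
+           vec.v3 = ~vec.v3;
+           vec.v4 = ~vec.v4;
+           NEON_PACK(neon_u8, arg, vec);
+           return arg;
+       }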
*/ +#define NEON_VOP1(name, vtype, n) \ +uint32_t HELPER(glue(neon_,name))(uint32_t arg) \ +{ \ + vtype vsrc1; \ + vtype vdest; \ + NEON_UNPACK(vtype, vsrc1, arg); \ + NEON_DO##n; \ + NEON_PACK(vtype, arg, vdest); \ + return arg; \ +} + + +#define NEON_USAT(dest, src1, src2, type) do { \ + uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \ + if (tmp != (type)tmp) { \ + SET_QC(); \ + dest = ~0; \ + } else { \ + dest = tmp; \ + }} while(0) +#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t) +NEON_VOP_ENV(qadd_u8, neon_u8, 4) +#undef NEON_FN +#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t) +NEON_VOP_ENV(qadd_u16, neon_u16, 2) +#undef NEON_FN +#undef NEON_USAT + +uint32_t HELPER(neon_qadd_u32)(CPUARMState *env, uint32_t a, uint32_t b) +{ + uint32_t res = a + b; + if (res < a) { + SET_QC(); + res = ~0; + } + return res; +} + +uint64_t HELPER(neon_qadd_u64)(CPUARMState *env, uint64_t src1, uint64_t src2) +{ + uint64_t res; + + res = src1 + src2; + if (res < src1) { + SET_QC(); + res = ~(uint64_t)0; + } + return res; +} + +#define NEON_SSAT(dest, src1, src2, type) do { \ + int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \ + if (tmp != (type)tmp) { \ + SET_QC(); \ + if (src2 > 0) { \ + tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \ + } else { \ + tmp = 1 << (sizeof(type) * 8 - 1); \ + } \ + } \ + dest = tmp; \ + } while(0) +#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t) +NEON_VOP_ENV(qadd_s8, neon_s8, 4) +#undef NEON_FN +#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t) +NEON_VOP_ENV(qadd_s16, neon_s16, 2) +#undef NEON_FN +#undef NEON_SSAT + +uint32_t HELPER(neon_qadd_s32)(CPUARMState *env, uint32_t a, uint32_t b) +{ + uint32_t res = a + b; + if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) { + SET_QC(); + res = ~(((int32_t)a >> 31) ^ SIGNBIT); + } + return res; +} + +uint64_t HELPER(neon_qadd_s64)(CPUARMState *env, uint64_t src1, uint64_t src2) +{ + uint64_t res; + + res = src1 + src2; + if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) { + SET_QC(); + res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64; + } + return res; +} + +#define NEON_USAT(dest, src1, src2, type) do { \ + uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \ + if (tmp != (type)tmp) { \ + SET_QC(); \ + dest = 0; \ + } else { \ + dest = tmp; \ + }} while(0) +#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t) +NEON_VOP_ENV(qsub_u8, neon_u8, 4) +#undef NEON_FN +#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t) +NEON_VOP_ENV(qsub_u16, neon_u16, 2) +#undef NEON_FN +#undef NEON_USAT + +uint32_t HELPER(neon_qsub_u32)(CPUARMState *env, uint32_t a, uint32_t b) +{ + uint32_t res = a - b; + if (res > a) { + SET_QC(); + res = 0; + } + return res; +} + +uint64_t HELPER(neon_qsub_u64)(CPUARMState *env, uint64_t src1, uint64_t src2) +{ + uint64_t res; + + if (src1 < src2) { + SET_QC(); + res = 0; + } else { + res = src1 - src2; + } + return res; +} + +#define NEON_SSAT(dest, src1, src2, type) do { \ + int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \ + if (tmp != (type)tmp) { \ + SET_QC(); \ + if (src2 < 0) { \ + tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \ + } else { \ + tmp = 1 << (sizeof(type) * 8 - 1); \ + } \ + } \ + dest = tmp; \ + } while(0) +#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t) +NEON_VOP_ENV(qsub_s8, neon_s8, 4) +#undef NEON_FN +#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t) +NEON_VOP_ENV(qsub_s16, neon_s16, 2) +#undef NEON_FN +#undef NEON_SSAT + +uint32_t 
HELPER(neon_qsub_s32)(CPUARMState *env, uint32_t a, uint32_t b) +{ + uint32_t res = a - b; + if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) { + SET_QC(); + res = ~(((int32_t)a >> 31) ^ SIGNBIT); + } + return res; +} + +uint64_t HELPER(neon_qsub_s64)(CPUARMState *env, uint64_t src1, uint64_t src2) +{ + uint64_t res; + + res = src1 - src2; + if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) { + SET_QC(); + res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64; + } + return res; +} + +#define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1 +NEON_VOP(hadd_s8, neon_s8, 4) +NEON_VOP(hadd_u8, neon_u8, 4) +NEON_VOP(hadd_s16, neon_s16, 2) +NEON_VOP(hadd_u16, neon_u16, 2) +#undef NEON_FN + +int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2) +{ + int32_t dest; + + dest = (src1 >> 1) + (src2 >> 1); + if (src1 & src2 & 1) + dest++; + return dest; +} + +uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t src2) +{ + uint32_t dest; + + dest = (src1 >> 1) + (src2 >> 1); + if (src1 & src2 & 1) + dest++; + return dest; +} + +#define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1 +NEON_VOP(rhadd_s8, neon_s8, 4) +NEON_VOP(rhadd_u8, neon_u8, 4) +NEON_VOP(rhadd_s16, neon_s16, 2) +NEON_VOP(rhadd_u16, neon_u16, 2) +#undef NEON_FN + +int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2) +{ + int32_t dest; + + dest = (src1 >> 1) + (src2 >> 1); + if ((src1 | src2) & 1) + dest++; + return dest; +} + +uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2) +{ + uint32_t dest; + + dest = (src1 >> 1) + (src2 >> 1); + if ((src1 | src2) & 1) + dest++; + return dest; +} + +#define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1 +NEON_VOP(hsub_s8, neon_s8, 4) +NEON_VOP(hsub_u8, neon_u8, 4) +NEON_VOP(hsub_s16, neon_s16, 2) +NEON_VOP(hsub_u16, neon_u16, 2) +#undef NEON_FN + +int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2) +{ + int32_t dest; + + dest = (src1 >> 1) - (src2 >> 1); + if ((~src1) & src2 & 1) + dest--; + return dest; +} + +uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2) +{ + uint32_t dest; + + dest = (src1 >> 1) - (src2 >> 1); + if ((~src1) & src2 & 1) + dest--; + return dest; +} + +#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? ~0 : 0 +NEON_VOP(cgt_s8, neon_s8, 4) +NEON_VOP(cgt_u8, neon_u8, 4) +NEON_VOP(cgt_s16, neon_s16, 2) +NEON_VOP(cgt_u16, neon_u16, 2) +NEON_VOP(cgt_s32, neon_s32, 1) +NEON_VOP(cgt_u32, neon_u32, 1) +#undef NEON_FN + +#define NEON_FN(dest, src1, src2) dest = (src1 >= src2) ? ~0 : 0 +NEON_VOP(cge_s8, neon_s8, 4) +NEON_VOP(cge_u8, neon_u8, 4) +NEON_VOP(cge_s16, neon_s16, 2) +NEON_VOP(cge_u16, neon_u16, 2) +NEON_VOP(cge_s32, neon_s32, 1) +NEON_VOP(cge_u32, neon_u32, 1) +#undef NEON_FN + +#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2 +NEON_VOP(min_s8, neon_s8, 4) +NEON_VOP(min_u8, neon_u8, 4) +NEON_VOP(min_s16, neon_s16, 2) +NEON_VOP(min_u16, neon_u16, 2) +NEON_VOP(min_s32, neon_s32, 1) +NEON_VOP(min_u32, neon_u32, 1) +NEON_POP(pmin_s8, neon_s8, 4) +NEON_POP(pmin_u8, neon_u8, 4) +NEON_POP(pmin_s16, neon_s16, 2) +NEON_POP(pmin_u16, neon_u16, 2) +#undef NEON_FN + +#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? 
src1 : src2 +NEON_VOP(max_s8, neon_s8, 4) +NEON_VOP(max_u8, neon_u8, 4) +NEON_VOP(max_s16, neon_s16, 2) +NEON_VOP(max_u16, neon_u16, 2) +NEON_VOP(max_s32, neon_s32, 1) +NEON_VOP(max_u32, neon_u32, 1) +NEON_POP(pmax_s8, neon_s8, 4) +NEON_POP(pmax_u8, neon_u8, 4) +NEON_POP(pmax_s16, neon_s16, 2) +NEON_POP(pmax_u16, neon_u16, 2) +#undef NEON_FN + +#define NEON_FN(dest, src1, src2) \ + dest = (src1 > src2) ? (src1 - src2) : (src2 - src1) +NEON_VOP(abd_s8, neon_s8, 4) +NEON_VOP(abd_u8, neon_u8, 4) +NEON_VOP(abd_s16, neon_s16, 2) +NEON_VOP(abd_u16, neon_u16, 2) +NEON_VOP(abd_s32, neon_s32, 1) +NEON_VOP(abd_u32, neon_u32, 1) +#undef NEON_FN + +#define NEON_FN(dest, src1, src2) do { \ + int8_t tmp; \ + tmp = (int8_t)src2; \ + if (tmp >= (ssize_t)sizeof(src1) * 8 || \ + tmp <= -(ssize_t)sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp < 0) { \ + dest = src1 >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + }} while (0) +NEON_VOP(shl_u8, neon_u8, 4) +NEON_VOP(shl_u16, neon_u16, 2) +NEON_VOP(shl_u32, neon_u32, 1) +#undef NEON_FN + +uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop) +{ + int8_t shift = (int8_t)shiftop; + if (shift >= 64 || shift <= -64) { + val = 0; + } else if (shift < 0) { + val >>= -shift; + } else { + val <<= shift; + } + return val; +} + +#define NEON_FN(dest, src1, src2) do { \ + int8_t tmp; \ + tmp = (int8_t)src2; \ + if (tmp >= (ssize_t)sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ + dest = src1 >> (sizeof(src1) * 8 - 1); \ + } else if (tmp < 0) { \ + dest = src1 >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + }} while (0) +NEON_VOP(shl_s8, neon_s8, 4) +NEON_VOP(shl_s16, neon_s16, 2) +NEON_VOP(shl_s32, neon_s32, 1) +#undef NEON_FN + +uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop) +{ + int8_t shift = (int8_t)shiftop; + int64_t val = valop; + if (shift >= 64) { + val = 0; + } else if (shift <= -64) { + val >>= 63; + } else if (shift < 0) { + val >>= -shift; + } else { + val <<= shift; + } + return val; +} + +#define NEON_FN(dest, src1, src2) do { \ + int8_t tmp; \ + tmp = (int8_t)src2; \ + if ((tmp >= (ssize_t)sizeof(src1) * 8) \ + || (tmp <= -(ssize_t)sizeof(src1) * 8)) { \ + dest = 0; \ + } else if (tmp < 0) { \ + dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + }} while (0) +NEON_VOP(rshl_s8, neon_s8, 4) +NEON_VOP(rshl_s16, neon_s16, 2) +#undef NEON_FN + +/* The addition of the rounding constant may overflow, so we use an + * intermediate 64 bits accumulator. */ +uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop) +{ + int32_t dest; + int32_t val = (int32_t)valop; + int8_t shift = (int8_t)shiftop; + if ((shift >= 32) || (shift <= -32)) { + dest = 0; + } else if (shift < 0) { + int64_t big_dest = ((int64_t)val + (1 << (-1 - shift))); + dest = big_dest >> -shift; + } else { + dest = val << shift; + } + return dest; +} + +/* Handling addition overflow with 64 bits inputs values is more + * tricky than with 32 bits values. */ +uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop) +{ + int8_t shift = (int8_t)shiftop; + int64_t val = valop; + if ((shift >= 64) || (shift <= -64)) { + val = 0; + } else if (shift < 0) { + val >>= (-shift - 1); + if (val == INT64_MAX) { + /* In this case, it means that the rounding constant is 1, + * and the addition would overflow. Return the actual + * result directly. 
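+ *
+ * Editor's note, a worked example of the decomposition used here: a
+ * rounding right shift by s computes (val + (1 << (s - 1))) >> s, and
+ * the code performs it as val >>= s - 1, val += 1, val >>= 1, so the
+ * rounding constant never has to be built at 64-bit width. For val = 7,
+ * s = 2: 7 >> 1 = 3, 3 + 1 = 4, 4 >> 1 = 2, which matches
+ * (7 + 2) >> 2. The branch here catches the one case, val == INT64_MAX
+ * after the first shift, where the "+ 1" itself would overflow; the
+ * mathematically correct result is then 2^62, the constant assigned
+ * below.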
*/ + val = 0x4000000000000000LL; + } else { + val++; + val >>= 1; + } + } else { + val <<= shift; + } + return val; +} + +#define NEON_FN(dest, src1, src2) do { \ + int8_t tmp; \ + tmp = (int8_t)src2; \ + if (tmp >= (ssize_t)sizeof(src1) * 8 || \ + tmp < -(ssize_t)sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \ + dest = src1 >> (-tmp - 1); \ + } else if (tmp < 0) { \ + dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + }} while (0) +NEON_VOP(rshl_u8, neon_u8, 4) +NEON_VOP(rshl_u16, neon_u16, 2) +#undef NEON_FN + +/* The addition of the rounding constant may overflow, so we use an + * intermediate 64 bits accumulator. */ +uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop) +{ + uint32_t dest; + int8_t shift = (int8_t)shiftop; + if (shift >= 32 || shift < -32) { + dest = 0; + } else if (shift == -32) { + dest = val >> 31; + } else if (shift < 0) { + uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift))); + dest = big_dest >> -shift; + } else { + dest = val << shift; + } + return dest; +} + +/* Handling addition overflow with 64 bits inputs values is more + * tricky than with 32 bits values. */ +uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop) +{ + int8_t shift = (uint8_t)shiftop; + if (shift >= 64 || shift < -64) { + val = 0; + } else if (shift == -64) { + /* Rounding a 1-bit result just preserves that bit. */ + val >>= 63; + } else if (shift < 0) { + val >>= (-shift - 1); + if (val == UINT64_MAX) { + /* In this case, it means that the rounding constant is 1, + * and the addition would overflow. Return the actual + * result directly. */ + val = 0x8000000000000000ULL; + } else { + val++; + val >>= 1; + } + } else { + val <<= shift; + } + return val; +} + +#define NEON_FN(dest, src1, src2) do { \ + int8_t tmp; \ + tmp = (int8_t)src2; \ + if (tmp >= (ssize_t)sizeof(src1) * 8) { \ + if (src1) { \ + SET_QC(); \ + dest = ~0; \ + } else { \ + dest = 0; \ + } \ + } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp < 0) { \ + dest = src1 >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + if ((dest >> tmp) != src1) { \ + SET_QC(); \ + dest = ~0; \ + } \ + }} while (0) +NEON_VOP_ENV(qshl_u8, neon_u8, 4) +NEON_VOP_ENV(qshl_u16, neon_u16, 2) +NEON_VOP_ENV(qshl_u32, neon_u32, 1) +#undef NEON_FN + +uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop) +{ + int8_t shift = (int8_t)shiftop; + if (shift >= 64) { + if (val) { + val = ~(uint64_t)0; + SET_QC(); + } + } else if (shift <= -64) { + val = 0; + } else if (shift < 0) { + val >>= -shift; + } else { + uint64_t tmp = val; + val <<= shift; + if ((val >> shift) != tmp) { + SET_QC(); + val = ~(uint64_t)0; + } + } + return val; +} + +#define NEON_FN(dest, src1, src2) do { \ + int8_t tmp; \ + tmp = (int8_t)src2; \ + if (tmp >= (ssize_t)sizeof(src1) * 8) { \ + if (src1) { \ + SET_QC(); \ + dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \ + if (src1 > 0) { \ + dest--; \ + } \ + } else { \ + dest = src1; \ + } \ + } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ + dest = src1 >> 31; \ + } else if (tmp < 0) { \ + dest = src1 >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + if ((dest >> tmp) != src1) { \ + SET_QC(); \ + dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \ + if (src1 > 0) { \ + dest--; \ + } \ + } \ + }} while (0) +NEON_VOP_ENV(qshl_s8, neon_s8, 4) +NEON_VOP_ENV(qshl_s16, neon_s16, 2) +NEON_VOP_ENV(qshl_s32, neon_s32, 1) +#undef NEON_FN + +uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t 
valop, uint64_t shiftop) +{ + int8_t shift = (uint8_t)shiftop; + int64_t val = valop; + if (shift >= 64) { + if (val) { + SET_QC(); + val = (val >> 63) ^ ~SIGNBIT64; + } + } else if (shift <= -64) { + val >>= 63; + } else if (shift < 0) { + val >>= -shift; + } else { + int64_t tmp = val; + val <<= shift; + if ((val >> shift) != tmp) { + SET_QC(); + val = (tmp >> 63) ^ ~SIGNBIT64; + } + } + return val; +} + +#define NEON_FN(dest, src1, src2) do { \ + if (src1 & (1 << (sizeof(src1) * 8 - 1))) { \ + SET_QC(); \ + dest = 0; \ + } else { \ + int8_t tmp; \ + tmp = (int8_t)src2; \ + if (tmp >= (ssize_t)sizeof(src1) * 8) { \ + if (src1) { \ + SET_QC(); \ + dest = ~0; \ + } else { \ + dest = 0; \ + } \ + } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp < 0) { \ + dest = src1 >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + if ((dest >> tmp) != src1) { \ + SET_QC(); \ + dest = ~0; \ + } \ + } \ + }} while (0) +NEON_VOP_ENV(qshlu_s8, neon_u8, 4) +NEON_VOP_ENV(qshlu_s16, neon_u16, 2) +#undef NEON_FN + +uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop) +{ + if ((int32_t)valop < 0) { + SET_QC(); + return 0; + } + return helper_neon_qshl_u32(env, valop, shiftop); +} + +uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop) +{ + if ((int64_t)valop < 0) { + SET_QC(); + return 0; + } + return helper_neon_qshl_u64(env, valop, shiftop); +} + +/* FIXME: This is wrong. */ +#define NEON_FN(dest, src1, src2) do { \ + int8_t tmp; \ + tmp = (int8_t)src2; \ + if (tmp >= (ssize_t)sizeof(src1) * 8) { \ + if (src1) { \ + SET_QC(); \ + dest = ~0; \ + } else { \ + dest = 0; \ + } \ + } else if (tmp < -(ssize_t)sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \ + dest = src1 >> (sizeof(src1) * 8 - 1); \ + } else if (tmp < 0) { \ + dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + if ((dest >> tmp) != src1) { \ + SET_QC(); \ + dest = ~0; \ + } \ + }} while (0) +NEON_VOP_ENV(qrshl_u8, neon_u8, 4) +NEON_VOP_ENV(qrshl_u16, neon_u16, 2) +#undef NEON_FN + +/* The addition of the rounding constant may overflow, so we use an + * intermediate 64 bits accumulator. */ +uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop) +{ + uint32_t dest; + int8_t shift = (int8_t)shiftop; + if (shift >= 32) { + if (val) { + SET_QC(); + dest = ~0; + } else { + dest = 0; + } + } else if (shift < -32) { + dest = 0; + } else if (shift == -32) { + dest = val >> 31; + } else if (shift < 0) { + uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift))); + dest = big_dest >> -shift; + } else { + dest = val << shift; + if ((dest >> shift) != val) { + SET_QC(); + dest = ~0; + } + } + return dest; +} + +/* Handling addition overflow with 64 bits inputs values is more + * tricky than with 32 bits values. */ +uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop) +{ + int8_t shift = (int8_t)shiftop; + if (shift >= 64) { + if (val) { + SET_QC(); + val = ~0; + } + } else if (shift < -64) { + val = 0; + } else if (shift == -64) { + val >>= 63; + } else if (shift < 0) { + val >>= (-shift - 1); + if (val == UINT64_MAX) { + /* In this case, it means that the rounding constant is 1, + * and the addition would overflow. Return the actual + * result directly. 
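+ *
+ * Editor's note: this is the unsigned analogue of the signed case
+ * above. After val >>= (-shift - 1), val == UINT64_MAX means the "+ 1"
+ * of the rounding step would wrap to 0, while the true value of
+ * (UINT64_MAX + 1) >> 1 is 2^63, the constant assigned below.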
*/ + val = 0x8000000000000000ULL; + } else { + val++; + val >>= 1; + } + } else { \ + uint64_t tmp = val; + val <<= shift; + if ((val >> shift) != tmp) { + SET_QC(); + val = ~0; + } + } + return val; +} + +#define NEON_FN(dest, src1, src2) do { \ + int8_t tmp; \ + tmp = (int8_t)src2; \ + if (tmp >= (ssize_t)sizeof(src1) * 8) { \ + if (src1) { \ + SET_QC(); \ + dest = (1 << (sizeof(src1) * 8 - 1)); \ + if (src1 > 0) { \ + dest--; \ + } \ + } else { \ + dest = 0; \ + } \ + } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp < 0) { \ + dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + if ((dest >> tmp) != src1) { \ + SET_QC(); \ + dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \ + if (src1 > 0) { \ + dest--; \ + } \ + } \ + }} while (0) +//NEON_VOP_ENV(qrshl_s8, neon_s8, 4) +//NEON_VOP_ENV(qrshl_s16, neon_s16, 2) +#undef NEON_FN + + + + +/* The addition of the rounding constant may overflow, so we use an + * intermediate 64 bits accumulator. */ +uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop) +{ + int32_t dest; + int32_t val = (int32_t)valop; + int8_t shift = (int8_t)shiftop; + if (shift >= 32) { + if (val) { + SET_QC(); + dest = (val >> 31) ^ ~SIGNBIT; + } else { + dest = 0; + } + } else if (shift <= -32) { + dest = 0; + } else if (shift < 0) { + int64_t big_dest = ((int64_t)val + (1 << (-1 - shift))); + dest = big_dest >> -shift; + } else { + dest = val << shift; + if ((dest >> shift) != val) { + SET_QC(); + dest = (val >> 31) ^ ~SIGNBIT; + } + } + return dest; +} + +/* Handling addition overflow with 64 bits inputs values is more + * tricky than with 32 bits values. */ +uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop) +{ + int8_t shift = (uint8_t)shiftop; + int64_t val = valop; + + if (shift >= 64) { + if (val) { + SET_QC(); + val = (val >> 63) ^ ~SIGNBIT64; + } + } else if (shift <= -64) { + val = 0; + } else if (shift < 0) { + val >>= (-shift - 1); + if (val == INT64_MAX) { + /* In this case, it means that the rounding constant is 1, + * and the addition would overflow. Return the actual + * result directly. */ + val = 0x4000000000000000ULL; + } else { + val++; + val >>= 1; + } + } else { + int64_t tmp = val; + val <<= shift; + if ((val >> shift) != tmp) { + SET_QC(); + val = (tmp >> 63) ^ ~SIGNBIT64; + } + } + return val; +} + +uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b) +{ + uint32_t mask; + mask = (a ^ b) & 0x80808080u; + a &= ~0x80808080u; + b &= ~0x80808080u; + return (a + b) ^ mask; +} + +uint32_t HELPER(neon_add_u16)(uint32_t a, uint32_t b) +{ + uint32_t mask; + mask = (a ^ b) & 0x80008000u; + a &= ~0x80008000u; + b &= ~0x80008000u; + return (a + b) ^ mask; +} + +#define NEON_FN(dest, src1, src2) dest = src1 + src2 +NEON_POP(padd_u8, neon_u8, 4) +NEON_POP(padd_u16, neon_u16, 2) +#undef NEON_FN + +#define NEON_FN(dest, src1, src2) dest = src1 - src2 +NEON_VOP(sub_u8, neon_u8, 4) +NEON_VOP(sub_u16, neon_u16, 2) +#undef NEON_FN + +#define NEON_FN(dest, src1, src2) dest = src1 * src2 +NEON_VOP(mul_u8, neon_u8, 4) +NEON_VOP(mul_u16, neon_u16, 2) +#undef NEON_FN + +/* Polynomial multiplication is like integer multiplication except the + partial products are XORed, not added. 
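+
+   Editor's note: a worked example in GF(2): 0x3 * 0x3 = (x + 1)^2 =
+   x^2 + 1 = 0x5 rather than 9, because the cross terms cancel under
+   XOR. A minimal stand-alone sketch for a single byte (clmul8 is a
+   hypothetical name, not part of this patch):
+
+       static uint16_t clmul8(uint8_t a, uint8_t b)
+       {
+           uint16_t r = 0;
+           int i;
+           for (i = 0; i < 8; i++)
+               if (a & (1u << i))
+                   r ^= (uint16_t)b << i;
+           return r;
+       }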
*/ +uint32_t HELPER(neon_mul_p8)(uint32_t op1, uint32_t op2) +{ + uint32_t mask; + uint32_t result; + result = 0; + while (op1) { + mask = 0; + if (op1 & 1) + mask |= 0xff; + if (op1 & (1 << 8)) + mask |= (0xff << 8); + if (op1 & (1 << 16)) + mask |= (0xff << 16); + if (op1 & (1 << 24)) + mask |= (0xff << 24); + result ^= op2 & mask; + op1 = (op1 >> 1) & 0x7f7f7f7f; + op2 = (op2 << 1) & 0xfefefefe; + } + return result; +} + +uint64_t HELPER(neon_mull_p8)(uint32_t op1, uint32_t op2) +{ + uint64_t result = 0; + uint64_t mask; + uint64_t op2ex = op2; + op2ex = (op2ex & 0xff) | + ((op2ex & 0xff00) << 8) | + ((op2ex & 0xff0000) << 16) | + ((op2ex & 0xff000000) << 24); + while (op1) { + mask = 0; + if (op1 & 1) { + mask |= 0xffff; + } + if (op1 & (1 << 8)) { + mask |= (0xffffU << 16); + } + if (op1 & (1 << 16)) { + mask |= (0xffffULL << 32); + } + if (op1 & (1 << 24)) { + mask |= (0xffffULL << 48); + } + result ^= op2ex & mask; + op1 = (op1 >> 1) & 0x7f7f7f7f; + op2ex <<= 1; + } + return result; +} + +#define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0 +NEON_VOP(tst_u8, neon_u8, 4) +NEON_VOP(tst_u16, neon_u16, 2) +NEON_VOP(tst_u32, neon_u32, 1) +#undef NEON_FN + +#define NEON_FN(dest, src1, src2) dest = (src1 == src2) ? -1 : 0 +NEON_VOP(ceq_u8, neon_u8, 4) +NEON_VOP(ceq_u16, neon_u16, 2) +NEON_VOP(ceq_u32, neon_u32, 1) +#undef NEON_FN + +#define NEON_FN(dest, src, dummy) dest = (src < 0) ? -src : src +NEON_VOP1(abs_s8, neon_s8, 4) +NEON_VOP1(abs_s16, neon_s16, 2) +#undef NEON_FN + +/* Count Leading Sign/Zero Bits. */ +static inline int do_clz8(uint8_t x) +{ + int n; + for (n = 8; x; n--) + x >>= 1; + return n; +} + +static inline int do_clz16(uint16_t x) +{ + int n; + for (n = 16; x; n--) + x >>= 1; + return n; +} + +#define NEON_FN(dest, src, dummy) dest = do_clz8(src) +NEON_VOP1(clz_u8, neon_u8, 4) +#undef NEON_FN + +#define NEON_FN(dest, src, dummy) dest = do_clz16(src) +NEON_VOP1(clz_u16, neon_u16, 2) +#undef NEON_FN + +#define NEON_FN(dest, src, dummy) dest = do_clz8((src < 0) ? ~src : src) - 1 +NEON_VOP1(cls_s8, neon_s8, 4) +#undef NEON_FN + +#define NEON_FN(dest, src, dummy) dest = do_clz16((src < 0) ? ~src : src) - 1 +NEON_VOP1(cls_s16, neon_s16, 2) +#undef NEON_FN + +uint32_t HELPER(neon_cls_s32)(uint32_t x) +{ + int count; + if ((int32_t)x < 0) + x = ~x; + for (count = 32; x; count--) + x = x >> 1; + return count - 1; +} + +/* Bit count. 
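+
+   Editor's note: the helper below is the classic SWAR population
+   count: it sums adjacent 1-bit, then 2-bit, then 4-bit fields in
+   parallel, counting all four bytes at once. Worked trace for one
+   byte, x = 0xff: the pair step gives 0xaa (every 2-bit field holds
+   2), the nibble step gives 0x44, and the final step gives 0x08, i.e.
+   popcount(0xff) == 8.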
*/ +uint32_t HELPER(neon_cnt_u8)(uint32_t x) +{ + x = (x & 0x55555555) + ((x >> 1) & 0x55555555); + x = (x & 0x33333333) + ((x >> 2) & 0x33333333); + x = (x & 0x0f0f0f0f) + ((x >> 4) & 0x0f0f0f0f); + return x; +} + +#define NEON_QDMULH16(dest, src1, src2, round) do { \ + uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \ + if ((tmp ^ (tmp << 1)) & SIGNBIT) { \ + SET_QC(); \ + tmp = (tmp >> 31) ^ ~SIGNBIT; \ + } else { \ + tmp <<= 1; \ + } \ + if (round) { \ + int32_t old = tmp; \ + tmp += 1 << 15; \ + if ((int32_t)tmp < old) { \ + SET_QC(); \ + tmp = SIGNBIT - 1; \ + } \ + } \ + dest = tmp >> 16; \ + } while(0) +#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0) +NEON_VOP_ENV(qdmulh_s16, neon_s16, 2) +#undef NEON_FN +#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1) +NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2) +#undef NEON_FN +#undef NEON_QDMULH16 + +#define NEON_QDMULH32(dest, src1, src2, round) do { \ + uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \ + if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \ + SET_QC(); \ + tmp = (tmp >> 63) ^ ~SIGNBIT64; \ + } else { \ + tmp <<= 1; \ + } \ + if (round) { \ + int64_t old = tmp; \ + tmp += (int64_t)1 << 31; \ + if ((int64_t)tmp < old) { \ + SET_QC(); \ + tmp = SIGNBIT64 - 1; \ + } \ + } \ + dest = tmp >> 32; \ + } while(0) +#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0) +NEON_VOP_ENV(qdmulh_s32, neon_s32, 1) +#undef NEON_FN +#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1) +NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1) +#undef NEON_FN +#undef NEON_QDMULH32 + +uint32_t HELPER(neon_narrow_u8)(uint64_t x) +{ + return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u) + | ((x >> 24) & 0xff000000u); +} + +uint32_t HELPER(neon_narrow_u16)(uint64_t x) +{ + return (x & 0xffffu) | ((x >> 16) & 0xffff0000u); +} + +uint32_t HELPER(neon_narrow_high_u8)(uint64_t x) +{ + return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00) + | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000); +} + +uint32_t HELPER(neon_narrow_high_u16)(uint64_t x) +{ + return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000); +} + +uint32_t HELPER(neon_narrow_round_high_u8)(uint64_t x) +{ + x &= 0xff80ff80ff80ff80ull; + x += 0x0080008000800080ull; + return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00) + | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000); +} + +uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x) +{ + x &= 0xffff8000ffff8000ull; + x += 0x0000800000008000ull; + return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000); +} + +uint32_t HELPER(neon_unarrow_sat8)(CPUARMState *env, uint64_t x) +{ + uint16_t s; + uint8_t d; + uint32_t res = 0; +#define SAT8(n) \ + s = x >> n; \ + if (s & 0x8000) { \ + SET_QC(); \ + } else { \ + if (s > 0xff) { \ + d = 0xff; \ + SET_QC(); \ + } else { \ + d = s; \ + } \ + res |= (uint32_t)d << (n / 2); \ + } + + SAT8(0); + SAT8(16); + SAT8(32); + SAT8(48); +#undef SAT8 + return res; +} + +uint32_t HELPER(neon_narrow_sat_u8)(CPUARMState *env, uint64_t x) +{ + uint16_t s; + uint8_t d; + uint32_t res = 0; +#define SAT8(n) \ + s = x >> n; \ + if (s > 0xff) { \ + d = 0xff; \ + SET_QC(); \ + } else { \ + d = s; \ + } \ + res |= (uint32_t)d << (n / 2); + + SAT8(0); + SAT8(16); + SAT8(32); + SAT8(48); +#undef SAT8 + return res; +} + +uint32_t HELPER(neon_narrow_sat_s8)(CPUARMState *env, uint64_t x) +{ + int16_t s; + uint8_t d; + uint32_t res = 0; +#define SAT8(n) \ + s = x >> n; \ + if (s != (int8_t)s) { \ + d = (s >> 15) ^ 0x7f; \ + SET_QC(); \ + } else { \ + d = s; \ + } \ + res |= 
(uint32_t)d << (n / 2); + + SAT8(0); + SAT8(16); + SAT8(32); + SAT8(48); +#undef SAT8 + return res; +} + +uint32_t HELPER(neon_unarrow_sat16)(CPUARMState *env, uint64_t x) +{ + uint32_t high; + uint32_t low; + low = x; + if (low & 0x80000000) { + low = 0; + SET_QC(); + } else if (low > 0xffff) { + low = 0xffff; + SET_QC(); + } + high = x >> 32; + if (high & 0x80000000) { + high = 0; + SET_QC(); + } else if (high > 0xffff) { + high = 0xffff; + SET_QC(); + } + return low | (high << 16); +} + +uint32_t HELPER(neon_narrow_sat_u16)(CPUARMState *env, uint64_t x) +{ + uint32_t high; + uint32_t low; + low = x; + if (low > 0xffff) { + low = 0xffff; + SET_QC(); + } + high = x >> 32; + if (high > 0xffff) { + high = 0xffff; + SET_QC(); + } + return low | (high << 16); +} + +uint32_t HELPER(neon_narrow_sat_s16)(CPUARMState *env, uint64_t x) +{ + int32_t low; + int32_t high; + low = x; + if (low != (int16_t)low) { + low = (low >> 31) ^ 0x7fff; + SET_QC(); + } + high = x >> 32; + if (high != (int16_t)high) { + high = (high >> 31) ^ 0x7fff; + SET_QC(); + } + return (uint16_t)low | (high << 16); +} + +uint32_t HELPER(neon_unarrow_sat32)(CPUARMState *env, uint64_t x) +{ + if (x & 0x8000000000000000ull) { + SET_QC(); + return 0; + } + if (x > 0xffffffffu) { + SET_QC(); + return 0xffffffffu; + } + return x; +} + +uint32_t HELPER(neon_narrow_sat_u32)(CPUARMState *env, uint64_t x) +{ + if (x > 0xffffffffu) { + SET_QC(); + return 0xffffffffu; + } + return x; +} + +uint32_t HELPER(neon_narrow_sat_s32)(CPUARMState *env, uint64_t x) +{ + if ((int64_t)x != (int32_t)x) { + SET_QC(); + return ((int64_t)x >> 63) ^ 0x7fffffff; + } + return x; +} + +uint64_t HELPER(neon_widen_u8)(uint32_t x) +{ + uint64_t tmp; + uint64_t ret; + ret = (uint8_t)x; + tmp = (uint8_t)(x >> 8); + ret |= tmp << 16; + tmp = (uint8_t)(x >> 16); + ret |= tmp << 32; + tmp = (uint8_t)(x >> 24); + ret |= tmp << 48; + return ret; +} + +uint64_t HELPER(neon_widen_s8)(uint32_t x) +{ + uint64_t tmp; + uint64_t ret; + ret = (uint16_t)(int8_t)x; + tmp = (uint16_t)(int8_t)(x >> 8); + ret |= tmp << 16; + tmp = (uint16_t)(int8_t)(x >> 16); + ret |= tmp << 32; + tmp = (uint16_t)(int8_t)(x >> 24); + ret |= tmp << 48; + return ret; +} + +uint64_t HELPER(neon_widen_u16)(uint32_t x) +{ + uint64_t high = (uint16_t)(x >> 16); + return ((uint16_t)x) | (high << 32); +} + +uint64_t HELPER(neon_widen_s16)(uint32_t x) +{ + uint64_t high = (int16_t)(x >> 16); + return ((uint32_t)(int16_t)x) | (high << 32); +} + +uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b) +{ + uint64_t mask; + mask = (a ^ b) & 0x8000800080008000ull; + a &= ~0x8000800080008000ull; + b &= ~0x8000800080008000ull; + return (a + b) ^ mask; +} + +uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b) +{ + uint64_t mask; + mask = (a ^ b) & 0x8000000080000000ull; + a &= ~0x8000000080000000ull; + b &= ~0x8000000080000000ull; + return (a + b) ^ mask; +} + +uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b) +{ + uint64_t tmp; + uint64_t tmp2; + + tmp = a & 0x0000ffff0000ffffull; + tmp += (a >> 16) & 0x0000ffff0000ffffull; + tmp2 = b & 0xffff0000ffff0000ull; + tmp2 += (b << 16) & 0xffff0000ffff0000ull; + return ( tmp & 0xffff) + | ((tmp >> 16) & 0xffff0000ull) + | ((tmp2 << 16) & 0xffff00000000ull) + | ( tmp2 & 0xffff000000000000ull); +} + +uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b) +{ + uint32_t low = a + (a >> 32); + uint32_t high = b + (b >> 32); + return low + ((uint64_t)high << 32); +} + +uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b) +{ + uint64_t mask; + mask = (a ^ ~b) & 
0x8000800080008000ull; + a |= 0x8000800080008000ull; + b &= ~0x8000800080008000ull; + return (a - b) ^ mask; +} + +uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b) +{ + uint64_t mask; + mask = (a ^ ~b) & 0x8000000080000000ull; + a |= 0x8000000080000000ull; + b &= ~0x8000000080000000ull; + return (a - b) ^ mask; +} + +uint64_t HELPER(neon_addl_saturate_s32)(CPUARMState *env, uint64_t a, uint64_t b) +{ + uint32_t x, y; + uint32_t low, high; + + x = a; + y = b; + low = x + y; + if (((low ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) { + SET_QC(); + low = ((int32_t)x >> 31) ^ ~SIGNBIT; + } + x = a >> 32; + y = b >> 32; + high = x + y; + if (((high ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) { + SET_QC(); + high = ((int32_t)x >> 31) ^ ~SIGNBIT; + } + return low | ((uint64_t)high << 32); +} + +uint64_t HELPER(neon_addl_saturate_s64)(CPUARMState *env, uint64_t a, uint64_t b) +{ + uint64_t result; + + result = a + b; + if (((result ^ a) & SIGNBIT64) && !((a ^ b) & SIGNBIT64)) { + SET_QC(); + result = ((int64_t)a >> 63) ^ ~SIGNBIT64; + } + return result; +} + +/* We have to do the arithmetic in a larger type than + * the input type, because for example with a signed 32 bit + * op the absolute difference can overflow a signed 32 bit value. + */ +#define DO_ABD(dest, x, y, intype, arithtype) do { \ + arithtype tmp_x = (intype)(x); \ + arithtype tmp_y = (intype)(y); \ + dest = ((tmp_x > tmp_y) ? tmp_x - tmp_y : tmp_y - tmp_x); \ + } while(0) + +uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b) +{ + uint64_t tmp; + uint64_t result; + DO_ABD(result, a, b, uint8_t, uint32_t); + DO_ABD(tmp, a >> 8, b >> 8, uint8_t, uint32_t); + result |= tmp << 16; + DO_ABD(tmp, a >> 16, b >> 16, uint8_t, uint32_t); + result |= tmp << 32; + DO_ABD(tmp, a >> 24, b >> 24, uint8_t, uint32_t); + result |= tmp << 48; + return result; +} + +uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b) +{ + uint64_t tmp; + uint64_t result; + DO_ABD(result, a, b, int8_t, int32_t); + DO_ABD(tmp, a >> 8, b >> 8, int8_t, int32_t); + result |= tmp << 16; + DO_ABD(tmp, a >> 16, b >> 16, int8_t, int32_t); + result |= tmp << 32; + DO_ABD(tmp, a >> 24, b >> 24, int8_t, int32_t); + result |= tmp << 48; + return result; +} + +uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b) +{ + uint64_t tmp; + uint64_t result; + DO_ABD(result, a, b, uint16_t, uint32_t); + DO_ABD(tmp, a >> 16, b >> 16, uint16_t, uint32_t); + return result | (tmp << 32); +} + +uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b) +{ + uint64_t tmp; + uint64_t result; + DO_ABD(result, a, b, int16_t, int32_t); + DO_ABD(tmp, a >> 16, b >> 16, int16_t, int32_t); + return result | (tmp << 32); +} + +uint64_t HELPER(neon_abdl_u64)(uint32_t a, uint32_t b) +{ + uint64_t result; + DO_ABD(result, a, b, uint32_t, uint64_t); + return result; +} + +uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b) +{ + uint64_t result; + DO_ABD(result, a, b, int32_t, int64_t); + return result; +} +#undef DO_ABD + +/* Widening multiply. Named type is the source type. 
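+
+   Editor's note: the product of two N-bit lanes needs up to 2N bits
+   (e.g. 0x80 * 0x80 = 0x4000 for uint8_t sources), so DO_MULL below
+   first converts each operand to the named source type to fix its
+   signedness, then multiplies in the double-width type before the
+   partial products are merged into the 64-bit result.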
+ */
+#define DO_MULL(dest, x, y, type1, type2) do { \
+    type1 tmp_x = x; \
+    type1 tmp_y = y; \
+    dest = (type2)((type2)tmp_x * (type2)tmp_y); \
+    } while(0)
+
+uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b)
+{
+    uint64_t tmp;
+    uint64_t result;
+
+    DO_MULL(result, a, b, uint8_t, uint16_t);
+    DO_MULL(tmp, a >> 8, b >> 8, uint8_t, uint16_t);
+    result |= tmp << 16;
+    DO_MULL(tmp, a >> 16, b >> 16, uint8_t, uint16_t);
+    result |= tmp << 32;
+    DO_MULL(tmp, a >> 24, b >> 24, uint8_t, uint16_t);
+    result |= tmp << 48;
+    return result;
+}
+
+uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b)
+{
+    uint64_t tmp;
+    uint64_t result;
+
+    DO_MULL(result, a, b, int8_t, uint16_t);
+    DO_MULL(tmp, a >> 8, b >> 8, int8_t, uint16_t);
+    result |= tmp << 16;
+    DO_MULL(tmp, a >> 16, b >> 16, int8_t, uint16_t);
+    result |= tmp << 32;
+    DO_MULL(tmp, a >> 24, b >> 24, int8_t, uint16_t);
+    result |= tmp << 48;
+    return result;
+}
+
+uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b)
+{
+    uint64_t tmp;
+    uint64_t result;
+
+    DO_MULL(result, a, b, uint16_t, uint32_t);
+    DO_MULL(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
+    return result | (tmp << 32);
+}
+
+uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b)
+{
+    uint64_t tmp;
+    uint64_t result;
+
+    DO_MULL(result, a, b, int16_t, uint32_t);
+    DO_MULL(tmp, a >> 16, b >> 16, int16_t, uint32_t);
+    return result | (tmp << 32);
+}
+
+uint64_t HELPER(neon_negl_u16)(uint64_t x)
+{
+    uint16_t tmp;
+    uint64_t result;
+    result = (uint16_t)-x;
+    tmp = -(x >> 16);
+    result |= (uint64_t)tmp << 16;
+    tmp = -(x >> 32);
+    result |= (uint64_t)tmp << 32;
+    tmp = -(x >> 48);
+    result |= (uint64_t)tmp << 48;
+    return result;
+}
+
+uint64_t HELPER(neon_negl_u32)(uint64_t x)
+{
+    uint32_t low = -x;
+    uint32_t high = -(x >> 32);
+    return low | ((uint64_t)high << 32);
+}
+
+/* FIXME: There should be a native op for this. */
+uint64_t HELPER(neon_negl_u64)(uint64_t x)
+{
+    return -x;
+}
+
+/* Saturating sign manipulation. */
+/* ???
Make these use NEON_VOP1 */ +#define DO_QABS8(x) do { \ + if (x == (int8_t)0x80) { \ + x = 0x7f; \ + SET_QC(); \ + } else if (x < 0) { \ + x = -x; \ + }} while (0) +uint32_t HELPER(neon_qabs_s8)(CPUARMState *env, uint32_t x) +{ + neon_s8 vec; + NEON_UNPACK(neon_s8, vec, x); + DO_QABS8(vec.v1); + DO_QABS8(vec.v2); + DO_QABS8(vec.v3); + DO_QABS8(vec.v4); + NEON_PACK(neon_s8, x, vec); + return x; +} +#undef DO_QABS8 + +#define DO_QNEG8(x) do { \ + if (x == (int8_t)0x80) { \ + x = 0x7f; \ + SET_QC(); \ + } else { \ + x = -x; \ + }} while (0) +uint32_t HELPER(neon_qneg_s8)(CPUARMState *env, uint32_t x) +{ + neon_s8 vec; + NEON_UNPACK(neon_s8, vec, x); + DO_QNEG8(vec.v1); + DO_QNEG8(vec.v2); + DO_QNEG8(vec.v3); + DO_QNEG8(vec.v4); + NEON_PACK(neon_s8, x, vec); + return x; +} +#undef DO_QNEG8 + +#define DO_QABS16(x) do { \ + if (x == (int16_t)0x8000) { \ + x = 0x7fff; \ + SET_QC(); \ + } else if (x < 0) { \ + x = -x; \ + }} while (0) +uint32_t HELPER(neon_qabs_s16)(CPUARMState *env, uint32_t x) +{ + neon_s16 vec; + NEON_UNPACK(neon_s16, vec, x); + DO_QABS16(vec.v1); + DO_QABS16(vec.v2); + NEON_PACK(neon_s16, x, vec); + return x; +} +#undef DO_QABS16 + +#define DO_QNEG16(x) do { \ + if (x == (int16_t)0x8000) { \ + x = 0x7fff; \ + SET_QC(); \ + } else { \ + x = -x; \ + }} while (0) +uint32_t HELPER(neon_qneg_s16)(CPUARMState *env, uint32_t x) +{ + neon_s16 vec; + NEON_UNPACK(neon_s16, vec, x); + DO_QNEG16(vec.v1); + DO_QNEG16(vec.v2); + NEON_PACK(neon_s16, x, vec); + return x; +} +#undef DO_QNEG16 + +uint32_t HELPER(neon_qabs_s32)(CPUARMState *env, uint32_t x) +{ + if (x == SIGNBIT) { + SET_QC(); + x = ~SIGNBIT; + } else if ((int32_t)x < 0) { + x = -x; + } + return x; +} + +uint32_t HELPER(neon_qneg_s32)(CPUARMState *env, uint32_t x) +{ + if (x == SIGNBIT) { + SET_QC(); + x = ~SIGNBIT; + } else { + x = -x; + } + return x; +} + +/* NEON Float helpers. */ +uint32_t HELPER(neon_min_f32)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + return float32_val(float32_min(make_float32(a), make_float32(b), fpst)); +} + +uint32_t HELPER(neon_max_f32)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + return float32_val(float32_max(make_float32(a), make_float32(b), fpst)); +} + +uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + float32 f0 = make_float32(a); + float32 f1 = make_float32(b); + return float32_val(float32_abs(float32_sub(f0, f1, fpst))); +} + +/* Floating point comparisons produce an integer result. + * Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do. + * Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires. 
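+ *
+ * Editor's note: negating the 0/1 softfloat result turns it into the
+ * all-zeroes / all-ones lane mask NEON expects, since (uint32_t)-1 ==
+ * 0xffffffff. Note also that "a >= b" and "a > b" are obtained below
+ * by swapping the operands of float32_le and float32_lt rather than
+ * through dedicated ge/gt primitives.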
+ */ +uint32_t HELPER(neon_ceq_f32)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + return -float32_eq_quiet(make_float32(a), make_float32(b), fpst); +} + +uint32_t HELPER(neon_cge_f32)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + return -float32_le(make_float32(b), make_float32(a), fpst); +} + +uint32_t HELPER(neon_cgt_f32)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + return -float32_lt(make_float32(b), make_float32(a), fpst); +} + +uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + float32 f0 = float32_abs(make_float32(a)); + float32 f1 = float32_abs(make_float32(b)); + return -float32_le(f1, f0, fpst); +} + +uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + float32 f0 = float32_abs(make_float32(a)); + float32 f1 = float32_abs(make_float32(b)); + return -float32_lt(f1, f0, fpst); +} + +#define ELEM(V, N, SIZE) (((V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1)) + +void HELPER(neon_qunzip8)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + uint64_t zm0 = float64_val(env->vfp.regs[rm]); + uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); + uint64_t zd0 = float64_val(env->vfp.regs[rd]); + uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); + uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zd0, 2, 8) << 8) + | (ELEM(zd0, 4, 8) << 16) | (ELEM(zd0, 6, 8) << 24) + | (ELEM(zd1, 0, 8) << 32) | (ELEM(zd1, 2, 8) << 40) + | (ELEM(zd1, 4, 8) << 48) | (ELEM(zd1, 6, 8) << 56); + uint64_t d1 = ELEM(zm0, 0, 8) | (ELEM(zm0, 2, 8) << 8) + | (ELEM(zm0, 4, 8) << 16) | (ELEM(zm0, 6, 8) << 24) + | (ELEM(zm1, 0, 8) << 32) | (ELEM(zm1, 2, 8) << 40) + | (ELEM(zm1, 4, 8) << 48) | (ELEM(zm1, 6, 8) << 56); + uint64_t m0 = ELEM(zd0, 1, 8) | (ELEM(zd0, 3, 8) << 8) + | (ELEM(zd0, 5, 8) << 16) | (ELEM(zd0, 7, 8) << 24) + | (ELEM(zd1, 1, 8) << 32) | (ELEM(zd1, 3, 8) << 40) + | (ELEM(zd1, 5, 8) << 48) | (ELEM(zd1, 7, 8) << 56); + uint64_t m1 = ELEM(zm0, 1, 8) | (ELEM(zm0, 3, 8) << 8) + | (ELEM(zm0, 5, 8) << 16) | (ELEM(zm0, 7, 8) << 24) + | (ELEM(zm1, 1, 8) << 32) | (ELEM(zm1, 3, 8) << 40) + | (ELEM(zm1, 5, 8) << 48) | (ELEM(zm1, 7, 8) << 56); + env->vfp.regs[rm] = make_float64(m0); + env->vfp.regs[rm + 1] = make_float64(m1); + env->vfp.regs[rd] = make_float64(d0); + env->vfp.regs[rd + 1] = make_float64(d1); +} + +void HELPER(neon_qunzip16)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + uint64_t zm0 = float64_val(env->vfp.regs[rm]); + uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); + uint64_t zd0 = float64_val(env->vfp.regs[rd]); + uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); + uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zd0, 2, 16) << 16) + | (ELEM(zd1, 0, 16) << 32) | (ELEM(zd1, 2, 16) << 48); + uint64_t d1 = ELEM(zm0, 0, 16) | (ELEM(zm0, 2, 16) << 16) + | (ELEM(zm1, 0, 16) << 32) | (ELEM(zm1, 2, 16) << 48); + uint64_t m0 = ELEM(zd0, 1, 16) | (ELEM(zd0, 3, 16) << 16) + | (ELEM(zd1, 1, 16) << 32) | (ELEM(zd1, 3, 16) << 48); + uint64_t m1 = ELEM(zm0, 1, 16) | (ELEM(zm0, 3, 16) << 16) + | (ELEM(zm1, 1, 16) << 32) | (ELEM(zm1, 3, 16) << 48); + env->vfp.regs[rm] = make_float64(m0); + env->vfp.regs[rm + 1] = make_float64(m1); + env->vfp.regs[rd] = make_float64(d0); + env->vfp.regs[rd + 1] = make_float64(d1); +} + +void HELPER(neon_qunzip32)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + uint64_t zm0 = float64_val(env->vfp.regs[rm]); + uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); + uint64_t zd0 = float64_val(env->vfp.regs[rd]); + uint64_t zd1 = 
float64_val(env->vfp.regs[rd + 1]); + uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zd1, 0, 32) << 32); + uint64_t d1 = ELEM(zm0, 0, 32) | (ELEM(zm1, 0, 32) << 32); + uint64_t m0 = ELEM(zd0, 1, 32) | (ELEM(zd1, 1, 32) << 32); + uint64_t m1 = ELEM(zm0, 1, 32) | (ELEM(zm1, 1, 32) << 32); + env->vfp.regs[rm] = make_float64(m0); + env->vfp.regs[rm + 1] = make_float64(m1); + env->vfp.regs[rd] = make_float64(d0); + env->vfp.regs[rd + 1] = make_float64(d1); +} + +void HELPER(neon_unzip8)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + uint64_t zm = float64_val(env->vfp.regs[rm]); + uint64_t zd = float64_val(env->vfp.regs[rd]); + uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zd, 2, 8) << 8) + | (ELEM(zd, 4, 8) << 16) | (ELEM(zd, 6, 8) << 24) + | (ELEM(zm, 0, 8) << 32) | (ELEM(zm, 2, 8) << 40) + | (ELEM(zm, 4, 8) << 48) | (ELEM(zm, 6, 8) << 56); + uint64_t m0 = ELEM(zd, 1, 8) | (ELEM(zd, 3, 8) << 8) + | (ELEM(zd, 5, 8) << 16) | (ELEM(zd, 7, 8) << 24) + | (ELEM(zm, 1, 8) << 32) | (ELEM(zm, 3, 8) << 40) + | (ELEM(zm, 5, 8) << 48) | (ELEM(zm, 7, 8) << 56); + env->vfp.regs[rm] = make_float64(m0); + env->vfp.regs[rd] = make_float64(d0); +} + +void HELPER(neon_unzip16)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + uint64_t zm = float64_val(env->vfp.regs[rm]); + uint64_t zd = float64_val(env->vfp.regs[rd]); + uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zd, 2, 16) << 16) + | (ELEM(zm, 0, 16) << 32) | (ELEM(zm, 2, 16) << 48); + uint64_t m0 = ELEM(zd, 1, 16) | (ELEM(zd, 3, 16) << 16) + | (ELEM(zm, 1, 16) << 32) | (ELEM(zm, 3, 16) << 48); + env->vfp.regs[rm] = make_float64(m0); + env->vfp.regs[rd] = make_float64(d0); +} + +void HELPER(neon_qzip8)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + uint64_t zm0 = float64_val(env->vfp.regs[rm]); + uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); + uint64_t zd0 = float64_val(env->vfp.regs[rd]); + uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); + uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zm0, 0, 8) << 8) + | (ELEM(zd0, 1, 8) << 16) | (ELEM(zm0, 1, 8) << 24) + | (ELEM(zd0, 2, 8) << 32) | (ELEM(zm0, 2, 8) << 40) + | (ELEM(zd0, 3, 8) << 48) | (ELEM(zm0, 3, 8) << 56); + uint64_t d1 = ELEM(zd0, 4, 8) | (ELEM(zm0, 4, 8) << 8) + | (ELEM(zd0, 5, 8) << 16) | (ELEM(zm0, 5, 8) << 24) + | (ELEM(zd0, 6, 8) << 32) | (ELEM(zm0, 6, 8) << 40) + | (ELEM(zd0, 7, 8) << 48) | (ELEM(zm0, 7, 8) << 56); + uint64_t m0 = ELEM(zd1, 0, 8) | (ELEM(zm1, 0, 8) << 8) + | (ELEM(zd1, 1, 8) << 16) | (ELEM(zm1, 1, 8) << 24) + | (ELEM(zd1, 2, 8) << 32) | (ELEM(zm1, 2, 8) << 40) + | (ELEM(zd1, 3, 8) << 48) | (ELEM(zm1, 3, 8) << 56); + uint64_t m1 = ELEM(zd1, 4, 8) | (ELEM(zm1, 4, 8) << 8) + | (ELEM(zd1, 5, 8) << 16) | (ELEM(zm1, 5, 8) << 24) + | (ELEM(zd1, 6, 8) << 32) | (ELEM(zm1, 6, 8) << 40) + | (ELEM(zd1, 7, 8) << 48) | (ELEM(zm1, 7, 8) << 56); + env->vfp.regs[rm] = make_float64(m0); + env->vfp.regs[rm + 1] = make_float64(m1); + env->vfp.regs[rd] = make_float64(d0); + env->vfp.regs[rd + 1] = make_float64(d1); +} + +void HELPER(neon_qzip16)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + uint64_t zm0 = float64_val(env->vfp.regs[rm]); + uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); + uint64_t zd0 = float64_val(env->vfp.regs[rd]); + uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); + uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zm0, 0, 16) << 16) + | (ELEM(zd0, 1, 16) << 32) | (ELEM(zm0, 1, 16) << 48); + uint64_t d1 = ELEM(zd0, 2, 16) | (ELEM(zm0, 2, 16) << 16) + | (ELEM(zd0, 3, 16) << 32) | (ELEM(zm0, 3, 16) << 48); + uint64_t m0 = ELEM(zd1, 0, 16) | (ELEM(zm1, 0, 16) << 16) + | (ELEM(zd1, 1, 16) << 32) | (ELEM(zm1, 
1, 16) << 48); + uint64_t m1 = ELEM(zd1, 2, 16) | (ELEM(zm1, 2, 16) << 16) + | (ELEM(zd1, 3, 16) << 32) | (ELEM(zm1, 3, 16) << 48); + env->vfp.regs[rm] = make_float64(m0); + env->vfp.regs[rm + 1] = make_float64(m1); + env->vfp.regs[rd] = make_float64(d0); + env->vfp.regs[rd + 1] = make_float64(d1); +} + +void HELPER(neon_qzip32)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + uint64_t zm0 = float64_val(env->vfp.regs[rm]); + uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); + uint64_t zd0 = float64_val(env->vfp.regs[rd]); + uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); + uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zm0, 0, 32) << 32); + uint64_t d1 = ELEM(zd0, 1, 32) | (ELEM(zm0, 1, 32) << 32); + uint64_t m0 = ELEM(zd1, 0, 32) | (ELEM(zm1, 0, 32) << 32); + uint64_t m1 = ELEM(zd1, 1, 32) | (ELEM(zm1, 1, 32) << 32); + env->vfp.regs[rm] = make_float64(m0); + env->vfp.regs[rm + 1] = make_float64(m1); + env->vfp.regs[rd] = make_float64(d0); + env->vfp.regs[rd + 1] = make_float64(d1); +} + +void HELPER(neon_zip8)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + uint64_t zm = float64_val(env->vfp.regs[rm]); + uint64_t zd = float64_val(env->vfp.regs[rd]); + uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zm, 0, 8) << 8) + | (ELEM(zd, 1, 8) << 16) | (ELEM(zm, 1, 8) << 24) + | (ELEM(zd, 2, 8) << 32) | (ELEM(zm, 2, 8) << 40) + | (ELEM(zd, 3, 8) << 48) | (ELEM(zm, 3, 8) << 56); + uint64_t m0 = ELEM(zd, 4, 8) | (ELEM(zm, 4, 8) << 8) + | (ELEM(zd, 5, 8) << 16) | (ELEM(zm, 5, 8) << 24) + | (ELEM(zd, 6, 8) << 32) | (ELEM(zm, 6, 8) << 40) + | (ELEM(zd, 7, 8) << 48) | (ELEM(zm, 7, 8) << 56); + env->vfp.regs[rm] = make_float64(m0); + env->vfp.regs[rd] = make_float64(d0); +} + +void HELPER(neon_zip16)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + uint64_t zm = float64_val(env->vfp.regs[rm]); + uint64_t zd = float64_val(env->vfp.regs[rd]); + uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zm, 0, 16) << 16) + | (ELEM(zd, 1, 16) << 32) | (ELEM(zm, 1, 16) << 48); + uint64_t m0 = ELEM(zd, 2, 16) | (ELEM(zm, 2, 16) << 16) + | (ELEM(zd, 3, 16) << 32) | (ELEM(zm, 3, 16) << 48); + env->vfp.regs[rm] = make_float64(m0); + env->vfp.regs[rd] = make_float64(d0); +} diff --git a/src/target-arm/op_addsub.h b/src/target-arm/op_addsub.h new file mode 100644 index 0000000..ca4a189 --- /dev/null +++ b/src/target-arm/op_addsub.h @@ -0,0 +1,103 @@ +/* + * ARMv6 integer SIMD operations. + * + * Copyright (c) 2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. 
+ */ + +#ifdef ARITH_GE +#define GE_ARG , void *gep +#define DECLARE_GE uint32_t ge = 0 +#define SET_GE *(uint32_t *)gep = ge +#else +#define GE_ARG +#define DECLARE_GE do{}while(0) +#define SET_GE do{}while(0) +#endif + +#define RESULT(val, n, width) \ + res |= ((uint32_t)(glue(glue(uint,width),_t))(val)) << (n * width) + +uint32_t HELPER(glue(PFX,add16))(uint32_t a, uint32_t b GE_ARG) +{ + uint32_t res = 0; + DECLARE_GE; + + ADD16(a, b, 0); + ADD16(a >> 16, b >> 16, 1); + SET_GE; + return res; +} + +uint32_t HELPER(glue(PFX,add8))(uint32_t a, uint32_t b GE_ARG) +{ + uint32_t res = 0; + DECLARE_GE; + + ADD8(a, b, 0); + ADD8(a >> 8, b >> 8, 1); + ADD8(a >> 16, b >> 16, 2); + ADD8(a >> 24, b >> 24, 3); + SET_GE; + return res; +} + +uint32_t HELPER(glue(PFX,sub16))(uint32_t a, uint32_t b GE_ARG) +{ + uint32_t res = 0; + DECLARE_GE; + + SUB16(a, b, 0); + SUB16(a >> 16, b >> 16, 1); + SET_GE; + return res; +} + +uint32_t HELPER(glue(PFX,sub8))(uint32_t a, uint32_t b GE_ARG) +{ + uint32_t res = 0; + DECLARE_GE; + + SUB8(a, b, 0); + SUB8(a >> 8, b >> 8, 1); + SUB8(a >> 16, b >> 16, 2); + SUB8(a >> 24, b >> 24, 3); + SET_GE; + return res; +} + +uint32_t HELPER(glue(PFX,subaddx))(uint32_t a, uint32_t b GE_ARG) +{ + uint32_t res = 0; + DECLARE_GE; + + ADD16(a, b >> 16, 0); + SUB16(a >> 16, b, 1); + SET_GE; + return res; +} + +uint32_t HELPER(glue(PFX,addsubx))(uint32_t a, uint32_t b GE_ARG) +{ + uint32_t res = 0; + DECLARE_GE; + + SUB16(a, b >> 16, 0); + ADD16(a >> 16, b, 1); + SET_GE; + return res; +} + +#undef GE_ARG +#undef DECLARE_GE +#undef SET_GE +#undef RESULT + +#undef ARITH_GE +#undef PFX +#undef ADD16 +#undef SUB16 +#undef ADD8 +#undef SUB8 diff --git a/src/target-arm/op_helper.c b/src/target-arm/op_helper.c new file mode 100644 index 0000000..918120f --- /dev/null +++ b/src/target-arm/op_helper.c @@ -0,0 +1,463 @@ +/// Copyright (C) 2003 Fabrice Bellard +/// Copyright (C) 2010 Dependable Systems Laboratory, EPFL +/// Copyright (C) 2017 Adrian Herrera +/// Copyrights of all contributions belong to their respective owners. +/// +/// This library is free software; you can redistribute it and/or +/// modify it under the terms of the GNU Library General Public +/// License as published by the Free Software Foundation; either +/// version 2 of the License, or (at your option) any later version. +/// +/// This library is distributed in the hope that it will be useful, +/// but WITHOUT ANY WARRANTY; without even the implied warranty of +/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +/// Library General Public License for more details. +/// +/// You should have received a copy of the GNU Library General Public +/// License along with this library; if not, see . 
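+///
+/// Runtime helpers for the ARM target: saturating arithmetic, CPSR access,
+/// flag-setting ALU and shift operations, and the softmmu TLB fill path.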
+
+#include "cpu-defs.h"
+#include "cpu.h"
+#include "dyngen-exec.h"
+#include "helper.h"
+
+#define SIGNBIT (uint32_t) 0x80000000
+#define SIGNBIT64 ((uint64_t) 1 << 63)
+
+#ifdef SYMBEX_LLVM_LIB
+#include "llvm-lib.h"
+#endif
+
+struct CPUARMState *env = 0;
+
+static void raise_exception(int tt) {
+    env->exception_index = tt;
+    cpu_loop_exit(env);
+}
+
+uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, uint32_t rn, uint32_t maxindex) {
+    uint32_t val;
+    uint32_t tmp;
+    int index;
+    int shift;
+    uint64_t *table;
+    table = (uint64_t *) &env->vfp.regs[rn];
+    val = 0;
+    for (shift = 0; shift < 32; shift += 8) {
+        index = (ireg >> shift) & 0xff;
+        if (index < maxindex) {
+            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
+            val |= tmp << shift;
+        } else {
+            val |= def & (0xff << shift);
+        }
+    }
+    return val;
+}
+
+#include "softmmu_exec.h"
+
+#define MMUSUFFIX _mmu
+
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+
+#if defined(CONFIG_SYMBEX) && !defined(SYMBEX_LLVM_LIB)
+#undef MMUSUFFIX
+#define MMUSUFFIX _mmu_symb
+#define _raw _raw_symb
+
+#define SHIFT 0
+#include "softmmu_header.h"
+
+#define SHIFT 1
+#include "softmmu_header.h"
+
+#define SHIFT 2
+#include "softmmu_header.h"
+
+#define SHIFT 3
+#include "softmmu_header.h"
+
+#undef _raw
+#endif
+
+#ifdef CONFIG_SYMBEX
+#include
+
+/* This will be called from S2EExecutor if running concretely; it will
+   in turn call the real ARM IRQ handler with the current CPUARMState. */
+void s2e_do_interrupt(void) {
+    s2e_helper_do_interrupt(env);
+}
+#endif
+
+/* Try to fill the TLB and raise an exception on error. If retaddr is
+   NULL, it means that the function was called in C code (i.e. not
+   from generated code or from helper.c) */
+/* XXX: fix it to restore all registers */
+void tlb_fill(CPUArchState *env1, target_ulong addr, target_ulong page_addr, int is_write, int mmu_idx, void *retaddr) {
+    TranslationBlock *tb;
+    CPUArchState *saved_env;
+    unsigned long pc;
+    int ret;
+
+    saved_env = env;
+
+    if (env != env1)
+        env = env1;
+
+#ifdef CONFIG_SYMBEX
+    s2e_on_tlb_miss(g_s2e, g_s2e_state, addr, is_write);
+    ret = cpu_arm_handle_mmu_fault(env, page_addr, is_write, mmu_idx);
+#else
+    ret = cpu_arm_handle_mmu_fault(env, addr, is_write, mmu_idx);
+#endif
+
+    if (unlikely(ret)) {
+
+#ifdef CONFIG_SYMBEX
+        /* In S2E we pass the page address instead of addr to cpu_arm_handle_mmu_fault,
+           since the latter can be symbolic while the former is always concrete.
+           To compensate, we reset the fault address here. */
+        if (env->exception_index == EXCP_PREFETCH_ABORT || env->exception_index == EXCP_DATA_ABORT) {
+            assert(0 && "handle coprocessor exception properly");
+        }
+#endif
+
+        if (retaddr) {
+            /* now we have a real cpu fault */
+            pc = (uintptr_t) retaddr;
+            tb = tb_find_pc(pc);
+            if (tb) {
+                /* the PC is inside the translated code. It means that we have
+                   a virtual CPU fault */
+                cpu_restore_state(tb, env, pc);
+            }
+        }
+
+#ifdef CONFIG_SYMBEX
+        s2e_on_page_fault(g_s2e, g_s2e_state, addr, is_write);
+#endif
+
+        raise_exception(env->exception_index);
+    }
+    if (saved_env != env)
+        env = saved_env;
+}
+
+/* FIXME: Pass an explicit pointer to QF to CPUARMState, and move saturating
+   instructions into helper.c */
+uint32_t HELPER(add_setq)(uint32_t a, uint32_t b) {
+    uint32_t res = a + b;
+    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
+        env->QF = 1;
+    return res;
+}
+
+uint32_t HELPER(add_saturate)(uint32_t a, uint32_t b) {
+    uint32_t res = a + b;
+    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
+        env->QF = 1;
+        res = ~(((int32_t) a >> 31) ^ SIGNBIT);
+    }
+    return res;
+}
+
+uint32_t HELPER(sub_saturate)(uint32_t a, uint32_t b) {
+    uint32_t res = a - b;
+    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
+        env->QF = 1;
+        res = ~(((int32_t) a >> 31) ^ SIGNBIT);
+    }
+    return res;
+}
+
+uint32_t HELPER(double_saturate)(int32_t val) {
+    uint32_t res;
+    if (val >= 0x40000000) {
+        res = ~SIGNBIT;
+        env->QF = 1;
+    } else if (val <= (int32_t) 0xc0000000) {
+        res = SIGNBIT;
+        env->QF = 1;
+    } else {
+        res = val << 1;
+    }
+    return res;
+}
+
+uint32_t HELPER(add_usaturate)(uint32_t a, uint32_t b) {
+    uint32_t res = a + b;
+    if (res < a) {
+        env->QF = 1;
+        res = ~0;
+    }
+    return res;
+}
+
+uint32_t HELPER(sub_usaturate)(uint32_t a, uint32_t b) {
+    uint32_t res = a - b;
+    if (res > a) {
+        env->QF = 1;
+        res = 0;
+    }
+    return res;
+}
+
+/* Signed saturation. */
+static inline uint32_t do_ssat(int32_t val, int shift) {
+    int32_t top;
+    uint32_t mask;
+
+    top = val >> shift;
+    mask = (1u << shift) - 1;
+    if (top > 0) {
+        env->QF = 1;
+        return mask;
+    } else if (top < -1) {
+        env->QF = 1;
+        return ~mask;
+    }
+    return val;
+}
+
+/* Unsigned saturation. */
+static inline uint32_t do_usat(int32_t val, int shift) {
+    uint32_t max;
+
+    max = (1u << shift) - 1;
+    if (val < 0) {
+        env->QF = 1;
+        return 0;
+    } else if (val > max) {
+        env->QF = 1;
+        return max;
+    }
+    return val;
+}
+
+/* Signed saturate. */
+uint32_t HELPER(ssat)(uint32_t x, uint32_t shift) {
+    return do_ssat(x, shift);
+}
+
+/* Dual halfword signed saturate. */
+uint32_t HELPER(ssat16)(uint32_t x, uint32_t shift) {
+    uint32_t res;
+
+    res = (uint16_t) do_ssat((int16_t) x, shift);
+    res |= do_ssat(((int32_t) x) >> 16, shift) << 16;
+    return res;
+}
+
+/* Unsigned saturate. */
+uint32_t HELPER(usat)(uint32_t x, uint32_t shift) {
+    return do_usat(x, shift);
+}
+
+/* Dual halfword unsigned saturate. */
+uint32_t HELPER(usat16)(uint32_t x, uint32_t shift) {
+    uint32_t res;
+
+    res = (uint16_t) do_usat((int16_t) x, shift);
+    res |= do_usat(((int32_t) x) >> 16, shift) << 16;
+    return res;
+}
+
+void HELPER(wfi)(void) {
+    env->exception_index = EXCP_HLT;
+    env->halted = 1;
+    cpu_loop_exit(env);
+}
+
+void HELPER(exception)(uint32_t excp) {
+    env->exception_index = excp;
+    cpu_loop_exit(env);
+}
+
+uint32_t HELPER(cpsr_read)(void) {
+    return cpsr_read(env) & ~CPSR_EXEC;
+}
+
+void HELPER(cpsr_write)(uint32_t val, uint32_t mask) {
+    cpsr_write(env, val, mask);
+}
+
+/* Access to user mode registers from privileged modes.
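+   These helpers back the LDM/STM variants that transfer the user-mode
+   register bank while running in a privileged mode.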
*/ +uint32_t HELPER(get_user_reg)(uint32_t regno) { + uint32_t val; + + if (regno == 13) { + val = RR_cpu(env, banked_r13[0]); + } else if (regno == 14) { + val = RR_cpu(env, banked_r14[0]); + } else if (regno == 15) { + val = env->regs[regno]; + } else if (regno >= 8 && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) { + val = RR_cpu(env, usr_regs[regno - 8]); + } else { + val = RR_cpu(env, regs[regno]); + } + return val; +} + +void HELPER(set_user_reg)(uint32_t regno, uint32_t val) { + if (regno == 13) { + WR_cpu(env, banked_r13[0], val); + } else if (regno == 14) { + WR_cpu(env, banked_r14[0], val); + } else if (regno == 15) { + env->regs[regno] = val; + } else if (regno >= 8 && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) { + WR_cpu(env, usr_regs[regno - 8], val); + } else { + WR_cpu(env, regs[regno], val); + } +} + +/* ??? Flag setting arithmetic is awkward because we need to do comparisons. + The only way to do that in TCG is a conditional branch, which clobbers + all our temporaries. For now implement these as helper functions. */ + +uint32_t HELPER(add_cc)(uint32_t a, uint32_t b) { + uint32_t result; + result = a + b; + WR_cpu(env, NF, result); + WR_cpu(env, ZF, result); + WR_cpu(env, CF, (result < a)); + WR_cpu(env, VF, ((a ^ b ^ -1) & (a ^ result))); + return result; +} + +uint32_t HELPER(adc_cc)(uint32_t a, uint32_t b) { + uint32_t result; + if (!(RR_cpu(env, CF))) { + result = a + b; + WR_cpu(env, CF, (result < a)); + } else { + result = a + b + 1; + WR_cpu(env, CF, (result <= a)); + } + WR_cpu(env, VF, ((a ^ b ^ -1) & (a ^ result))); + WR_cpu(env, NF, result); + WR_cpu(env, ZF, result); + return result; +} + +uint32_t HELPER(sub_cc)(uint32_t a, uint32_t b) { + uint32_t result; + result = a - b; + WR_cpu(env, NF, result); + WR_cpu(env, ZF, result); + WR_cpu(env, CF, (a >= b)); + WR_cpu(env, VF, ((a ^ b) & (a ^ result))); + return result; +} + +uint32_t HELPER(sbc_cc)(uint32_t a, uint32_t b) { + uint32_t result; + if (!(RR_cpu(env, CF))) { + result = a - b - 1; + WR_cpu(env, CF, (a > b)); + } else { + result = a - b; + WR_cpu(env, CF, (a >= b)); + } + WR_cpu(env, VF, ((a ^ b) & (a ^ result))); + WR_cpu(env, NF, result); + WR_cpu(env, ZF, result); + return result; +} + +/* Similarly for variable shift instructions. 
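+   Only the bottom byte of the shift amount is significant, so shifts of
+   32 or more must be special-cased instead of being left to the host's
+   shift semantics.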
*/ + +uint32_t HELPER(shl)(uint32_t x, uint32_t i) { + int shift = i & 0xff; + if (shift >= 32) + return 0; + return x << shift; +} + +uint32_t HELPER(shr)(uint32_t x, uint32_t i) { + int shift = i & 0xff; + if (shift >= 32) + return 0; + return (uint32_t) x >> shift; +} + +uint32_t HELPER(sar)(uint32_t x, uint32_t i) { + int shift = i & 0xff; + if (shift >= 32) + shift = 31; + return (int32_t) x >> shift; +} + +uint32_t HELPER(shl_cc)(uint32_t x, uint32_t i) { + int shift = i & 0xff; + if (shift >= 32) { + if (shift == 32) + WR_cpu(env, CF, (x & 1)); + else + WR_cpu(env, CF, 0); + return 0; + } else if (shift != 0) { + WR_cpu(env, CF, ((x >> (32 - shift)) & 1)); + return x << shift; + } + return x; +} + +uint32_t HELPER(shr_cc)(uint32_t x, uint32_t i) { + int shift = i & 0xff; + if (shift >= 32) { + if (shift == 32) + WR_cpu(env, CF, ((x >> 31) & 1)); + else + WR_cpu(env, CF, 0); + return 0; + } else if (shift != 0) { + WR_cpu(env, CF, ((x >> (shift - 1)) & 1)); + return x >> shift; + } + return x; +} + +uint32_t HELPER(sar_cc)(uint32_t x, uint32_t i) { + int shift = i & 0xff; + if (shift >= 32) { + WR_cpu(env, CF, ((x >> 31) & 1)); + return (int32_t) x >> 31; + } else if (shift != 0) { + WR_cpu(env, CF, ((x >> (shift - 1)) & 1)); + return (int32_t) x >> shift; + } + return x; +} + +uint32_t HELPER(ror_cc)(uint32_t x, uint32_t i) { + int shift1, shift; + shift1 = i & 0xff; + shift = shift1 & 0x1f; + if (shift == 0) { + if (shift1 != 0) + WR_cpu(env, CF, ((x >> 31) & 1)); + return x; + } else { + WR_cpu(env, CF, ((x >> (shift - 1)) & 1)); + return ((uint32_t) x >> shift) | (x << (32 - shift)); + } +} diff --git a/src/target-arm/translate.c b/src/target-arm/translate.c new file mode 100644 index 0000000..3bce756 --- /dev/null +++ b/src/target-arm/translate.c @@ -0,0 +1,10207 @@ +/* + * ARM translation + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2005-2007 CodeSourcery + * Copyright (c) 2007 OpenedHand, Ltd. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */
+#include
+#include
+#include
+#include
+#include
+
+// clang-format off
+#include "cpu.h"
+#include
+// clang-format on
+
+#include "helper.h"
+#define GEN_HELPER 1
+#include "helper.h"
+
+#include
+
+#ifdef CONFIG_SYMBEX
+#include
+#endif
+
+#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
+#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
+/* currently all emulated v5 cores are also v5TE, so don't bother */
+#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
+#define ENABLE_ARCH_5J 0
+#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
+#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
+#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
+#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
+
+#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
+
+/* internal defines */
+typedef struct DisasContext {
+    target_ulong pc;
+    int is_jmp;
+    /* Nonzero if this instruction has been conditionally skipped. */
+    int condjmp;
+    /* The label that will be jumped to when the instruction is skipped. */
+    int condlabel;
+    /* Thumb-2 conditional execution bits. */
+    int condexec_mask;
+    int condexec_cond;
+    struct TranslationBlock *tb;
+    int singlestep_enabled;
+    int thumb;
+    int bswap_code;
+#if !defined(CONFIG_USER_ONLY)
+    int user;
+#endif
+    int vfp_enabled;
+    int vec_len;
+    int vec_stride;
+} DisasContext;
+
+static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
+
+#if defined(CONFIG_USER_ONLY)
+#define IS_USER(s) 1
+#else
+#define IS_USER(s) (s->user)
+#endif
+
+/* These instructions trap after executing, so defer them until after the
+   conditional execution state has been updated. */
+#define DISAS_WFI 4
+#define DISAS_SWI 5
+
+static TCGv_ptr cpu_env;
+/* We reuse the same 64-bit temporaries for efficiency. */
+static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
+static TCGv_i32 cpu_R[16];
+static TCGv_i32 cpu_exclusive_addr;
+static TCGv_i32 cpu_exclusive_val;
+static TCGv_i32 cpu_exclusive_high;
+#ifdef CONFIG_USER_ONLY
+static TCGv_i32 cpu_exclusive_test;
+static TCGv_i32 cpu_exclusive_info;
+#endif
+
+/* FIXME: These should be removed. */
+static TCGv cpu_F0s, cpu_F1s;
+static TCGv_i64 cpu_F0d, cpu_F1d;
+
+//#include "gen-icount.h"
+
+static const char *regnames[] =
+    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
+
+/* initialize TCG globals.
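+   Guest registers live at fixed offsets in CPUARMState; registering them
+   as TCG globals lets generated code read and write them in place.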
+ */
+void arm_translate_init(void)
+{
+    int i;
+
+    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+
+    for (i = 0; i < 16; i++) {
+        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
+                                          offsetof(CPUARMState, regs[i]),
+                                          regnames[i]);
+    }
+    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
+        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
+    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
+        offsetof(CPUARMState, exclusive_val), "exclusive_val");
+    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
+        offsetof(CPUARMState, exclusive_high), "exclusive_high");
+#ifdef CONFIG_USER_ONLY
+    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
+        offsetof(CPUARMState, exclusive_test), "exclusive_test");
+    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
+        offsetof(CPUARMState, exclusive_info), "exclusive_info");
+#endif
+
+#define GEN_HELPER 2
+#include "helper.h"
+}
+
+static inline TCGv load_cpu_offset(int offset)
+{
+    TCGv tmp = tcg_temp_new_i32();
+    tcg_gen_ld_i32(tmp, cpu_env, offset);
+    return tmp;
+}
+
+#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
+
+static inline void store_cpu_offset(TCGv var, int offset)
+{
+    tcg_gen_st_i32(var, cpu_env, offset);
+    tcg_temp_free_i32(var);
+}
+
+#define store_cpu_field(var, name) \
+    store_cpu_offset(var, offsetof(CPUARMState, name))
+
+/* Set a variable to the value of a CPU register. */
+static void load_reg_var(DisasContext *s, TCGv var, int reg)
+{
+    if (reg == 15) {
+        uint32_t addr;
+        /* normally, since we updated PC, we need only to add one insn */
+        if (s->thumb)
+            addr = (long)s->pc + 2;
+        else
+            addr = (long)s->pc + 4;
+        tcg_gen_movi_i32(var, addr);
+    } else {
+        tcg_gen_mov_i32(var, cpu_R[reg]);
+    }
+}
+
+/* Create a new temporary and set it to the value of a CPU register. */
+static inline TCGv load_reg(DisasContext *s, int reg)
+{
+    TCGv tmp = tcg_temp_new_i32();
+    load_reg_var(s, tmp, reg);
+    return tmp;
+}
+
+/* Set a CPU register. The source must be a temporary and will be
+   marked as dead. */
+static void store_reg(DisasContext *s, int reg, TCGv var)
+{
+    if (reg == 15) {
+        tcg_gen_andi_i32(var, var, ~1);
+        s->is_jmp = DISAS_JUMP;
+    }
+    tcg_gen_mov_i32(cpu_R[reg], var);
+    tcg_temp_free_i32(var);
+}
+
+/* Value extensions. */
+#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
+#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
+#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
+#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
+
+#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
+#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
+
+static inline void gen_set_cpsr(TCGv var, uint32_t mask)
+{
+    TCGv tmp_mask = tcg_const_i32(mask);
+    gen_helper_cpsr_write(var, tmp_mask);
+    tcg_temp_free_i32(tmp_mask);
+}
+/* Set NZCV flags from the high 4 bits of var. */
+#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
+
+static void gen_exception(int excp)
+{
+    TCGv tmp = tcg_temp_new_i32();
+    tcg_gen_movi_i32(tmp, excp);
+    gen_helper_exception(tmp);
+    tcg_temp_free_i32(tmp);
+}
+
+static void gen_smul_dual(TCGv a, TCGv b)
+{
+    TCGv tmp1 = tcg_temp_new_i32();
+    TCGv tmp2 = tcg_temp_new_i32();
+    tcg_gen_ext16s_i32(tmp1, a);
+    tcg_gen_ext16s_i32(tmp2, b);
+    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
+    tcg_temp_free_i32(tmp2);
+    tcg_gen_sari_i32(a, a, 16);
+    tcg_gen_sari_i32(b, b, 16);
+    tcg_gen_mul_i32(b, b, a);
+    tcg_gen_mov_i32(a, tmp1);
+    tcg_temp_free_i32(tmp1);
+}
+
+/* Byteswap each halfword.
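+   e.g. 0xAABBCCDD becomes 0xBBAADDCC, matching the REV16 instruction.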
*/ +static void gen_rev16(TCGv var) +{ + TCGv tmp = tcg_temp_new_i32(); + tcg_gen_shri_i32(tmp, var, 8); + tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff); + tcg_gen_shli_i32(var, var, 8); + tcg_gen_andi_i32(var, var, 0xff00ff00); + tcg_gen_or_i32(var, var, tmp); + tcg_temp_free_i32(tmp); +} + +/* Byteswap low halfword and sign extend. */ +static void gen_revsh(TCGv var) +{ + tcg_gen_ext16u_i32(var, var); + tcg_gen_bswap16_i32(var, var); + tcg_gen_ext16s_i32(var, var); +} + +/* Unsigned bitfield extract. */ +static void gen_ubfx(TCGv var, int shift, uint32_t mask) +{ + if (shift) + tcg_gen_shri_i32(var, var, shift); + tcg_gen_andi_i32(var, var, mask); +} + +/* Signed bitfield extract. */ +static void gen_sbfx(TCGv var, int shift, int width) +{ + uint32_t signbit; + + if (shift) + tcg_gen_sari_i32(var, var, shift); + if (shift + width < 32) { + signbit = 1u << (width - 1); + tcg_gen_andi_i32(var, var, (1u << width) - 1); + tcg_gen_xori_i32(var, var, signbit); + tcg_gen_subi_i32(var, var, signbit); + } +} + +/* Bitfield insertion. Insert val into base. Clobbers base and val. */ +static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask) +{ + tcg_gen_andi_i32(val, val, mask); + tcg_gen_shli_i32(val, val, shift); + tcg_gen_andi_i32(base, base, ~(mask << shift)); + tcg_gen_or_i32(dest, base, val); +} + +/* Return (b << 32) + a. Mark inputs as dead */ +static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b) +{ + TCGv_i64 tmp64 = tcg_temp_new_i64(); + + tcg_gen_extu_i32_i64(tmp64, b); + tcg_temp_free_i32(b); + tcg_gen_shli_i64(tmp64, tmp64, 32); + tcg_gen_add_i64(a, tmp64, a); + + tcg_temp_free_i64(tmp64); + return a; +} + +/* Return (b << 32) - a. Mark inputs as dead. */ +static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b) +{ + TCGv_i64 tmp64 = tcg_temp_new_i64(); + + tcg_gen_extu_i32_i64(tmp64, b); + tcg_temp_free_i32(b); + tcg_gen_shli_i64(tmp64, tmp64, 32); + tcg_gen_sub_i64(a, tmp64, a); + + tcg_temp_free_i64(tmp64); + return a; +} + +/* FIXME: Most targets have native widening multiplication. + It would be good to use that instead of a full wide multiply. */ +/* 32x32->64 multiply. Marks inputs as dead. */ +static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b) +{ + TCGv_i64 tmp1 = tcg_temp_new_i64(); + TCGv_i64 tmp2 = tcg_temp_new_i64(); + + tcg_gen_extu_i32_i64(tmp1, a); + tcg_temp_free_i32(a); + tcg_gen_extu_i32_i64(tmp2, b); + tcg_temp_free_i32(b); + tcg_gen_mul_i64(tmp1, tmp1, tmp2); + tcg_temp_free_i64(tmp2); + return tmp1; +} + +static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b) +{ + TCGv_i64 tmp1 = tcg_temp_new_i64(); + TCGv_i64 tmp2 = tcg_temp_new_i64(); + + tcg_gen_ext_i32_i64(tmp1, a); + tcg_temp_free_i32(a); + tcg_gen_ext_i32_i64(tmp2, b); + tcg_temp_free_i32(b); + tcg_gen_mul_i64(tmp1, tmp1, tmp2); + tcg_temp_free_i64(tmp2); + return tmp1; +} + +/* Swap low and high halfwords. */ +static void gen_swap_half(TCGv var) +{ + TCGv tmp = tcg_temp_new_i32(); + tcg_gen_shri_i32(tmp, var, 16); + tcg_gen_shli_i32(var, var, 16); + tcg_gen_or_i32(var, var, tmp); + tcg_temp_free_i32(tmp); +} + +/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead. 
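+   The XOR/mask trick below performs both halfword additions in a single
+   32-bit add while preventing the carry out of bit 15 from reaching the
+   upper halfword: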
+ tmp = (t0 ^ t1) & 0x8000; + t0 &= ~0x8000; + t1 &= ~0x8000; + t0 = (t0 + t1) ^ tmp; + */ + +static void gen_add16(TCGv t0, TCGv t1) +{ + TCGv tmp = tcg_temp_new_i32(); + tcg_gen_xor_i32(tmp, t0, t1); + tcg_gen_andi_i32(tmp, tmp, 0x8000); + tcg_gen_andi_i32(t0, t0, ~0x8000); + tcg_gen_andi_i32(t1, t1, ~0x8000); + tcg_gen_add_i32(t0, t0, t1); + tcg_gen_xor_i32(t0, t0, tmp); + tcg_temp_free_i32(tmp); + tcg_temp_free_i32(t1); +} + +#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, CF)) + +/* Set CF to the top bit of var. */ +static void gen_set_CF_bit31(TCGv var) +{ + TCGv tmp = tcg_temp_new_i32(); + tcg_gen_shri_i32(tmp, var, 31); + gen_set_CF(tmp); + tcg_temp_free_i32(tmp); +} + +/* Set N and Z flags from var. */ +static inline void gen_logic_CC(TCGv var) +{ + tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, NF)); + tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, ZF)); +} + +/* T0 += T1 + CF. */ +static void gen_adc(TCGv t0, TCGv t1) +{ + TCGv tmp; + tcg_gen_add_i32(t0, t0, t1); + tmp = load_cpu_field(CF); + tcg_gen_add_i32(t0, t0, tmp); + tcg_temp_free_i32(tmp); +} + +/* dest = T0 + T1 + CF. */ +static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1) +{ + TCGv tmp; + tcg_gen_add_i32(dest, t0, t1); + tmp = load_cpu_field(CF); + tcg_gen_add_i32(dest, dest, tmp); + tcg_temp_free_i32(tmp); +} + +/* dest = T0 - T1 + CF - 1. */ +static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1) +{ + TCGv tmp; + tcg_gen_sub_i32(dest, t0, t1); + tmp = load_cpu_field(CF); + tcg_gen_add_i32(dest, dest, tmp); + tcg_gen_subi_i32(dest, dest, 1); + tcg_temp_free_i32(tmp); +} + +/* FIXME: Implement this natively. */ +#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1) + +static void shifter_out_im(TCGv var, int shift) +{ + TCGv tmp = tcg_temp_new_i32(); + if (shift == 0) { + tcg_gen_andi_i32(tmp, var, 1); + } else { + tcg_gen_shri_i32(tmp, var, shift); + if (shift != 31) + tcg_gen_andi_i32(tmp, tmp, 1); + } + gen_set_CF(tmp); + tcg_temp_free_i32(tmp); +} + +/* Shift by immediate. Includes special handling for shift == 0. 
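+   In the ARM encoding, LSR #0 and ASR #0 denote a shift by 32, and
+   ROR #0 denotes RRX.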
*/ +static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags) +{ + switch (shiftop) { + case 0: /* LSL */ + if (shift != 0) { + if (flags) + shifter_out_im(var, 32 - shift); + tcg_gen_shli_i32(var, var, shift); + } + break; + case 1: /* LSR */ + if (shift == 0) { + if (flags) { + tcg_gen_shri_i32(var, var, 31); + gen_set_CF(var); + } + tcg_gen_movi_i32(var, 0); + } else { + if (flags) + shifter_out_im(var, shift - 1); + tcg_gen_shri_i32(var, var, shift); + } + break; + case 2: /* ASR */ + if (shift == 0) + shift = 32; + if (flags) + shifter_out_im(var, shift - 1); + if (shift == 32) + shift = 31; + tcg_gen_sari_i32(var, var, shift); + break; + case 3: /* ROR/RRX */ + if (shift != 0) { + if (flags) + shifter_out_im(var, shift - 1); + tcg_gen_rotri_i32(var, var, shift); break; + } else { + TCGv tmp = load_cpu_field(CF); + if (flags) + shifter_out_im(var, 0); + tcg_gen_shri_i32(var, var, 1); + tcg_gen_shli_i32(tmp, tmp, 31); + tcg_gen_or_i32(var, var, tmp); + tcg_temp_free_i32(tmp); + } + } +}; + +static inline void gen_arm_shift_reg(TCGv var, int shiftop, + TCGv shift, int flags) +{ + if (flags) { + switch (shiftop) { + case 0: gen_helper_shl_cc(var, var, shift); break; + case 1: gen_helper_shr_cc(var, var, shift); break; + case 2: gen_helper_sar_cc(var, var, shift); break; + case 3: gen_helper_ror_cc(var, var, shift); break; + } + } else { + switch (shiftop) { + case 0: gen_helper_shl(var, var, shift); break; + case 1: gen_helper_shr(var, var, shift); break; + case 2: gen_helper_sar(var, var, shift); break; + case 3: tcg_gen_andi_i32(shift, shift, 0x1f); + tcg_gen_rotr_i32(var, var, shift); break; + } + } + tcg_temp_free_i32(shift); +} + +#define PAS_OP(pfx) \ + switch (op2) { \ + case 0: gen_pas_helper(glue(pfx,add16)); break; \ + case 1: gen_pas_helper(glue(pfx,addsubx)); break; \ + case 2: gen_pas_helper(glue(pfx,subaddx)); break; \ + case 3: gen_pas_helper(glue(pfx,sub16)); break; \ + case 4: gen_pas_helper(glue(pfx,add8)); break; \ + case 7: gen_pas_helper(glue(pfx,sub8)); break; \ + } +static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b) +{ + TCGv_ptr tmp; + + switch (op1) { +#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp) + case 1: + tmp = tcg_temp_new_ptr(); + tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE)); + PAS_OP(s) + tcg_temp_free_ptr(tmp); + break; + case 5: + tmp = tcg_temp_new_ptr(); + tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE)); + PAS_OP(u) + tcg_temp_free_ptr(tmp); + break; +#undef gen_pas_helper +#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b) + case 2: + PAS_OP(q); + break; + case 3: + PAS_OP(sh); + break; + case 6: + PAS_OP(uq); + break; + case 7: + PAS_OP(uh); + break; +#undef gen_pas_helper + } +} +#undef PAS_OP + +/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. 
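+   The opcode fields of the parallel add/subtract instructions are therefore
+   decoded separately for each instruction set below.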
*/ +#define PAS_OP(pfx) \ + switch (op1) { \ + case 0: gen_pas_helper(glue(pfx,add8)); break; \ + case 1: gen_pas_helper(glue(pfx,add16)); break; \ + case 2: gen_pas_helper(glue(pfx,addsubx)); break; \ + case 4: gen_pas_helper(glue(pfx,sub8)); break; \ + case 5: gen_pas_helper(glue(pfx,sub16)); break; \ + case 6: gen_pas_helper(glue(pfx,subaddx)); break; \ + } +static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b) +{ + TCGv_ptr tmp; + + switch (op2) { +#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp) + case 0: + tmp = tcg_temp_new_ptr(); + tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE)); + PAS_OP(s) + tcg_temp_free_ptr(tmp); + break; + case 4: + tmp = tcg_temp_new_ptr(); + tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE)); + PAS_OP(u) + tcg_temp_free_ptr(tmp); + break; +#undef gen_pas_helper +#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b) + case 1: + PAS_OP(q); + break; + case 2: + PAS_OP(sh); + break; + case 5: + PAS_OP(uq); + break; + case 6: + PAS_OP(uh); + break; +#undef gen_pas_helper + } +} +#undef PAS_OP + +static void gen_test_cc(int cc, int label) +{ + TCGv tmp; + TCGv tmp2; + int inv; + + switch (cc) { + case 0: /* eq: Z */ + tmp = load_cpu_field(ZF); + tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); + break; + case 1: /* ne: !Z */ + tmp = load_cpu_field(ZF); + tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); + break; + case 2: /* cs: C */ + tmp = load_cpu_field(CF); + tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); + break; + case 3: /* cc: !C */ + tmp = load_cpu_field(CF); + tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); + break; + case 4: /* mi: N */ + tmp = load_cpu_field(NF); + tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); + break; + case 5: /* pl: !N */ + tmp = load_cpu_field(NF); + tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); + break; + case 6: /* vs: V */ + tmp = load_cpu_field(VF); + tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); + break; + case 7: /* vc: !V */ + tmp = load_cpu_field(VF); + tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); + break; + case 8: /* hi: C && !Z */ + inv = gen_new_label(); + tmp = load_cpu_field(CF); + tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv); + tcg_temp_free_i32(tmp); + tmp = load_cpu_field(ZF); + tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); + gen_set_label(inv); + break; + case 9: /* ls: !C || Z */ + tmp = load_cpu_field(CF); + tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); + tcg_temp_free_i32(tmp); + tmp = load_cpu_field(ZF); + tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); + break; + case 10: /* ge: N == V -> N ^ V == 0 */ + tmp = load_cpu_field(VF); + tmp2 = load_cpu_field(NF); + tcg_gen_xor_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); + break; + case 11: /* lt: N != V -> N ^ V != 0 */ + tmp = load_cpu_field(VF); + tmp2 = load_cpu_field(NF); + tcg_gen_xor_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); + break; + case 12: /* gt: !Z && N == V */ + inv = gen_new_label(); + tmp = load_cpu_field(ZF); + tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv); + tcg_temp_free_i32(tmp); + tmp = load_cpu_field(VF); + tmp2 = load_cpu_field(NF); + tcg_gen_xor_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); + gen_set_label(inv); + break; + case 13: /* le: Z || N != V */ + tmp = load_cpu_field(ZF); + tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); + tcg_temp_free_i32(tmp); + tmp = load_cpu_field(VF); + tmp2 = 
load_cpu_field(NF); + tcg_gen_xor_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); + break; + default: + fprintf(stderr, "Bad condition code 0x%x\n", cc); + abort(); + } + tcg_temp_free_i32(tmp); +} + +static const uint8_t table_logic_cc[16] = { + 1, /* and */ + 1, /* xor */ + 0, /* sub */ + 0, /* rsb */ + 0, /* add */ + 0, /* adc */ + 0, /* sbc */ + 0, /* rsc */ + 1, /* andl */ + 1, /* xorl */ + 0, /* cmp */ + 0, /* cmn */ + 1, /* orr */ + 1, /* mov */ + 1, /* bic */ + 1, /* mvn */ +}; + +/* Set PC and Thumb state from an immediate address. */ +static inline void gen_bx_im(DisasContext *s, uint32_t addr) +{ + TCGv tmp; + + s->is_jmp = DISAS_UPDATE; + if (s->thumb != (addr & 1)) { + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, addr & 1); + tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb)); + tcg_temp_free_i32(tmp); + } + tcg_gen_movi_i32(cpu_R[15], addr & ~1); +} + +/* Set PC and Thumb state from var. var is marked as dead. */ +static inline void gen_bx(DisasContext *s, TCGv var) +{ + s->is_jmp = DISAS_UPDATE; + tcg_gen_andi_i32(cpu_R[15], var, ~1); + tcg_gen_andi_i32(var, var, 1); + store_cpu_field(var, thumb); +} + +/* Variant of store_reg which uses branch&exchange logic when storing + to r15 in ARM architecture v7 and above. The source must be a temporary + and will be marked as dead. */ +static inline void store_reg_bx(CPUARMState *env, DisasContext *s, + int reg, TCGv var) +{ + if (reg == 15 && ENABLE_ARCH_7) { + gen_bx(s, var); + } else { + store_reg(s, reg, var); + } +} + +/* Variant of store_reg which uses branch&exchange logic when storing + * to r15 in ARM architecture v5T and above. This is used for storing + * the results of a LDR/LDM/POP into r15, and corresponds to the cases + * in the ARM ARM which use the LoadWritePC() pseudocode function. 
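+ * On those architectures a load into the PC interworks: bit 0 of the
+ * loaded value selects between ARM and Thumb state.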
*/ +static inline void store_reg_from_load(CPUARMState *env, DisasContext *s, + int reg, TCGv var) +{ + if (reg == 15 && ENABLE_ARCH_5) { + gen_bx(s, var); + } else { + store_reg(s, reg, var); + } +} + +static inline TCGv gen_ld8s(TCGv addr, int index) +{ + TCGv tmp = tcg_temp_new_i32(); + tcg_gen_qemu_ld8s(tmp, addr, index); + return tmp; +} +static inline TCGv gen_ld8u(TCGv addr, int index) +{ + TCGv tmp = tcg_temp_new_i32(); + tcg_gen_qemu_ld8u(tmp, addr, index); + return tmp; +} +static inline TCGv gen_ld16s(TCGv addr, int index) +{ + TCGv tmp = tcg_temp_new_i32(); + tcg_gen_qemu_ld16s(tmp, addr, index); + return tmp; +} +static inline TCGv gen_ld16u(TCGv addr, int index) +{ + TCGv tmp = tcg_temp_new_i32(); + tcg_gen_qemu_ld16u(tmp, addr, index); + return tmp; +} +static inline TCGv gen_ld32(TCGv addr, int index) +{ + TCGv tmp = tcg_temp_new_i32(); + tcg_gen_qemu_ld32u(tmp, addr, index); + return tmp; +} +static inline TCGv_i64 gen_ld64(TCGv addr, int index) +{ + TCGv_i64 tmp = tcg_temp_new_i64(); + tcg_gen_qemu_ld64(tmp, addr, index); + return tmp; +} +static inline void gen_st8(TCGv val, TCGv addr, int index) +{ + tcg_gen_qemu_st8(val, addr, index); + tcg_temp_free_i32(val); +} +static inline void gen_st16(TCGv val, TCGv addr, int index) +{ + tcg_gen_qemu_st16(val, addr, index); + tcg_temp_free_i32(val); +} +static inline void gen_st32(TCGv val, TCGv addr, int index) +{ + tcg_gen_qemu_st32(val, addr, index); + tcg_temp_free_i32(val); +} +static inline void gen_st64(TCGv_i64 val, TCGv addr, int index) +{ + tcg_gen_qemu_st64(val, addr, index); + tcg_temp_free_i64(val); +} + +static inline void gen_set_pc_im(uint32_t val) +{ + tcg_gen_movi_i32(cpu_R[15], val); +} + +/* Force a TB lookup after an instruction that changes the CPU state. */ +static inline void gen_lookup_tb(DisasContext *s) +{ + tcg_gen_movi_i32(cpu_R[15], s->pc & ~1); + s->is_jmp = DISAS_UPDATE; +} + +static inline void gen_add_data_offset(DisasContext *s, unsigned int insn, + TCGv var) +{ + int val, rm, shift, shiftop; + TCGv offset; + + if (!(insn & (1 << 25))) { + /* immediate */ + val = insn & 0xfff; + if (!(insn & (1 << 23))) + val = -val; + if (val != 0) + tcg_gen_addi_i32(var, var, val); + } else { + /* shift/register */ + rm = (insn) & 0xf; + shift = (insn >> 7) & 0x1f; + shiftop = (insn >> 5) & 3; + offset = load_reg(s, rm); + gen_arm_shift_im(offset, shiftop, shift, 0); + if (!(insn & (1 << 23))) + tcg_gen_sub_i32(var, var, offset); + else + tcg_gen_add_i32(var, var, offset); + tcg_temp_free_i32(offset); + } +} + +static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn, + int extra, TCGv var) +{ + int val, rm; + TCGv offset; + + if (insn & (1 << 22)) { + /* immediate */ + val = (insn & 0xf) | ((insn >> 4) & 0xf0); + if (!(insn & (1 << 23))) + val = -val; + val += extra; + if (val != 0) + tcg_gen_addi_i32(var, var, val); + } else { + /* register */ + if (extra) + tcg_gen_addi_i32(var, var, extra); + rm = (insn) & 0xf; + offset = load_reg(s, rm); + if (!(insn & (1 << 23))) + tcg_gen_sub_i32(var, var, offset); + else + tcg_gen_add_i32(var, var, offset); + tcg_temp_free_i32(offset); + } +} + +static TCGv_ptr get_fpstatus_ptr(int neon) +{ + TCGv_ptr statusptr = tcg_temp_new_ptr(); + int offset; + if (neon) { + offset = offsetof(CPUARMState, vfp.standard_fp_status); + } else { + offset = offsetof(CPUARMState, vfp.fp_status); + } + tcg_gen_addi_ptr(statusptr, cpu_env, offset); + return statusptr; +} + +#define VFP_OP2(name) \ +static inline void gen_vfp_##name(int dp) \ +{ \ + TCGv_ptr fpst = 
get_fpstatus_ptr(0); \ + if (dp) { \ + gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \ + } else { \ + gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \ + } \ + tcg_temp_free_ptr(fpst); \ +} + +VFP_OP2(add) +VFP_OP2(sub) +VFP_OP2(mul) +VFP_OP2(div) + +#undef VFP_OP2 + +static inline void gen_vfp_F1_mul(int dp) +{ + /* Like gen_vfp_mul() but put result in F1 */ + TCGv_ptr fpst = get_fpstatus_ptr(0); + if (dp) { + gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst); + } else { + gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst); + } + tcg_temp_free_ptr(fpst); +} + +static inline void gen_vfp_F1_neg(int dp) +{ + /* Like gen_vfp_neg() but put result in F1 */ + if (dp) { + gen_helper_vfp_negd(cpu_F1d, cpu_F0d); + } else { + gen_helper_vfp_negs(cpu_F1s, cpu_F0s); + } +} + +static inline void gen_vfp_abs(int dp) +{ + if (dp) + gen_helper_vfp_absd(cpu_F0d, cpu_F0d); + else + gen_helper_vfp_abss(cpu_F0s, cpu_F0s); +} + +static inline void gen_vfp_neg(int dp) +{ + if (dp) + gen_helper_vfp_negd(cpu_F0d, cpu_F0d); + else + gen_helper_vfp_negs(cpu_F0s, cpu_F0s); +} + +static inline void gen_vfp_sqrt(int dp) +{ + if (dp) + gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env); + else + gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env); +} + +static inline void gen_vfp_cmp(int dp) +{ + if (dp) + gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env); + else + gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env); +} + +static inline void gen_vfp_cmpe(int dp) +{ + if (dp) + gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env); + else + gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env); +} + +static inline void gen_vfp_F1_ld0(int dp) +{ + if (dp) + tcg_gen_movi_i64(cpu_F1d, 0); + else + tcg_gen_movi_i32(cpu_F1s, 0); +} + +#define VFP_GEN_ITOF(name) \ +static inline void gen_vfp_##name(int dp, int neon) \ +{ \ + TCGv_ptr statusptr = get_fpstatus_ptr(neon); \ + if (dp) { \ + gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \ + } else { \ + gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \ + } \ + tcg_temp_free_ptr(statusptr); \ +} + +VFP_GEN_ITOF(uito) +VFP_GEN_ITOF(sito) +#undef VFP_GEN_ITOF + +#define VFP_GEN_FTOI(name) \ +static inline void gen_vfp_##name(int dp, int neon) \ +{ \ + TCGv_ptr statusptr = get_fpstatus_ptr(neon); \ + if (dp) { \ + gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \ + } else { \ + gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \ + } \ + tcg_temp_free_ptr(statusptr); \ +} + +VFP_GEN_FTOI(toui) +VFP_GEN_FTOI(touiz) +VFP_GEN_FTOI(tosi) +VFP_GEN_FTOI(tosiz) +#undef VFP_GEN_FTOI + +#define VFP_GEN_FIX(name) \ +static inline void gen_vfp_##name(int dp, int shift, int neon) \ +{ \ + TCGv tmp_shift = tcg_const_i32(shift); \ + TCGv_ptr statusptr = get_fpstatus_ptr(neon); \ + if (dp) { \ + gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \ + } else { \ + gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \ + } \ + tcg_temp_free_i32(tmp_shift); \ + tcg_temp_free_ptr(statusptr); \ +} +VFP_GEN_FIX(tosh) +VFP_GEN_FIX(tosl) +VFP_GEN_FIX(touh) +VFP_GEN_FIX(toul) +VFP_GEN_FIX(shto) +VFP_GEN_FIX(slto) +VFP_GEN_FIX(uhto) +VFP_GEN_FIX(ulto) +#undef VFP_GEN_FIX + +static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr) +{ + if (dp) + tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s)); + else + tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s)); +} + +static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr) +{ + if (dp) + tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s)); + else + tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s)); +} + +static 
inline long +vfp_reg_offset (int dp, int reg) +{ + if (dp) + return offsetof(CPUARMState, vfp.regs[reg]); + else if (reg & 1) { + return offsetof(CPUARMState, vfp.regs[reg >> 1]) + + offsetof(CPU_DoubleU, l.upper); + } else { + return offsetof(CPUARMState, vfp.regs[reg >> 1]) + + offsetof(CPU_DoubleU, l.lower); + } +} + +/* Return the offset of a 32-bit piece of a NEON register. + zero is the least significant end of the register. */ +static inline long +neon_reg_offset (int reg, int n) +{ + int sreg; + sreg = reg * 2 + n; + return vfp_reg_offset(0, sreg); +} + +static TCGv neon_load_reg(int reg, int pass) +{ + TCGv tmp = tcg_temp_new_i32(); + tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass)); + return tmp; +} + +static void neon_store_reg(int reg, int pass, TCGv var) +{ + tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass)); + tcg_temp_free_i32(var); +} + +static inline void neon_load_reg64(TCGv_i64 var, int reg) +{ + tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg)); +} + +static inline void neon_store_reg64(TCGv_i64 var, int reg) +{ + tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg)); +} + +#define tcg_gen_ld_f32 tcg_gen_ld_i32 +#define tcg_gen_ld_f64 tcg_gen_ld_i64 +#define tcg_gen_st_f32 tcg_gen_st_i32 +#define tcg_gen_st_f64 tcg_gen_st_i64 + +static inline void gen_mov_F0_vreg(int dp, int reg) +{ + if (dp) + tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg)); + else + tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg)); +} + +static inline void gen_mov_F1_vreg(int dp, int reg) +{ + if (dp) + tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg)); + else + tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg)); +} + +static inline void gen_mov_vreg_F0(int dp, int reg) +{ + if (dp) + tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg)); + else + tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg)); +} + +#define ARM_CP_RW_BIT (1 << 20) + +static inline void iwmmxt_load_reg(TCGv_i64 var, int reg) +{ + tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg])); +} + +static inline void iwmmxt_store_reg(TCGv_i64 var, int reg) +{ + tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg])); +} + +static inline TCGv iwmmxt_load_creg(int reg) +{ + TCGv var = tcg_temp_new_i32(); + tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg])); + return var; +} + +static inline void iwmmxt_store_creg(int reg, TCGv var) +{ + tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg])); + tcg_temp_free_i32(var); +} + +static inline void gen_op_iwmmxt_movq_wRn_M0(int rn) +{ + iwmmxt_store_reg(cpu_M0, rn); +} + +static inline void gen_op_iwmmxt_movq_M0_wRn(int rn) +{ + iwmmxt_load_reg(cpu_M0, rn); +} + +static inline void gen_op_iwmmxt_orq_M0_wRn(int rn) +{ + iwmmxt_load_reg(cpu_V1, rn); + tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1); +} + +static inline void gen_op_iwmmxt_andq_M0_wRn(int rn) +{ + iwmmxt_load_reg(cpu_V1, rn); + tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1); +} + +static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn) +{ + iwmmxt_load_reg(cpu_V1, rn); + tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1); +} + +#define IWMMXT_OP(name) \ +static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \ +{ \ + iwmmxt_load_reg(cpu_V1, rn); \ + gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \ +} + +#define IWMMXT_OP_ENV(name) \ +static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \ +{ \ + iwmmxt_load_reg(cpu_V1, rn); \ + gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \ +} + +#define IWMMXT_OP_ENV_SIZE(name) \ 
+IWMMXT_OP_ENV(name##b) \ +IWMMXT_OP_ENV(name##w) \ +IWMMXT_OP_ENV(name##l) + +#define IWMMXT_OP_ENV1(name) \ +static inline void gen_op_iwmmxt_##name##_M0(void) \ +{ \ + gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \ +} + +IWMMXT_OP(maddsq) +IWMMXT_OP(madduq) +IWMMXT_OP(sadb) +IWMMXT_OP(sadw) +IWMMXT_OP(mulslw) +IWMMXT_OP(mulshw) +IWMMXT_OP(mululw) +IWMMXT_OP(muluhw) +IWMMXT_OP(macsw) +IWMMXT_OP(macuw) + +IWMMXT_OP_ENV_SIZE(unpackl) +IWMMXT_OP_ENV_SIZE(unpackh) + +IWMMXT_OP_ENV1(unpacklub) +IWMMXT_OP_ENV1(unpackluw) +IWMMXT_OP_ENV1(unpacklul) +IWMMXT_OP_ENV1(unpackhub) +IWMMXT_OP_ENV1(unpackhuw) +IWMMXT_OP_ENV1(unpackhul) +IWMMXT_OP_ENV1(unpacklsb) +IWMMXT_OP_ENV1(unpacklsw) +IWMMXT_OP_ENV1(unpacklsl) +IWMMXT_OP_ENV1(unpackhsb) +IWMMXT_OP_ENV1(unpackhsw) +IWMMXT_OP_ENV1(unpackhsl) + +IWMMXT_OP_ENV_SIZE(cmpeq) +IWMMXT_OP_ENV_SIZE(cmpgtu) +IWMMXT_OP_ENV_SIZE(cmpgts) + +IWMMXT_OP_ENV_SIZE(mins) +IWMMXT_OP_ENV_SIZE(minu) +IWMMXT_OP_ENV_SIZE(maxs) +IWMMXT_OP_ENV_SIZE(maxu) + +IWMMXT_OP_ENV_SIZE(subn) +IWMMXT_OP_ENV_SIZE(addn) +IWMMXT_OP_ENV_SIZE(subu) +IWMMXT_OP_ENV_SIZE(addu) +IWMMXT_OP_ENV_SIZE(subs) +IWMMXT_OP_ENV_SIZE(adds) + +IWMMXT_OP_ENV(avgb0) +IWMMXT_OP_ENV(avgb1) +IWMMXT_OP_ENV(avgw0) +IWMMXT_OP_ENV(avgw1) + +//IWMMXT_OP(msadb) + +IWMMXT_OP_ENV(packuw) +IWMMXT_OP_ENV(packul) +IWMMXT_OP_ENV(packuq) +IWMMXT_OP_ENV(packsw) +IWMMXT_OP_ENV(packsl) +IWMMXT_OP_ENV(packsq) + +static void gen_op_iwmmxt_set_mup(void) +{ + TCGv tmp; + tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]); + tcg_gen_ori_i32(tmp, tmp, 2); + store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]); +} + +static void gen_op_iwmmxt_set_cup(void) +{ + TCGv tmp; + tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]); + tcg_gen_ori_i32(tmp, tmp, 1); + store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]); +} + +static void gen_op_iwmmxt_setpsr_nz(void) +{ + TCGv tmp = tcg_temp_new_i32(); + gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0); + store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]); +} + +static inline void gen_op_iwmmxt_addl_M0_wRn(int rn) +{ + iwmmxt_load_reg(cpu_V1, rn); + tcg_gen_ext32u_i64(cpu_V1, cpu_V1); + tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1); +} + +static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest) +{ + int rd; + uint32_t offset; + TCGv tmp; + + rd = (insn >> 16) & 0xf; + tmp = load_reg(s, rd); + + offset = (insn & 0xff) << ((insn >> 7) & 2); + if (insn & (1 << 24)) { + /* Pre indexed */ + if (insn & (1 << 23)) + tcg_gen_addi_i32(tmp, tmp, offset); + else + tcg_gen_addi_i32(tmp, tmp, -offset); + tcg_gen_mov_i32(dest, tmp); + if (insn & (1 << 21)) + store_reg(s, rd, tmp); + else + tcg_temp_free_i32(tmp); + } else if (insn & (1 << 21)) { + /* Post indexed */ + tcg_gen_mov_i32(dest, tmp); + if (insn & (1 << 23)) + tcg_gen_addi_i32(tmp, tmp, offset); + else + tcg_gen_addi_i32(tmp, tmp, -offset); + store_reg(s, rd, tmp); + } else if (!(insn & (1 << 23))) + return 1; + return 0; +} + +static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest) +{ + int rd = (insn >> 0) & 0xf; + TCGv tmp; + + if (insn & (1 << 8)) { + if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) { + return 1; + } else { + tmp = iwmmxt_load_creg(rd); + } + } else { + tmp = tcg_temp_new_i32(); + iwmmxt_load_reg(cpu_V0, rd); + tcg_gen_trunc_i64_i32(tmp, cpu_V0); + } + tcg_gen_andi_i32(tmp, tmp, mask); + tcg_gen_mov_i32(dest, tmp); + tcg_temp_free_i32(tmp); + return 0; +} + +/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred + (ie. an undefined instruction). 
*/ +static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn) +{ + int rd, wrd; + int rdhi, rdlo, rd0, rd1, i; + TCGv addr; + TCGv tmp, tmp2, tmp3; + + if ((insn & 0x0e000e00) == 0x0c000000) { + if ((insn & 0x0fe00ff0) == 0x0c400000) { + wrd = insn & 0xf; + rdlo = (insn >> 12) & 0xf; + rdhi = (insn >> 16) & 0xf; + if (insn & ARM_CP_RW_BIT) { /* TMRRC */ + iwmmxt_load_reg(cpu_V0, wrd); + tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0); + tcg_gen_shri_i64(cpu_V0, cpu_V0, 32); + tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0); + } else { /* TMCRR */ + tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]); + iwmmxt_store_reg(cpu_V0, wrd); + gen_op_iwmmxt_set_mup(); + } + return 0; + } + + wrd = (insn >> 12) & 0xf; + addr = tcg_temp_new_i32(); + if (gen_iwmmxt_address(s, insn, addr)) { + tcg_temp_free_i32(addr); + return 1; + } + if (insn & ARM_CP_RW_BIT) { + if ((insn >> 28) == 0xf) { /* WLDRW wCx */ + tmp = tcg_temp_new_i32(); + tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s)); + iwmmxt_store_creg(wrd, tmp); + } else { + i = 1; + if (insn & (1 << 8)) { + if (insn & (1 << 22)) { /* WLDRD */ + tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s)); + i = 0; + } else { /* WLDRW wRd */ + tmp = gen_ld32(addr, IS_USER(s)); + } + } else { + if (insn & (1 << 22)) { /* WLDRH */ + tmp = gen_ld16u(addr, IS_USER(s)); + } else { /* WLDRB */ + tmp = gen_ld8u(addr, IS_USER(s)); + } + } + if (i) { + tcg_gen_extu_i32_i64(cpu_M0, tmp); + tcg_temp_free_i32(tmp); + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + } + } else { + if ((insn >> 28) == 0xf) { /* WSTRW wCx */ + tmp = iwmmxt_load_creg(wrd); + gen_st32(tmp, addr, IS_USER(s)); + } else { + gen_op_iwmmxt_movq_M0_wRn(wrd); + tmp = tcg_temp_new_i32(); + if (insn & (1 << 8)) { + if (insn & (1 << 22)) { /* WSTRD */ + tcg_temp_free_i32(tmp); + tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s)); + } else { /* WSTRW wRd */ + tcg_gen_trunc_i64_i32(tmp, cpu_M0); + gen_st32(tmp, addr, IS_USER(s)); + } + } else { + if (insn & (1 << 22)) { /* WSTRH */ + tcg_gen_trunc_i64_i32(tmp, cpu_M0); + gen_st16(tmp, addr, IS_USER(s)); + } else { /* WSTRB */ + tcg_gen_trunc_i64_i32(tmp, cpu_M0); + gen_st8(tmp, addr, IS_USER(s)); + } + } + } + } + tcg_temp_free_i32(addr); + return 0; + } + + if ((insn & 0x0f000000) != 0x0e000000) + return 1; + + switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) { + case 0x000: /* WOR */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 0) & 0xf; + rd1 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + gen_op_iwmmxt_orq_M0_wRn(rd1); + gen_op_iwmmxt_setpsr_nz(); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); + break; + case 0x011: /* TMCR */ + if (insn & 0xf) + return 1; + rd = (insn >> 12) & 0xf; + wrd = (insn >> 16) & 0xf; + switch (wrd) { + case ARM_IWMMXT_wCID: + case ARM_IWMMXT_wCASF: + break; + case ARM_IWMMXT_wCon: + gen_op_iwmmxt_set_cup(); + /* Fall through. 
*/ + case ARM_IWMMXT_wCSSF: + tmp = iwmmxt_load_creg(wrd); + tmp2 = load_reg(s, rd); + tcg_gen_andc_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + iwmmxt_store_creg(wrd, tmp); + break; + case ARM_IWMMXT_wCGR0: + case ARM_IWMMXT_wCGR1: + case ARM_IWMMXT_wCGR2: + case ARM_IWMMXT_wCGR3: + gen_op_iwmmxt_set_cup(); + tmp = load_reg(s, rd); + iwmmxt_store_creg(wrd, tmp); + break; + default: + return 1; + } + break; + case 0x100: /* WXOR */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 0) & 0xf; + rd1 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + gen_op_iwmmxt_xorq_M0_wRn(rd1); + gen_op_iwmmxt_setpsr_nz(); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); + break; + case 0x111: /* TMRC */ + if (insn & 0xf) + return 1; + rd = (insn >> 12) & 0xf; + wrd = (insn >> 16) & 0xf; + tmp = iwmmxt_load_creg(wrd); + store_reg(s, rd, tmp); + break; + case 0x300: /* WANDN */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 0) & 0xf; + rd1 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + tcg_gen_neg_i64(cpu_M0, cpu_M0); + gen_op_iwmmxt_andq_M0_wRn(rd1); + gen_op_iwmmxt_setpsr_nz(); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); + break; + case 0x200: /* WAND */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 0) & 0xf; + rd1 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + gen_op_iwmmxt_andq_M0_wRn(rd1); + gen_op_iwmmxt_setpsr_nz(); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); + break; + case 0x810: case 0xa10: /* WMADD */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 0) & 0xf; + rd1 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + if (insn & (1 << 21)) + gen_op_iwmmxt_maddsq_M0_wRn(rd1); + else + gen_op_iwmmxt_madduq_M0_wRn(rd1); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + break; + case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + switch ((insn >> 22) & 3) { + case 0: + gen_op_iwmmxt_unpacklb_M0_wRn(rd1); + break; + case 1: + gen_op_iwmmxt_unpacklw_M0_wRn(rd1); + break; + case 2: + gen_op_iwmmxt_unpackll_M0_wRn(rd1); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); + break; + case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + switch ((insn >> 22) & 3) { + case 0: + gen_op_iwmmxt_unpackhb_M0_wRn(rd1); + break; + case 1: + gen_op_iwmmxt_unpackhw_M0_wRn(rd1); + break; + case 2: + gen_op_iwmmxt_unpackhl_M0_wRn(rd1); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); + break; + case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + if (insn & (1 << 22)) + gen_op_iwmmxt_sadw_M0_wRn(rd1); + else + gen_op_iwmmxt_sadb_M0_wRn(rd1); + if (!(insn & (1 << 20))) + gen_op_iwmmxt_addl_M0_wRn(wrd); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + break; + case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + if (insn & (1 << 21)) { + if (insn & (1 << 20)) + gen_op_iwmmxt_mulshw_M0_wRn(rd1); + else + 
gen_op_iwmmxt_mulslw_M0_wRn(rd1); + } else { + if (insn & (1 << 20)) + gen_op_iwmmxt_muluhw_M0_wRn(rd1); + else + gen_op_iwmmxt_mululw_M0_wRn(rd1); + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + break; + case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + if (insn & (1 << 21)) + gen_op_iwmmxt_macsw_M0_wRn(rd1); + else + gen_op_iwmmxt_macuw_M0_wRn(rd1); + if (!(insn & (1 << 20))) { + iwmmxt_load_reg(cpu_V1, wrd); + tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1); + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + break; + case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + switch ((insn >> 22) & 3) { + case 0: + gen_op_iwmmxt_cmpeqb_M0_wRn(rd1); + break; + case 1: + gen_op_iwmmxt_cmpeqw_M0_wRn(rd1); + break; + case 2: + gen_op_iwmmxt_cmpeql_M0_wRn(rd1); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); + break; + case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + if (insn & (1 << 22)) { + if (insn & (1 << 20)) + gen_op_iwmmxt_avgw1_M0_wRn(rd1); + else + gen_op_iwmmxt_avgw0_M0_wRn(rd1); + } else { + if (insn & (1 << 20)) + gen_op_iwmmxt_avgb1_M0_wRn(rd1); + else + gen_op_iwmmxt_avgb0_M0_wRn(rd1); + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); + break; + case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3)); + tcg_gen_andi_i32(tmp, tmp, 7); + iwmmxt_load_reg(cpu_V1, rd1); + gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp); + tcg_temp_free_i32(tmp); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + break; + case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */ + if (((insn >> 6) & 3) == 3) + return 1; + rd = (insn >> 12) & 0xf; + wrd = (insn >> 16) & 0xf; + tmp = load_reg(s, rd); + gen_op_iwmmxt_movq_M0_wRn(wrd); + switch ((insn >> 6) & 3) { + case 0: + tmp2 = tcg_const_i32(0xff); + tmp3 = tcg_const_i32((insn & 7) << 3); + break; + case 1: + tmp2 = tcg_const_i32(0xffff); + tmp3 = tcg_const_i32((insn & 3) << 4); + break; + case 2: + tmp2 = tcg_const_i32(0xffffffff); + tmp3 = tcg_const_i32((insn & 1) << 5); + break; + default: + TCGV_UNUSED(tmp2); + TCGV_UNUSED(tmp3); + } + gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3); + tcg_temp_free(tmp3); + tcg_temp_free(tmp2); + tcg_temp_free_i32(tmp); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + break; + case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */ + rd = (insn >> 12) & 0xf; + wrd = (insn >> 16) & 0xf; + if (rd == 15 || ((insn >> 22) & 3) == 3) + return 1; + gen_op_iwmmxt_movq_M0_wRn(wrd); + tmp = tcg_temp_new_i32(); + switch ((insn >> 22) & 3) { + case 0: + tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3); + tcg_gen_trunc_i64_i32(tmp, cpu_M0); + if (insn & 8) { + tcg_gen_ext8s_i32(tmp, tmp); + } else { + tcg_gen_andi_i32(tmp, tmp, 0xff); + } + break; + case 1: + tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4); + tcg_gen_trunc_i64_i32(tmp, cpu_M0); + if (insn & 8) { + 
tcg_gen_ext16s_i32(tmp, tmp); + } else { + tcg_gen_andi_i32(tmp, tmp, 0xffff); + } + break; + case 2: + tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5); + tcg_gen_trunc_i64_i32(tmp, cpu_M0); + break; + } + store_reg(s, rd, tmp); + break; + case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */ + if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3) + return 1; + tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF); + switch ((insn >> 22) & 3) { + case 0: + tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0); + break; + case 1: + tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4); + break; + case 2: + tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12); + break; + } + tcg_gen_shli_i32(tmp, tmp, 28); + gen_set_nzcv(tmp); + tcg_temp_free_i32(tmp); + break; + case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */ + if (((insn >> 6) & 3) == 3) + return 1; + rd = (insn >> 12) & 0xf; + wrd = (insn >> 16) & 0xf; + tmp = load_reg(s, rd); + switch ((insn >> 6) & 3) { + case 0: + gen_helper_iwmmxt_bcstb(cpu_M0, tmp); + break; + case 1: + gen_helper_iwmmxt_bcstw(cpu_M0, tmp); + break; + case 2: + gen_helper_iwmmxt_bcstl(cpu_M0, tmp); + break; + } + tcg_temp_free_i32(tmp); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + break; + case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */ + if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3) + return 1; + tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF); + tmp2 = tcg_temp_new_i32(); + tcg_gen_mov_i32(tmp2, tmp); + switch ((insn >> 22) & 3) { + case 0: + for (i = 0; i < 7; i ++) { + tcg_gen_shli_i32(tmp2, tmp2, 4); + tcg_gen_and_i32(tmp, tmp, tmp2); + } + break; + case 1: + for (i = 0; i < 3; i ++) { + tcg_gen_shli_i32(tmp2, tmp2, 8); + tcg_gen_and_i32(tmp, tmp, tmp2); + } + break; + case 2: + tcg_gen_shli_i32(tmp2, tmp2, 16); + tcg_gen_and_i32(tmp, tmp, tmp2); + break; + } + gen_set_nzcv(tmp); + tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp); + break; + case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + switch ((insn >> 22) & 3) { + case 0: + gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0); + break; + case 1: + gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0); + break; + case 2: + gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + break; + case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */ + if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3) + return 1; + tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF); + tmp2 = tcg_temp_new_i32(); + tcg_gen_mov_i32(tmp2, tmp); + switch ((insn >> 22) & 3) { + case 0: + for (i = 0; i < 7; i ++) { + tcg_gen_shli_i32(tmp2, tmp2, 4); + tcg_gen_or_i32(tmp, tmp, tmp2); + } + break; + case 1: + for (i = 0; i < 3; i ++) { + tcg_gen_shli_i32(tmp2, tmp2, 8); + tcg_gen_or_i32(tmp, tmp, tmp2); + } + break; + case 2: + tcg_gen_shli_i32(tmp2, tmp2, 16); + tcg_gen_or_i32(tmp, tmp, tmp2); + break; + } + gen_set_nzcv(tmp); + tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp); + break; + case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */ + rd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3) + return 1; + gen_op_iwmmxt_movq_M0_wRn(rd0); + tmp = tcg_temp_new_i32(); + switch ((insn >> 22) & 3) { + case 0: + gen_helper_iwmmxt_msbb(tmp, cpu_M0); + break; + case 1: + gen_helper_iwmmxt_msbw(tmp, cpu_M0); + break; + case 2: + gen_helper_iwmmxt_msbl(tmp, cpu_M0); + 
break; + } + store_reg(s, rd, tmp); + break; + case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */ + case 0x906: case 0xb06: case 0xd06: case 0xf06: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + switch ((insn >> 22) & 3) { + case 0: + if (insn & (1 << 21)) + gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1); + else + gen_op_iwmmxt_cmpgtub_M0_wRn(rd1); + break; + case 1: + if (insn & (1 << 21)) + gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1); + else + gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1); + break; + case 2: + if (insn & (1 << 21)) + gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1); + else + gen_op_iwmmxt_cmpgtul_M0_wRn(rd1); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); + break; + case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */ + case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + switch ((insn >> 22) & 3) { + case 0: + if (insn & (1 << 21)) + gen_op_iwmmxt_unpacklsb_M0(); + else + gen_op_iwmmxt_unpacklub_M0(); + break; + case 1: + if (insn & (1 << 21)) + gen_op_iwmmxt_unpacklsw_M0(); + else + gen_op_iwmmxt_unpackluw_M0(); + break; + case 2: + if (insn & (1 << 21)) + gen_op_iwmmxt_unpacklsl_M0(); + else + gen_op_iwmmxt_unpacklul_M0(); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); + break; + case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */ + case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + switch ((insn >> 22) & 3) { + case 0: + if (insn & (1 << 21)) + gen_op_iwmmxt_unpackhsb_M0(); + else + gen_op_iwmmxt_unpackhub_M0(); + break; + case 1: + if (insn & (1 << 21)) + gen_op_iwmmxt_unpackhsw_M0(); + else + gen_op_iwmmxt_unpackhuw_M0(); + break; + case 2: + if (insn & (1 << 21)) + gen_op_iwmmxt_unpackhsl_M0(); + else + gen_op_iwmmxt_unpackhul_M0(); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); + break; + case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */ + case 0x214: case 0x614: case 0xa14: case 0xe14: + if (((insn >> 22) & 3) == 0) + return 1; + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + tmp = tcg_temp_new_i32(); + if (gen_iwmmxt_shift(insn, 0xff, tmp)) { + tcg_temp_free_i32(tmp); + return 1; + } + switch ((insn >> 22) & 3) { + case 1: + gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp); + break; + case 2: + gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp); + break; + case 3: + gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp); + break; + } + tcg_temp_free_i32(tmp); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); + break; + case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */ + case 0x014: case 0x414: case 0x814: case 0xc14: + if (((insn >> 22) & 3) == 0) + return 1; + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + tmp = tcg_temp_new_i32(); + if (gen_iwmmxt_shift(insn, 0xff, tmp)) { + tcg_temp_free_i32(tmp); + return 1; + } + switch ((insn >> 22) & 3) { + case 1: + gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp); + break; + case 2: + gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp); + break; + case 3: + gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp); + break; + } 
+ tcg_temp_free_i32(tmp); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); + break; + case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */ + case 0x114: case 0x514: case 0x914: case 0xd14: + if (((insn >> 22) & 3) == 0) + return 1; + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + tmp = tcg_temp_new_i32(); + if (gen_iwmmxt_shift(insn, 0xff, tmp)) { + tcg_temp_free_i32(tmp); + return 1; + } + switch ((insn >> 22) & 3) { + case 1: + gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp); + break; + case 2: + gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp); + break; + case 3: + gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp); + break; + } + tcg_temp_free_i32(tmp); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); + break; + case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */ + case 0x314: case 0x714: case 0xb14: case 0xf14: + if (((insn >> 22) & 3) == 0) + return 1; + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + tmp = tcg_temp_new_i32(); + switch ((insn >> 22) & 3) { + case 1: + if (gen_iwmmxt_shift(insn, 0xf, tmp)) { + tcg_temp_free_i32(tmp); + return 1; + } + gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp); + break; + case 2: + if (gen_iwmmxt_shift(insn, 0x1f, tmp)) { + tcg_temp_free_i32(tmp); + return 1; + } + gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp); + break; + case 3: + if (gen_iwmmxt_shift(insn, 0x3f, tmp)) { + tcg_temp_free_i32(tmp); + return 1; + } + gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp); + break; + } + tcg_temp_free_i32(tmp); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); + break; + case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */ + case 0x916: case 0xb16: case 0xd16: case 0xf16: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + switch ((insn >> 22) & 3) { + case 0: + if (insn & (1 << 21)) + gen_op_iwmmxt_minsb_M0_wRn(rd1); + else + gen_op_iwmmxt_minub_M0_wRn(rd1); + break; + case 1: + if (insn & (1 << 21)) + gen_op_iwmmxt_minsw_M0_wRn(rd1); + else + gen_op_iwmmxt_minuw_M0_wRn(rd1); + break; + case 2: + if (insn & (1 << 21)) + gen_op_iwmmxt_minsl_M0_wRn(rd1); + else + gen_op_iwmmxt_minul_M0_wRn(rd1); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + break; + case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */ + case 0x816: case 0xa16: case 0xc16: case 0xe16: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + switch ((insn >> 22) & 3) { + case 0: + if (insn & (1 << 21)) + gen_op_iwmmxt_maxsb_M0_wRn(rd1); + else + gen_op_iwmmxt_maxub_M0_wRn(rd1); + break; + case 1: + if (insn & (1 << 21)) + gen_op_iwmmxt_maxsw_M0_wRn(rd1); + else + gen_op_iwmmxt_maxuw_M0_wRn(rd1); + break; + case 2: + if (insn & (1 << 21)) + gen_op_iwmmxt_maxsl_M0_wRn(rd1); + else + gen_op_iwmmxt_maxul_M0_wRn(rd1); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + break; + case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */ + case 0x402: case 0x502: case 0x602: case 0x702: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + tmp = tcg_const_i32((insn >> 20) & 3); + iwmmxt_load_reg(cpu_V1, rd1); + gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, 
tmp); + tcg_temp_free(tmp); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + break; + case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */ + case 0x41a: case 0x51a: case 0x61a: case 0x71a: + case 0x81a: case 0x91a: case 0xa1a: case 0xb1a: + case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + switch ((insn >> 20) & 0xf) { + case 0x0: + gen_op_iwmmxt_subnb_M0_wRn(rd1); + break; + case 0x1: + gen_op_iwmmxt_subub_M0_wRn(rd1); + break; + case 0x3: + gen_op_iwmmxt_subsb_M0_wRn(rd1); + break; + case 0x4: + gen_op_iwmmxt_subnw_M0_wRn(rd1); + break; + case 0x5: + gen_op_iwmmxt_subuw_M0_wRn(rd1); + break; + case 0x7: + gen_op_iwmmxt_subsw_M0_wRn(rd1); + break; + case 0x8: + gen_op_iwmmxt_subnl_M0_wRn(rd1); + break; + case 0x9: + gen_op_iwmmxt_subul_M0_wRn(rd1); + break; + case 0xb: + gen_op_iwmmxt_subsl_M0_wRn(rd1); + break; + default: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); + break; + case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */ + case 0x41e: case 0x51e: case 0x61e: case 0x71e: + case 0x81e: case 0x91e: case 0xa1e: case 0xb1e: + case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f)); + gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp); + tcg_temp_free(tmp); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); + break; + case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */ + case 0x418: case 0x518: case 0x618: case 0x718: + case 0x818: case 0x918: case 0xa18: case 0xb18: + case 0xc18: case 0xd18: case 0xe18: case 0xf18: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + switch ((insn >> 20) & 0xf) { + case 0x0: + gen_op_iwmmxt_addnb_M0_wRn(rd1); + break; + case 0x1: + gen_op_iwmmxt_addub_M0_wRn(rd1); + break; + case 0x3: + gen_op_iwmmxt_addsb_M0_wRn(rd1); + break; + case 0x4: + gen_op_iwmmxt_addnw_M0_wRn(rd1); + break; + case 0x5: + gen_op_iwmmxt_adduw_M0_wRn(rd1); + break; + case 0x7: + gen_op_iwmmxt_addsw_M0_wRn(rd1); + break; + case 0x8: + gen_op_iwmmxt_addnl_M0_wRn(rd1); + break; + case 0x9: + gen_op_iwmmxt_addul_M0_wRn(rd1); + break; + case 0xb: + gen_op_iwmmxt_addsl_M0_wRn(rd1); + break; + default: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); + break; + case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */ + case 0x408: case 0x508: case 0x608: case 0x708: + case 0x808: case 0x908: case 0xa08: case 0xb08: + case 0xc08: case 0xd08: case 0xe08: case 0xf08: + if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0) + return 1; + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + switch ((insn >> 22) & 3) { + case 1: + if (insn & (1 << 21)) + gen_op_iwmmxt_packsw_M0_wRn(rd1); + else + gen_op_iwmmxt_packuw_M0_wRn(rd1); + break; + case 2: + if (insn & (1 << 21)) + gen_op_iwmmxt_packsl_M0_wRn(rd1); + else + gen_op_iwmmxt_packul_M0_wRn(rd1); + break; + case 3: + if (insn & (1 << 21)) + gen_op_iwmmxt_packsq_M0_wRn(rd1); + else + gen_op_iwmmxt_packuq_M0_wRn(rd1); + break; + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); + break; + case 0x201: case 0x203: case 0x205: case 
0x207: + case 0x209: case 0x20b: case 0x20d: case 0x20f: + case 0x211: case 0x213: case 0x215: case 0x217: + case 0x219: case 0x21b: case 0x21d: case 0x21f: + wrd = (insn >> 5) & 0xf; + rd0 = (insn >> 12) & 0xf; + rd1 = (insn >> 0) & 0xf; + if (rd0 == 0xf || rd1 == 0xf) + return 1; + gen_op_iwmmxt_movq_M0_wRn(wrd); + tmp = load_reg(s, rd0); + tmp2 = load_reg(s, rd1); + switch ((insn >> 16) & 0xf) { + case 0x0: /* TMIA */ + gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2); + break; + case 0x8: /* TMIAPH */ + gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2); + break; + case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */ + if (insn & (1 << 16)) + tcg_gen_shri_i32(tmp, tmp, 16); + if (insn & (1 << 17)) + tcg_gen_shri_i32(tmp2, tmp2, 16); + gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2); + break; + default: + tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp); + return 1; + } + tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + break; + default: + return 1; + } + + return 0; +} + +/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred + (ie. an undefined instruction). */ +static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn) +{ + int acc, rd0, rd1, rdhi, rdlo; + TCGv tmp, tmp2; + + if ((insn & 0x0ff00f10) == 0x0e200010) { + /* Multiply with Internal Accumulate Format */ + rd0 = (insn >> 12) & 0xf; + rd1 = insn & 0xf; + acc = (insn >> 5) & 7; + + if (acc != 0) + return 1; + + tmp = load_reg(s, rd0); + tmp2 = load_reg(s, rd1); + switch ((insn >> 16) & 0xf) { + case 0x0: /* MIA */ + gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2); + break; + case 0x8: /* MIAPH */ + gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2); + break; + case 0xc: /* MIABB */ + case 0xd: /* MIABT */ + case 0xe: /* MIATB */ + case 0xf: /* MIATT */ + if (insn & (1 << 16)) + tcg_gen_shri_i32(tmp, tmp, 16); + if (insn & (1 << 17)) + tcg_gen_shri_i32(tmp2, tmp2, 16); + gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2); + break; + default: + return 1; + } + tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp); + + gen_op_iwmmxt_movq_wRn_M0(acc); + return 0; + } + + if ((insn & 0x0fe00ff8) == 0x0c400000) { + /* Internal Accumulator Access Format */ + rdhi = (insn >> 16) & 0xf; + rdlo = (insn >> 12) & 0xf; + acc = insn & 7; + + if (acc != 0) + return 1; + + if (insn & ARM_CP_RW_BIT) { /* MRA */ + iwmmxt_load_reg(cpu_V0, acc); + tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0); + tcg_gen_shri_i64(cpu_V0, cpu_V0, 32); + tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0); + tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1); + } else { /* MAR */ + tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]); + iwmmxt_store_reg(cpu_V0, acc); + } + return 0; + } + + return 1; +} + +/* Disassemble system coprocessor instruction. Return nonzero if + instruction is not defined. 
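+ Generic coprocessor accesses go through the per-coprocessor
+ cp_read/cp_write hooks in env->cp[]; user-mode accesses are
+ always rejected.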
*/ +static int disas_cp_insn(CPUARMState *env, DisasContext *s, uint32_t insn) +{ + TCGv tmp, tmp2; + uint32_t rd = (insn >> 12) & 0xf; + uint32_t cp = (insn >> 8) & 0xf; + if (IS_USER(s)) { + return 1; + } + + if (insn & ARM_CP_RW_BIT) { + if (!env->cp[cp].cp_read) + return 1; + gen_set_pc_im(s->pc); + tmp = tcg_temp_new_i32(); + tmp2 = tcg_const_i32(insn); + gen_helper_get_cp(tmp, cpu_env, tmp2); + tcg_temp_free(tmp2); + store_reg(s, rd, tmp); + } else { + if (!env->cp[cp].cp_write) + return 1; + gen_set_pc_im(s->pc); + tmp = load_reg(s, rd); + tmp2 = tcg_const_i32(insn); + gen_helper_set_cp(cpu_env, tmp2, tmp); + tcg_temp_free(tmp2); + tcg_temp_free_i32(tmp); + } + return 0; +} + +static int cp15_user_ok(CPUARMState *env, uint32_t insn) +{ + int cpn = (insn >> 16) & 0xf; + int cpm = insn & 0xf; + int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38); + + if (arm_feature(env, ARM_FEATURE_V7) && cpn == 9) { + /* Performance monitor registers fall into three categories: + * (a) always UNDEF in usermode + * (b) UNDEF only if PMUSERENR.EN is 0 + * (c) always read OK and UNDEF on write (PMUSERENR only) + */ + if ((cpm == 12 && (op < 6)) || + (cpm == 13 && (op < 3))) { + return env->cp15.c9_pmuserenr; + } else if (cpm == 14 && op == 0 && (insn & ARM_CP_RW_BIT)) { + /* PMUSERENR, read only */ + return 1; + } + return 0; + } + + if (cpn == 13 && cpm == 0) { + /* TLS register. */ + if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT))) + return 1; + } + return 0; +} + +static int cp15_tls_load_store(CPUARMState *env, DisasContext *s, uint32_t insn, uint32_t rd) +{ + TCGv tmp; + int cpn = (insn >> 16) & 0xf; + int cpm = insn & 0xf; + int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38); + + if (!arm_feature(env, ARM_FEATURE_V6K)) + return 0; + + if (!(cpn == 13 && cpm == 0)) + return 0; + + if (insn & ARM_CP_RW_BIT) { + switch (op) { + case 2: + tmp = load_cpu_field(cp15.c13_tls1); + break; + case 3: + tmp = load_cpu_field(cp15.c13_tls2); + break; + case 4: + tmp = load_cpu_field(cp15.c13_tls3); + break; + default: + return 0; + } + store_reg(s, rd, tmp); + + } else { + tmp = load_reg(s, rd); + switch (op) { + case 2: + store_cpu_field(tmp, cp15.c13_tls1); + break; + case 3: + store_cpu_field(tmp, cp15.c13_tls2); + break; + case 4: + store_cpu_field(tmp, cp15.c13_tls3); + break; + default: + tcg_temp_free_i32(tmp); + return 0; + } + } + return 1; +} + +/* Disassemble system coprocessor (cp15) instruction. Return nonzero if + instruction is not defined. */ +static int disas_cp15_insn(CPUARMState *env, DisasContext *s, uint32_t insn) +{ + uint32_t rd; + TCGv tmp, tmp2; + + /* M profile cores use memory mapped registers instead of cp15. */ + if (arm_feature(env, ARM_FEATURE_M)) + return 1; + + if ((insn & (1 << 25)) == 0) { + if (insn & (1 << 20)) { + /* mrrc */ + return 1; + } + /* mcrr. Used for block cache operations, so implement as no-op. */ + return 0; + } + if ((insn & (1 << 4)) == 0) { + /* cdp */ + return 1; + } + /* We special case a number of cp15 instructions which were used + * for things which are real instructions in ARMv7. This allows + * them to work in linux-user mode which doesn't provide functional + * get_cp15/set_cp15 helpers, and is more efficient anyway. + */ + switch ((insn & 0x0fff0fff)) { + case 0x0e070f90: + /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores). + * In v7, this must NOP. + */ + if (IS_USER(s)) { + return 1; + } + if (!arm_feature(env, ARM_FEATURE_V7)) { + /* Wait for interrupt. 
*/ + gen_set_pc_im(s->pc); + s->is_jmp = DISAS_WFI; + } + return 0; + case 0x0e070f58: + /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI, + * so this is slightly over-broad. + */ + if (!IS_USER(s) && !arm_feature(env, ARM_FEATURE_V6)) { + /* Wait for interrupt. */ + gen_set_pc_im(s->pc); + s->is_jmp = DISAS_WFI; + return 0; + } + /* Otherwise continue to handle via helper function. + * In particular, on v7 and some v6 cores this is one of + * the VA-PA registers. + */ + break; + case 0x0e070f3d: + /* 0,c7,c13,1: prefetch-by-MVA in v6, NOP in v7 */ + if (arm_feature(env, ARM_FEATURE_V6)) { + return IS_USER(s) ? 1 : 0; + } + break; + case 0x0e070f95: /* 0,c7,c5,4 : ISB */ + case 0x0e070f9a: /* 0,c7,c10,4: DSB */ + case 0x0e070fba: /* 0,c7,c10,5: DMB */ + /* Barriers in both v6 and v7 */ + if (arm_feature(env, ARM_FEATURE_V6)) { + return 0; + } + break; + default: + break; + } + + if (IS_USER(s) && !cp15_user_ok(env, insn)) { + return 1; + } + + rd = (insn >> 12) & 0xf; + + if (cp15_tls_load_store(env, s, insn, rd)) + return 0; + + tmp2 = tcg_const_i32(insn); + if (insn & ARM_CP_RW_BIT) { + tmp = tcg_temp_new_i32(); + gen_helper_get_cp15(tmp, cpu_env, tmp2); + /* If the destination register is r15 then sets condition codes. */ + if (rd != 15) + store_reg(s, rd, tmp); + else + tcg_temp_free_i32(tmp); + } else { + tmp = load_reg(s, rd); + gen_helper_set_cp15(cpu_env, tmp2, tmp); + tcg_temp_free_i32(tmp); + /* Normally we would always end the TB here, but Linux + * arch/arm/mach-pxa/sleep.S expects two instructions following + * an MMU enable to execute from cache. Imitate this behaviour. */ + if (!arm_feature(env, ARM_FEATURE_XSCALE) || + (insn & 0x0fff0fff) != 0x0e010f10) + gen_lookup_tb(s); + } + tcg_temp_free_i32(tmp2); + return 0; +} + +#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n)) +#define VFP_SREG(insn, bigbit, smallbit) \ + ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1)) +#define VFP_DREG(reg, insn, bigbit, smallbit) do { \ + if (arm_feature(env, ARM_FEATURE_VFP3)) { \ + reg = (((insn) >> (bigbit)) & 0x0f) \ + | (((insn) >> ((smallbit) - 4)) & 0x10); \ + } else { \ + if (insn & (1 << (smallbit))) \ + return 1; \ + reg = ((insn) >> (bigbit)) & 0x0f; \ + }} while (0) + +#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22) +#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22) +#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7) +#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7) +#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5) +#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5) + +/* Move between integer and VFP cores. 
*/ +static TCGv gen_vfp_mrs(void) +{ + TCGv tmp = tcg_temp_new_i32(); + tcg_gen_mov_i32(tmp, cpu_F0s); + return tmp; +} + +static void gen_vfp_msr(TCGv tmp) +{ + tcg_gen_mov_i32(cpu_F0s, tmp); + tcg_temp_free_i32(tmp); +} + +static void gen_neon_dup_u8(TCGv var, int shift) +{ + TCGv tmp = tcg_temp_new_i32(); + if (shift) + tcg_gen_shri_i32(var, var, shift); + tcg_gen_ext8u_i32(var, var); + tcg_gen_shli_i32(tmp, var, 8); + tcg_gen_or_i32(var, var, tmp); + tcg_gen_shli_i32(tmp, var, 16); + tcg_gen_or_i32(var, var, tmp); + tcg_temp_free_i32(tmp); +} + +static void gen_neon_dup_low16(TCGv var) +{ + TCGv tmp = tcg_temp_new_i32(); + tcg_gen_ext16u_i32(var, var); + tcg_gen_shli_i32(tmp, var, 16); + tcg_gen_or_i32(var, var, tmp); + tcg_temp_free_i32(tmp); +} + +static void gen_neon_dup_high16(TCGv var) +{ + TCGv tmp = tcg_temp_new_i32(); + tcg_gen_andi_i32(var, var, 0xffff0000); + tcg_gen_shri_i32(tmp, var, 16); + tcg_gen_or_i32(var, var, tmp); + tcg_temp_free_i32(tmp); +} + +static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size) +{ + /* Load a single Neon element and replicate into a 32 bit TCG reg */ + TCGv tmp; + switch (size) { + case 0: + tmp = gen_ld8u(addr, IS_USER(s)); + gen_neon_dup_u8(tmp, 0); + break; + case 1: + tmp = gen_ld16u(addr, IS_USER(s)); + gen_neon_dup_low16(tmp); + break; + case 2: + tmp = gen_ld32(addr, IS_USER(s)); + break; + default: /* Avoid compiler warnings. */ + abort(); + } + return tmp; +} + +/* Disassemble a VFP instruction. Returns nonzero if an error occurred + (ie. an undefined instruction). */ +static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn) +{ + uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask; + int dp, veclen; + TCGv addr; + TCGv tmp; + TCGv tmp2; + + if (!arm_feature(env, ARM_FEATURE_VFP)) + return 1; + + if (!s->vfp_enabled) { + /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */ + if ((insn & 0x0fe00fff) != 0x0ee00a10) + return 1; + rn = (insn >> 16) & 0xf; + if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC + && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) + return 1; + } + dp = ((insn & 0xf00) == 0xb00); + switch ((insn >> 24) & 0xf) { + case 0xe: + if (insn & (1 << 4)) { + /* single register transfer */ + rd = (insn >> 12) & 0xf; + if (dp) { + int size; + int pass; + + VFP_DREG_N(rn, insn); + if (insn & 0xf) + return 1; + if (insn & 0x00c00060 + && !arm_feature(env, ARM_FEATURE_NEON)) + return 1; + + pass = (insn >> 21) & 1; + if (insn & (1 << 22)) { + size = 0; + offset = ((insn >> 5) & 3) * 8; + } else if (insn & (1 << 5)) { + size = 1; + offset = (insn & (1 << 6)) ? 
16 : 0; + } else { + size = 2; + offset = 0; + } + if (insn & ARM_CP_RW_BIT) { + /* vfp->arm */ + tmp = neon_load_reg(rn, pass); + switch (size) { + case 0: + if (offset) + tcg_gen_shri_i32(tmp, tmp, offset); + if (insn & (1 << 23)) + gen_uxtb(tmp); + else + gen_sxtb(tmp); + break; + case 1: + if (insn & (1 << 23)) { + if (offset) { + tcg_gen_shri_i32(tmp, tmp, 16); + } else { + gen_uxth(tmp); + } + } else { + if (offset) { + tcg_gen_sari_i32(tmp, tmp, 16); + } else { + gen_sxth(tmp); + } + } + break; + case 2: + break; + } + store_reg(s, rd, tmp); + } else { + /* arm->vfp */ + tmp = load_reg(s, rd); + if (insn & (1 << 23)) { + /* VDUP */ + if (size == 0) { + gen_neon_dup_u8(tmp, 0); + } else if (size == 1) { + gen_neon_dup_low16(tmp); + } + for (n = 0; n <= pass * 2; n++) { + tmp2 = tcg_temp_new_i32(); + tcg_gen_mov_i32(tmp2, tmp); + neon_store_reg(rn, n, tmp2); + } + neon_store_reg(rn, n, tmp); + } else { + /* VMOV */ + switch (size) { + case 0: + tmp2 = neon_load_reg(rn, pass); + gen_bfi(tmp, tmp2, tmp, offset, 0xff); + tcg_temp_free_i32(tmp2); + break; + case 1: + tmp2 = neon_load_reg(rn, pass); + gen_bfi(tmp, tmp2, tmp, offset, 0xffff); + tcg_temp_free_i32(tmp2); + break; + case 2: + break; + } + neon_store_reg(rn, pass, tmp); + } + } + } else { /* !dp */ + if ((insn & 0x6f) != 0x00) + return 1; + rn = VFP_SREG_N(insn); + if (insn & ARM_CP_RW_BIT) { + /* vfp->arm */ + if (insn & (1 << 21)) { + /* system register */ + rn >>= 1; + + switch (rn) { + case ARM_VFP_FPSID: + /* VFP2 allows access to FSID from userspace. + VFP3 restricts all id registers to privileged + accesses. */ + if (IS_USER(s) + && arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + tmp = load_cpu_field(vfp.xregs[rn]); + break; + case ARM_VFP_FPEXC: + if (IS_USER(s)) + return 1; + tmp = load_cpu_field(vfp.xregs[rn]); + break; + case ARM_VFP_FPINST: + case ARM_VFP_FPINST2: + /* Not present in VFP3. */ + if (IS_USER(s) + || arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + tmp = load_cpu_field(vfp.xregs[rn]); + break; + case ARM_VFP_FPSCR: + if (rd == 15) { + tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]); + tcg_gen_andi_i32(tmp, tmp, 0xf0000000); + } else { + tmp = tcg_temp_new_i32(); + gen_helper_vfp_get_fpscr(tmp, cpu_env); + } + break; + case ARM_VFP_MVFR0: + case ARM_VFP_MVFR1: + if (IS_USER(s) + || !arm_feature(env, ARM_FEATURE_MVFR)) + return 1; + tmp = load_cpu_field(vfp.xregs[rn]); + break; + default: + return 1; + } + } else { + gen_mov_F0_vreg(0, rn); + tmp = gen_vfp_mrs(); + } + if (rd == 15) { + /* Set the 4 flag bits in the CPSR. */ + gen_set_nzcv(tmp); + tcg_temp_free_i32(tmp); + } else { + store_reg(s, rd, tmp); + } + } else { + /* arm->vfp */ + tmp = load_reg(s, rd); + if (insn & (1 << 21)) { + rn >>= 1; + /* system register */ + switch (rn) { + case ARM_VFP_FPSID: + case ARM_VFP_MVFR0: + case ARM_VFP_MVFR1: + /* Writes are ignored. */ + break; + case ARM_VFP_FPSCR: + gen_helper_vfp_set_fpscr(cpu_env, tmp); + tcg_temp_free_i32(tmp); + gen_lookup_tb(s); + break; + case ARM_VFP_FPEXC: + if (IS_USER(s)) + return 1; + /* TODO: VFP subarchitecture support. + * For now, keep the EN bit only */ + tcg_gen_andi_i32(tmp, tmp, 1 << 30); + store_cpu_field(tmp, vfp.xregs[rn]); + gen_lookup_tb(s); + break; + case ARM_VFP_FPINST: + case ARM_VFP_FPINST2: + store_cpu_field(tmp, vfp.xregs[rn]); + break; + default: + return 1; + } + } else { + gen_vfp_msr(tmp); + gen_mov_vreg_F0(0, rn); + } + } + } + } else { + /* data processing */ + /* The opcode is in bits 23, 21, 20 and 6. 
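+ Bit 23 is op[3], bits 21:20 are op[2:1] and bit 6 is op[0], so op
+ ranges over 0..15; op 15 selects the extension space decoded by rn.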
*/ + op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1); + if (dp) { + if (op == 15) { + /* rn is opcode */ + rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1); + } else { + /* rn is register number */ + VFP_DREG_N(rn, insn); + } + + if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) { + /* Integer or single precision destination. */ + rd = VFP_SREG_D(insn); + } else { + VFP_DREG_D(rd, insn); + } + if (op == 15 && + (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) { + /* VCVT from int is always from S reg regardless of dp bit. + * VCVT with immediate frac_bits has same format as SREG_M + */ + rm = VFP_SREG_M(insn); + } else { + VFP_DREG_M(rm, insn); + } + } else { + rn = VFP_SREG_N(insn); + if (op == 15 && rn == 15) { + /* Double precision destination. */ + VFP_DREG_D(rd, insn); + } else { + rd = VFP_SREG_D(insn); + } + /* NB that we implicitly rely on the encoding for the frac_bits + * in VCVT of fixed to float being the same as that of an SREG_M + */ + rm = VFP_SREG_M(insn); + } + + veclen = s->vec_len; + if (op == 15 && rn > 3) + veclen = 0; + + /* Shut up compiler warnings. */ + delta_m = 0; + delta_d = 0; + bank_mask = 0; + + if (veclen > 0) { + if (dp) + bank_mask = 0xc; + else + bank_mask = 0x18; + + /* Figure out what type of vector operation this is. */ + if ((rd & bank_mask) == 0) { + /* scalar */ + veclen = 0; + } else { + if (dp) + delta_d = (s->vec_stride >> 1) + 1; + else + delta_d = s->vec_stride + 1; + + if ((rm & bank_mask) == 0) { + /* mixed scalar/vector */ + delta_m = 0; + } else { + /* vector */ + delta_m = delta_d; + } + } + } + + /* Load the initial operands. */ + if (op == 15) { + switch (rn) { + case 16: + case 17: + /* Integer source */ + gen_mov_F0_vreg(0, rm); + break; + case 8: + case 9: + /* Compare */ + gen_mov_F0_vreg(dp, rd); + gen_mov_F1_vreg(dp, rm); + break; + case 10: + case 11: + /* Compare with zero */ + gen_mov_F0_vreg(dp, rd); + gen_vfp_F1_ld0(dp); + break; + case 20: + case 21: + case 22: + case 23: + case 28: + case 29: + case 30: + case 31: + /* Source and destination the same. */ + gen_mov_F0_vreg(dp, rd); + break; + case 4: + case 5: + case 6: + case 7: + /* VCVTB, VCVTT: only present with the halfprec extension, + * UNPREDICTABLE if bit 8 is set (we choose to UNDEF) + */ + if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) { + return 1; + } + /* Otherwise fall through */ + default: + /* One source operand. */ + gen_mov_F0_vreg(dp, rm); + break; + } + } else { + /* Two source operands. */ + gen_mov_F0_vreg(dp, rn); + gen_mov_F1_vreg(dp, rm); + } + + for (;;) { + /* Perform the calculation. */ + switch (op) { + case 0: /* VMLA: fd + (fn * fm) */ + /* Note that order of inputs to the add matters for NaNs */ + gen_vfp_F1_mul(dp); + gen_mov_F0_vreg(dp, rd); + gen_vfp_add(dp); + break; + case 1: /* VMLS: fd + -(fn * fm) */ + gen_vfp_mul(dp); + gen_vfp_F1_neg(dp); + gen_mov_F0_vreg(dp, rd); + gen_vfp_add(dp); + break; + case 2: /* VNMLS: -fd + (fn * fm) */ + /* Note that it isn't valid to replace (-A + B) with (B - A) + * or similar plausible looking simplifications + * because this will give wrong results for NaNs. 
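+ * (A NaN in fd would propagate un-negated through a plain
+ * subtraction, giving a result NaN with the wrong sign.)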
+ */ + gen_vfp_F1_mul(dp); + gen_mov_F0_vreg(dp, rd); + gen_vfp_neg(dp); + gen_vfp_add(dp); + break; + case 3: /* VNMLA: -fd + -(fn * fm) */ + gen_vfp_mul(dp); + gen_vfp_F1_neg(dp); + gen_mov_F0_vreg(dp, rd); + gen_vfp_neg(dp); + gen_vfp_add(dp); + break; + case 4: /* mul: fn * fm */ + gen_vfp_mul(dp); + break; + case 5: /* nmul: -(fn * fm) */ + gen_vfp_mul(dp); + gen_vfp_neg(dp); + break; + case 6: /* add: fn + fm */ + gen_vfp_add(dp); + break; + case 7: /* sub: fn - fm */ + gen_vfp_sub(dp); + break; + case 8: /* div: fn / fm */ + gen_vfp_div(dp); + break; + case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */ + case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */ + case 12: /* VFMA : fd = muladd( fd, fn, fm) */ + case 13: /* VFMS : fd = muladd( fd, -fn, fm) */ + /* These are fused multiply-add, and must be done as one + * floating point operation with no rounding between the + * multiplication and addition steps. + * NB that doing the negations here as separate steps is + * correct : an input NaN should come out with its sign bit + * flipped if it is a negated-input. + */ + if (!arm_feature(env, ARM_FEATURE_VFP4)) { + return 1; + } + if (dp) { + TCGv_ptr fpst; + TCGv_i64 frd; + if (op & 1) { + /* VFNMS, VFMS */ + gen_helper_vfp_negd(cpu_F0d, cpu_F0d); + } + frd = tcg_temp_new_i64(); + tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd)); + if (op & 2) { + /* VFNMA, VFNMS */ + gen_helper_vfp_negd(frd, frd); + } + fpst = get_fpstatus_ptr(0); + gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d, + cpu_F1d, frd, fpst); + tcg_temp_free_ptr(fpst); + tcg_temp_free_i64(frd); + } else { + TCGv_ptr fpst; + TCGv_i32 frd; + if (op & 1) { + /* VFNMS, VFMS */ + gen_helper_vfp_negs(cpu_F0s, cpu_F0s); + } + frd = tcg_temp_new_i32(); + tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd)); + if (op & 2) { + gen_helper_vfp_negs(frd, frd); + } + fpst = get_fpstatus_ptr(0); + gen_helper_vfp_muladds(cpu_F0s, cpu_F0s, + cpu_F1s, frd, fpst); + tcg_temp_free_ptr(fpst); + tcg_temp_free_i32(frd); + } + break; + case 14: /* fconst */ + if (!arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + + n = (insn << 12) & 0x80000000; + i = ((insn >> 12) & 0x70) | (insn & 0xf); + if (dp) { + if (i & 0x40) + i |= 0x3f80; + else + i |= 0x4000; + n |= i << 16; + tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32); + } else { + if (i & 0x40) + i |= 0x780; + else + i |= 0x800; + n |= i << 19; + tcg_gen_movi_i32(cpu_F0s, n); + } + break; + case 15: /* extension space */ + switch (rn) { + case 0: /* cpy */ + /* no-op */ + break; + case 1: /* abs */ + gen_vfp_abs(dp); + break; + case 2: /* neg */ + gen_vfp_neg(dp); + break; + case 3: /* sqrt */ + gen_vfp_sqrt(dp); + break; + case 4: /* vcvtb.f32.f16 */ + tmp = gen_vfp_mrs(); + tcg_gen_ext16u_i32(tmp, tmp); + gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env); + tcg_temp_free_i32(tmp); + break; + case 5: /* vcvtt.f32.f16 */ + tmp = gen_vfp_mrs(); + tcg_gen_shri_i32(tmp, tmp, 16); + gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env); + tcg_temp_free_i32(tmp); + break; + case 6: /* vcvtb.f16.f32 */ + tmp = tcg_temp_new_i32(); + gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env); + gen_mov_F0_vreg(0, rd); + tmp2 = gen_vfp_mrs(); + tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000); + tcg_gen_or_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + gen_vfp_msr(tmp); + break; + case 7: /* vcvtt.f16.f32 */ + tmp = tcg_temp_new_i32(); + gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env); + tcg_gen_shli_i32(tmp, tmp, 16); + gen_mov_F0_vreg(0, rd); + tmp2 = gen_vfp_mrs(); + tcg_gen_ext16u_i32(tmp2, tmp2); + 
tcg_gen_or_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + gen_vfp_msr(tmp); + break; + case 8: /* cmp */ + gen_vfp_cmp(dp); + break; + case 9: /* cmpe */ + gen_vfp_cmpe(dp); + break; + case 10: /* cmpz */ + gen_vfp_cmp(dp); + break; + case 11: /* cmpez */ + gen_vfp_F1_ld0(dp); + gen_vfp_cmpe(dp); + break; + case 15: /* single<->double conversion */ + if (dp) + gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env); + else + gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env); + break; + case 16: /* fuito */ + gen_vfp_uito(dp, 0); + break; + case 17: /* fsito */ + gen_vfp_sito(dp, 0); + break; + case 20: /* fshto */ + if (!arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + gen_vfp_shto(dp, 16 - rm, 0); + break; + case 21: /* fslto */ + if (!arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + gen_vfp_slto(dp, 32 - rm, 0); + break; + case 22: /* fuhto */ + if (!arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + gen_vfp_uhto(dp, 16 - rm, 0); + break; + case 23: /* fulto */ + if (!arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + gen_vfp_ulto(dp, 32 - rm, 0); + break; + case 24: /* ftoui */ + gen_vfp_toui(dp, 0); + break; + case 25: /* ftouiz */ + gen_vfp_touiz(dp, 0); + break; + case 26: /* ftosi */ + gen_vfp_tosi(dp, 0); + break; + case 27: /* ftosiz */ + gen_vfp_tosiz(dp, 0); + break; + case 28: /* ftosh */ + if (!arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + gen_vfp_tosh(dp, 16 - rm, 0); + break; + case 29: /* ftosl */ + if (!arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + gen_vfp_tosl(dp, 32 - rm, 0); + break; + case 30: /* ftouh */ + if (!arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + gen_vfp_touh(dp, 16 - rm, 0); + break; + case 31: /* ftoul */ + if (!arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + gen_vfp_toul(dp, 32 - rm, 0); + break; + default: /* undefined */ + return 1; + } + break; + default: /* undefined */ + return 1; + } + + /* Write back the result. */ + if (op == 15 && (rn >= 8 && rn <= 11)) + ; /* Comparison, do nothing. */ + else if (op == 15 && dp && ((rn & 0x1c) == 0x18)) + /* VCVT double to int: always integer result. */ + gen_mov_vreg_F0(0, rd); + else if (op == 15 && rn == 15) + /* conversion */ + gen_mov_vreg_F0(!dp, rd); + else + gen_mov_vreg_F0(dp, rd); + + /* break out of the loop if we have finished */ + if (veclen == 0) + break; + + if (op == 15 && delta_m == 0) { + /* single source one-many */ + while (veclen--) { + rd = ((rd + delta_d) & (bank_mask - 1)) + | (rd & bank_mask); + gen_mov_vreg_F0(dp, rd); + } + break; + } + /* Setup the next operands. */ + veclen--; + rd = ((rd + delta_d) & (bank_mask - 1)) + | (rd & bank_mask); + + if (op == 15) { + /* One source operand. */ + rm = ((rm + delta_m) & (bank_mask - 1)) + | (rm & bank_mask); + gen_mov_F0_vreg(dp, rm); + } else { + /* Two source operands. 
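+ rn always steps by delta_d and rm by delta_m (zero for a scalar
+ operand); the bank bits are OR-ed back so the indices wrap
+ within one register bank.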
*/ + rn = ((rn + delta_d) & (bank_mask - 1)) + | (rn & bank_mask); + gen_mov_F0_vreg(dp, rn); + if (delta_m) { + rm = ((rm + delta_m) & (bank_mask - 1)) + | (rm & bank_mask); + gen_mov_F1_vreg(dp, rm); + } + } + } + } + break; + case 0xc: + case 0xd: + if ((insn & 0x03e00000) == 0x00400000) { + /* two-register transfer */ + rn = (insn >> 16) & 0xf; + rd = (insn >> 12) & 0xf; + if (dp) { + VFP_DREG_M(rm, insn); + } else { + rm = VFP_SREG_M(insn); + } + + if (insn & ARM_CP_RW_BIT) { + /* vfp->arm */ + if (dp) { + gen_mov_F0_vreg(0, rm * 2); + tmp = gen_vfp_mrs(); + store_reg(s, rd, tmp); + gen_mov_F0_vreg(0, rm * 2 + 1); + tmp = gen_vfp_mrs(); + store_reg(s, rn, tmp); + } else { + gen_mov_F0_vreg(0, rm); + tmp = gen_vfp_mrs(); + store_reg(s, rd, tmp); + gen_mov_F0_vreg(0, rm + 1); + tmp = gen_vfp_mrs(); + store_reg(s, rn, tmp); + } + } else { + /* arm->vfp */ + if (dp) { + tmp = load_reg(s, rd); + gen_vfp_msr(tmp); + gen_mov_vreg_F0(0, rm * 2); + tmp = load_reg(s, rn); + gen_vfp_msr(tmp); + gen_mov_vreg_F0(0, rm * 2 + 1); + } else { + tmp = load_reg(s, rd); + gen_vfp_msr(tmp); + gen_mov_vreg_F0(0, rm); + tmp = load_reg(s, rn); + gen_vfp_msr(tmp); + gen_mov_vreg_F0(0, rm + 1); + } + } + } else { + /* Load/store */ + rn = (insn >> 16) & 0xf; + if (dp) + VFP_DREG_D(rd, insn); + else + rd = VFP_SREG_D(insn); + if ((insn & 0x01200000) == 0x01000000) { + /* Single load/store */ + offset = (insn & 0xff) << 2; + if ((insn & (1 << 23)) == 0) + offset = -offset; + if (s->thumb && rn == 15) { + /* This is actually UNPREDICTABLE */ + addr = tcg_temp_new_i32(); + tcg_gen_movi_i32(addr, s->pc & ~2); + } else { + addr = load_reg(s, rn); + } + tcg_gen_addi_i32(addr, addr, offset); + if (insn & (1 << 20)) { + gen_vfp_ld(s, dp, addr); + gen_mov_vreg_F0(dp, rd); + } else { + gen_mov_F0_vreg(dp, rd); + gen_vfp_st(s, dp, addr); + } + tcg_temp_free_i32(addr); + } else { + /* load/store multiple */ + int w = insn & (1 << 21); + if (dp) + n = (insn >> 1) & 0x7f; + else + n = insn & 0xff; + + if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) { + /* P == U , W == 1 => UNDEF */ + return 1; + } + if (n == 0 || (rd + n) > 32 || (dp && n > 16)) { + /* UNPREDICTABLE cases for bad immediates: we choose to + * UNDEF to avoid generating huge numbers of TCG ops + */ + return 1; + } + if (rn == 15 && w) { + /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */ + return 1; + } + + if (s->thumb && rn == 15) { + /* This is actually UNPREDICTABLE */ + addr = tcg_temp_new_i32(); + tcg_gen_movi_i32(addr, s->pc & ~2); + } else { + addr = load_reg(s, rn); + } + if (insn & (1 << 24)) /* pre-decrement */ + tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2)); + + if (dp) + offset = 8; + else + offset = 4; + for (i = 0; i < n; i++) { + if (insn & ARM_CP_RW_BIT) { + /* load */ + gen_vfp_ld(s, dp, addr); + gen_mov_vreg_F0(dp, rd + i); + } else { + /* store */ + gen_mov_F0_vreg(dp, rd + i); + gen_vfp_st(s, dp, addr); + } + tcg_gen_addi_i32(addr, addr, offset); + } + if (w) { + /* writeback */ + if (insn & (1 << 24)) + offset = -offset * n; + else if (dp && (insn & 1)) + offset = 4; + else + offset = 0; + + if (offset != 0) + tcg_gen_addi_i32(addr, addr, offset); + store_reg(s, rn, addr); + } else { + tcg_temp_free_i32(addr); + } + } + } + break; + default: + /* Should never happen. 
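+ The caller only routes cp10/cp11 encodings here, and those have
+ 0xc, 0xd or 0xe in bits [27:24]; UNDEF defensively otherwise.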
*/ + return 1; + } + return 0; +} + +static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest) +{ + TranslationBlock *tb; + + tb = s->tb; + if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) { + tcg_gen_goto_tb(n); + gen_set_pc_im(dest); + tcg_gen_exit_tb((tcg_target_long)tb + n); + } else { + gen_set_pc_im(dest); + tcg_gen_exit_tb(0); + } +} + +static inline void gen_jmp (DisasContext *s, uint32_t dest) +{ + if (unlikely(s->singlestep_enabled)) { + /* An indirect jump so that we still trigger the debug exception. */ + if (s->thumb) + dest |= 1; + gen_bx_im(s, dest); + } else { + gen_goto_tb(s, 0, dest); + s->is_jmp = DISAS_TB_JUMP; + } +} + +static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y) +{ + if (x) + tcg_gen_sari_i32(t0, t0, 16); + else + gen_sxth(t0); + if (y) + tcg_gen_sari_i32(t1, t1, 16); + else + gen_sxth(t1); + tcg_gen_mul_i32(t0, t0, t1); +} + +/* Return the mask of PSR bits set by a MSR instruction. */ +static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) { + uint32_t mask; + + mask = 0; + if (flags & (1 << 0)) + mask |= 0xff; + if (flags & (1 << 1)) + mask |= 0xff00; + if (flags & (1 << 2)) + mask |= 0xff0000; + if (flags & (1 << 3)) + mask |= 0xff000000; + + /* Mask out undefined bits. */ + mask &= ~CPSR_RESERVED; + if (!arm_feature(env, ARM_FEATURE_V4T)) + mask &= ~CPSR_T; + if (!arm_feature(env, ARM_FEATURE_V5)) + mask &= ~CPSR_Q; /* V5TE in reality*/ + if (!arm_feature(env, ARM_FEATURE_V6)) + mask &= ~(CPSR_E | CPSR_GE); + if (!arm_feature(env, ARM_FEATURE_THUMB2)) + mask &= ~CPSR_IT; + /* Mask out execution state bits. */ + if (!spsr) + mask &= ~CPSR_EXEC; + /* Mask out privileged bits. */ + if (IS_USER(s)) + mask &= CPSR_USER; + return mask; +} + +/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */ +static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0) +{ + TCGv tmp; + if (spsr) { + /* ??? This is also undefined in system mode. */ + if (IS_USER(s)) + return 1; + + tmp = load_cpu_field(spsr); + tcg_gen_andi_i32(tmp, tmp, ~mask); + tcg_gen_andi_i32(t0, t0, mask); + tcg_gen_or_i32(tmp, tmp, t0); + store_cpu_field(tmp, spsr); + } else { + gen_set_cpsr(t0, mask); + } + tcg_temp_free_i32(t0); + gen_lookup_tb(s); + return 0; +} + +/* Returns nonzero if access to the PSR is not permitted. */ +static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val) +{ + TCGv tmp; + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, val); + return gen_set_psr(s, mask, spsr, tmp); +} + +/* Generate an old-style exception return. Marks pc as dead. */ +static void gen_exception_return(DisasContext *s, TCGv pc) +{ + TCGv tmp; + store_reg(s, 15, pc); + tmp = load_cpu_field(spsr); + gen_set_cpsr(tmp, 0xffffffff); + tcg_temp_free_i32(tmp); + s->is_jmp = DISAS_UPDATE; +} + +/* Generate a v6 exception return. Marks both values as dead. 
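+ Used for RFE: the saved CPSR is restored before the new PC is
+ written, and the TB ends with DISAS_UPDATE.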
*/ +static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr) +{ + gen_set_cpsr(cpsr, 0xffffffff); + tcg_temp_free_i32(cpsr); + store_reg(s, 15, pc); + s->is_jmp = DISAS_UPDATE; +} + +static inline void +gen_set_condexec (DisasContext *s) +{ + if (s->condexec_mask) { + uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1); + TCGv tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, val); + store_cpu_field(tmp, condexec_bits); + } +} + +static void gen_exception_insn(DisasContext *s, int offset, int excp) +{ + gen_set_condexec(s); + gen_set_pc_im(s->pc - offset); + gen_exception(excp); + s->is_jmp = DISAS_JUMP; +} + +static void gen_nop_hint(DisasContext *s, int val) +{ + switch (val) { + case 3: /* wfi */ + gen_set_pc_im(s->pc); + s->is_jmp = DISAS_WFI; + break; + case 2: /* wfe */ + case 4: /* sev */ + /* TODO: Implement SEV and WFE. May help SMP performance. */ + default: /* nop */ + break; + } +} + +#define CPU_V001 cpu_V0, cpu_V0, cpu_V1 + +static inline void gen_neon_add(int size, TCGv t0, TCGv t1) +{ + switch (size) { + case 0: gen_helper_neon_add_u8(t0, t0, t1); break; + case 1: gen_helper_neon_add_u16(t0, t0, t1); break; + case 2: tcg_gen_add_i32(t0, t0, t1); break; + default: abort(); + } +} + +static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1) +{ + switch (size) { + case 0: gen_helper_neon_sub_u8(t0, t1, t0); break; + case 1: gen_helper_neon_sub_u16(t0, t1, t0); break; + case 2: tcg_gen_sub_i32(t0, t1, t0); break; + default: return; + } +} + +/* 32-bit pairwise ops end up the same as the elementwise versions. */ +#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32 +#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32 +#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32 +#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32 + +#define GEN_NEON_INTEGER_OP_ENV(name) do { \ + switch ((size << 1) | u) { \ + case 0: \ + gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \ + break; \ + case 1: \ + gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \ + break; \ + case 2: \ + gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \ + break; \ + case 3: \ + gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \ + break; \ + case 4: \ + gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \ + break; \ + case 5: \ + gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \ + break; \ + default: return 1; \ + }} while (0) + +#define GEN_NEON_INTEGER_OP(name) do { \ + switch ((size << 1) | u) { \ + case 0: \ + gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \ + break; \ + case 1: \ + gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \ + break; \ + case 2: \ + gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \ + break; \ + case 3: \ + gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \ + break; \ + case 4: \ + gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \ + break; \ + case 5: \ + gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \ + break; \ + default: return 1; \ + }} while (0) + +static TCGv neon_load_scratch(int scratch) +{ + TCGv tmp = tcg_temp_new_i32(); + tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch])); + return tmp; +} + +static void neon_store_scratch(int scratch, TCGv var) +{ + tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch])); + tcg_temp_free_i32(var); +} + +static inline TCGv neon_get_scalar(int size, int reg) +{ + TCGv tmp; + if (size == 1) { + tmp = neon_load_reg(reg & 7, reg >> 4); + if (reg & 8) { + gen_neon_dup_high16(tmp); + } else { + gen_neon_dup_low16(tmp); + } + } else { + tmp = neon_load_reg(reg & 
15, reg >> 4); + } + return tmp; +} + +static int gen_neon_unzip(int rd, int rm, int size, int q) +{ + TCGv tmp, tmp2; + if (!q && size == 2) { + return 1; + } + tmp = tcg_const_i32(rd); + tmp2 = tcg_const_i32(rm); + if (q) { + switch (size) { + case 0: + gen_helper_neon_qunzip8(cpu_env, tmp, tmp2); + break; + case 1: + gen_helper_neon_qunzip16(cpu_env, tmp, tmp2); + break; + case 2: + gen_helper_neon_qunzip32(cpu_env, tmp, tmp2); + break; + default: + abort(); + } + } else { + switch (size) { + case 0: + gen_helper_neon_unzip8(cpu_env, tmp, tmp2); + break; + case 1: + gen_helper_neon_unzip16(cpu_env, tmp, tmp2); + break; + default: + abort(); + } + } + tcg_temp_free_i32(tmp); + tcg_temp_free_i32(tmp2); + return 0; +} + +static int gen_neon_zip(int rd, int rm, int size, int q) +{ + TCGv tmp, tmp2; + if (!q && size == 2) { + return 1; + } + tmp = tcg_const_i32(rd); + tmp2 = tcg_const_i32(rm); + if (q) { + switch (size) { + case 0: + gen_helper_neon_qzip8(cpu_env, tmp, tmp2); + break; + case 1: + gen_helper_neon_qzip16(cpu_env, tmp, tmp2); + break; + case 2: + gen_helper_neon_qzip32(cpu_env, tmp, tmp2); + break; + default: + abort(); + } + } else { + switch (size) { + case 0: + gen_helper_neon_zip8(cpu_env, tmp, tmp2); + break; + case 1: + gen_helper_neon_zip16(cpu_env, tmp, tmp2); + break; + default: + abort(); + } + } + tcg_temp_free_i32(tmp); + tcg_temp_free_i32(tmp2); + return 0; +} + +static void gen_neon_trn_u8(TCGv t0, TCGv t1) +{ + TCGv rd, tmp; + + rd = tcg_temp_new_i32(); + tmp = tcg_temp_new_i32(); + + tcg_gen_shli_i32(rd, t0, 8); + tcg_gen_andi_i32(rd, rd, 0xff00ff00); + tcg_gen_andi_i32(tmp, t1, 0x00ff00ff); + tcg_gen_or_i32(rd, rd, tmp); + + tcg_gen_shri_i32(t1, t1, 8); + tcg_gen_andi_i32(t1, t1, 0x00ff00ff); + tcg_gen_andi_i32(tmp, t0, 0xff00ff00); + tcg_gen_or_i32(t1, t1, tmp); + tcg_gen_mov_i32(t0, rd); + + tcg_temp_free_i32(tmp); + tcg_temp_free_i32(rd); +} + +static void gen_neon_trn_u16(TCGv t0, TCGv t1) +{ + TCGv rd, tmp; + + rd = tcg_temp_new_i32(); + tmp = tcg_temp_new_i32(); + + tcg_gen_shli_i32(rd, t0, 16); + tcg_gen_andi_i32(tmp, t1, 0xffff); + tcg_gen_or_i32(rd, rd, tmp); + tcg_gen_shri_i32(t1, t1, 16); + tcg_gen_andi_i32(tmp, t0, 0xffff0000); + tcg_gen_or_i32(t1, t1, tmp); + tcg_gen_mov_i32(t0, rd); + + tcg_temp_free_i32(tmp); + tcg_temp_free_i32(rd); +} + + +static struct { + int nregs; + int interleave; + int spacing; +} neon_ls_element_type[11] = { + {4, 4, 1}, + {4, 4, 2}, + {4, 1, 1}, + {4, 2, 1}, + {3, 3, 1}, + {3, 3, 2}, + {3, 1, 1}, + {1, 1, 1}, + {2, 2, 1}, + {2, 2, 2}, + {2, 1, 1} +}; + +/* Translate a NEON load/store element instruction. Return nonzero if the + instruction is invalid. */ +static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn) +{ + int rd, rn, rm; + int op; + int nregs; + int interleave; + int spacing; + int stride; + int size; + int reg; + int pass; + int load; + int shift; + int n; + TCGv addr; + TCGv tmp; + TCGv tmp2; + TCGv_i64 tmp64; + + if (!s->vfp_enabled) + return 1; + VFP_DREG_D(rd, insn); + rn = (insn >> 16) & 0xf; + rm = insn & 0xf; + load = (insn & (1 << 21)) != 0; + if ((insn & (1 << 23)) == 0) { + /* Load store all elements. 
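+ (VLDn/VSTn multiple-structures forms: neon_ls_element_type[] above
+ supplies the register count, interleave and spacing for each op.)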
*/ + op = (insn >> 8) & 0xf; + size = (insn >> 6) & 3; + if (op > 10) + return 1; + /* Catch UNDEF cases for bad values of align field */ + switch (op & 0xc) { + case 4: + if (((insn >> 5) & 1) == 1) { + return 1; + } + break; + case 8: + if (((insn >> 4) & 3) == 3) { + return 1; + } + break; + default: + break; + } + nregs = neon_ls_element_type[op].nregs; + interleave = neon_ls_element_type[op].interleave; + spacing = neon_ls_element_type[op].spacing; + if (size == 3 && (interleave | spacing) != 1) + return 1; + addr = tcg_temp_new_i32(); + load_reg_var(s, addr, rn); + stride = (1 << size) * interleave; + for (reg = 0; reg < nregs; reg++) { + if (interleave > 2 || (interleave == 2 && nregs == 2)) { + load_reg_var(s, addr, rn); + tcg_gen_addi_i32(addr, addr, (1 << size) * reg); + } else if (interleave == 2 && nregs == 4 && reg == 2) { + load_reg_var(s, addr, rn); + tcg_gen_addi_i32(addr, addr, 1 << size); + } + if (size == 3) { + if (load) { + tmp64 = gen_ld64(addr, IS_USER(s)); + neon_store_reg64(tmp64, rd); + tcg_temp_free_i64(tmp64); + } else { + tmp64 = tcg_temp_new_i64(); + neon_load_reg64(tmp64, rd); + gen_st64(tmp64, addr, IS_USER(s)); + } + tcg_gen_addi_i32(addr, addr, stride); + } else { + for (pass = 0; pass < 2; pass++) { + if (size == 2) { + if (load) { + tmp = gen_ld32(addr, IS_USER(s)); + neon_store_reg(rd, pass, tmp); + } else { + tmp = neon_load_reg(rd, pass); + gen_st32(tmp, addr, IS_USER(s)); + } + tcg_gen_addi_i32(addr, addr, stride); + } else if (size == 1) { + if (load) { + tmp = gen_ld16u(addr, IS_USER(s)); + tcg_gen_addi_i32(addr, addr, stride); + tmp2 = gen_ld16u(addr, IS_USER(s)); + tcg_gen_addi_i32(addr, addr, stride); + tcg_gen_shli_i32(tmp2, tmp2, 16); + tcg_gen_or_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + neon_store_reg(rd, pass, tmp); + } else { + tmp = neon_load_reg(rd, pass); + tmp2 = tcg_temp_new_i32(); + tcg_gen_shri_i32(tmp2, tmp, 16); + gen_st16(tmp, addr, IS_USER(s)); + tcg_gen_addi_i32(addr, addr, stride); + gen_st16(tmp2, addr, IS_USER(s)); + tcg_gen_addi_i32(addr, addr, stride); + } + } else /* size == 0 */ { + if (load) { + TCGV_UNUSED(tmp2); + for (n = 0; n < 4; n++) { + tmp = gen_ld8u(addr, IS_USER(s)); + tcg_gen_addi_i32(addr, addr, stride); + if (n == 0) { + tmp2 = tmp; + } else { + tcg_gen_shli_i32(tmp, tmp, n * 8); + tcg_gen_or_i32(tmp2, tmp2, tmp); + tcg_temp_free_i32(tmp); + } + } + neon_store_reg(rd, pass, tmp2); + } else { + tmp2 = neon_load_reg(rd, pass); + for (n = 0; n < 4; n++) { + tmp = tcg_temp_new_i32(); + if (n == 0) { + tcg_gen_mov_i32(tmp, tmp2); + } else { + tcg_gen_shri_i32(tmp, tmp2, n * 8); + } + gen_st8(tmp, addr, IS_USER(s)); + tcg_gen_addi_i32(addr, addr, stride); + } + tcg_temp_free_i32(tmp2); + } + } + } + } + rd += spacing; + } + tcg_temp_free_i32(addr); + stride = nregs * 8; + } else { + size = (insn >> 10) & 3; + if (size == 3) { + /* Load single element to all lanes. 
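+               e.g. VLD1 {d0[]}, [r0] reads one element and replicates it into
+               every lane of d0: gen_load_and_replicate() below builds the
+               replicated 32-bit pattern, which is then stored to both halves
+               of each destination D register.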
*/ + int a = (insn >> 4) & 1; + if (!load) { + return 1; + } + size = (insn >> 6) & 3; + nregs = ((insn >> 8) & 3) + 1; + + if (size == 3) { + if (nregs != 4 || a == 0) { + return 1; + } + /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */ + size = 2; + } + if (nregs == 1 && a == 1 && size == 0) { + return 1; + } + if (nregs == 3 && a == 1) { + return 1; + } + addr = tcg_temp_new_i32(); + load_reg_var(s, addr, rn); + if (nregs == 1) { + /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */ + tmp = gen_load_and_replicate(s, addr, size); + tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0)); + tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1)); + if (insn & (1 << 5)) { + tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0)); + tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1)); + } + tcg_temp_free_i32(tmp); + } else { + /* VLD2/3/4 to all lanes: bit 5 indicates register stride */ + stride = (insn & (1 << 5)) ? 2 : 1; + for (reg = 0; reg < nregs; reg++) { + tmp = gen_load_and_replicate(s, addr, size); + tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0)); + tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1)); + tcg_temp_free_i32(tmp); + tcg_gen_addi_i32(addr, addr, 1 << size); + rd += stride; + } + } + tcg_temp_free_i32(addr); + stride = (1 << size) * nregs; + } else { + /* Single element. */ + int idx = (insn >> 4) & 0xf; + pass = (insn >> 7) & 1; + switch (size) { + case 0: + shift = ((insn >> 5) & 3) * 8; + stride = 1; + break; + case 1: + shift = ((insn >> 6) & 1) * 16; + stride = (insn & (1 << 5)) ? 2 : 1; + break; + case 2: + shift = 0; + stride = (insn & (1 << 6)) ? 2 : 1; + break; + default: + abort(); + } + nregs = ((insn >> 8) & 3) + 1; + /* Catch the UNDEF cases. This is unavoidably a bit messy. */ + switch (nregs) { + case 1: + if (((idx & (1 << size)) != 0) || + (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) { + return 1; + } + break; + case 3: + if ((idx & 1) != 0) { + return 1; + } + /* fall through */ + case 2: + if (size == 2 && (idx & 2) != 0) { + return 1; + } + break; + case 4: + if ((size == 2) && ((idx & 3) == 3)) { + return 1; + } + break; + default: + abort(); + } + if ((rd + stride * (nregs - 1)) > 31) { + /* Attempts to write off the end of the register file + * are UNPREDICTABLE; we choose to UNDEF because otherwise + * the neon_load_reg() would write off the end of the array. + */ + return 1; + } + addr = tcg_temp_new_i32(); + load_reg_var(s, addr, rn); + for (reg = 0; reg < nregs; reg++) { + if (load) { + switch (size) { + case 0: + tmp = gen_ld8u(addr, IS_USER(s)); + break; + case 1: + tmp = gen_ld16u(addr, IS_USER(s)); + break; + case 2: + tmp = gen_ld32(addr, IS_USER(s)); + break; + default: /* Avoid compiler warnings. */ + abort(); + } + if (size != 2) { + tmp2 = neon_load_reg(rd, pass); + gen_bfi(tmp, tmp2, tmp, shift, size ? 
0xffff : 0xff); + tcg_temp_free_i32(tmp2); + } + neon_store_reg(rd, pass, tmp); + } else { /* Store */ + tmp = neon_load_reg(rd, pass); + if (shift) + tcg_gen_shri_i32(tmp, tmp, shift); + switch (size) { + case 0: + gen_st8(tmp, addr, IS_USER(s)); + break; + case 1: + gen_st16(tmp, addr, IS_USER(s)); + break; + case 2: + gen_st32(tmp, addr, IS_USER(s)); + break; + } + } + rd += stride; + tcg_gen_addi_i32(addr, addr, 1 << size); + } + tcg_temp_free_i32(addr); + stride = nregs * (1 << size); + } + } + if (rm != 15) { + TCGv base; + + base = load_reg(s, rn); + if (rm == 13) { + tcg_gen_addi_i32(base, base, stride); + } else { + TCGv index; + index = load_reg(s, rm); + tcg_gen_add_i32(base, base, index); + tcg_temp_free_i32(index); + } + store_reg(s, rn, base); + } + return 0; +} + +/* Bitwise select. dest = c ? t : f. Clobbers T and F. */ +static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c) +{ + tcg_gen_and_i32(t, t, c); + tcg_gen_andc_i32(f, f, c); + tcg_gen_or_i32(dest, t, f); +} + +static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src) +{ + switch (size) { + case 0: gen_helper_neon_narrow_u8(dest, src); break; + case 1: gen_helper_neon_narrow_u16(dest, src); break; + case 2: tcg_gen_trunc_i64_i32(dest, src); break; + default: abort(); + } +} + +static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src) +{ + switch (size) { + case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break; + case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break; + case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break; + default: abort(); + } +} + +static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src) +{ + switch (size) { + case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break; + case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break; + case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break; + default: abort(); + } +} + +static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src) +{ + switch (size) { + case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break; + case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break; + case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break; + default: abort(); + } +} + +static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift, + int q, int u) +{ + if (q) { + if (u) { + switch (size) { + case 1: gen_helper_neon_rshl_u16(var, var, shift); break; + case 2: gen_helper_neon_rshl_u32(var, var, shift); break; + default: abort(); + } + } else { + switch (size) { + case 1: gen_helper_neon_rshl_s16(var, var, shift); break; + case 2: gen_helper_neon_rshl_s32(var, var, shift); break; + default: abort(); + } + } + } else { + if (u) { + switch (size) { + case 1: gen_helper_neon_shl_u16(var, var, shift); break; + case 2: gen_helper_neon_shl_u32(var, var, shift); break; + default: abort(); + } + } else { + switch (size) { + case 1: gen_helper_neon_shl_s16(var, var, shift); break; + case 2: gen_helper_neon_shl_s32(var, var, shift); break; + default: abort(); + } + } + } +} + +static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u) +{ + if (u) { + switch (size) { + case 0: gen_helper_neon_widen_u8(dest, src); break; + case 1: gen_helper_neon_widen_u16(dest, src); break; + case 2: tcg_gen_extu_i32_i64(dest, src); break; + default: abort(); + } + } else { + switch (size) { + case 0: gen_helper_neon_widen_s8(dest, src); break; + case 1: gen_helper_neon_widen_s16(dest, src); break; + case 2: 
tcg_gen_ext_i32_i64(dest, src); break; + default: abort(); + } + } + tcg_temp_free_i32(src); +} + +static inline void gen_neon_addl(int size) +{ + switch (size) { + case 0: gen_helper_neon_addl_u16(CPU_V001); break; + case 1: gen_helper_neon_addl_u32(CPU_V001); break; + case 2: tcg_gen_add_i64(CPU_V001); break; + default: abort(); + } +} + +static inline void gen_neon_subl(int size) +{ + switch (size) { + case 0: gen_helper_neon_subl_u16(CPU_V001); break; + case 1: gen_helper_neon_subl_u32(CPU_V001); break; + case 2: tcg_gen_sub_i64(CPU_V001); break; + default: abort(); + } +} + +static inline void gen_neon_negl(TCGv_i64 var, int size) +{ + switch (size) { + case 0: gen_helper_neon_negl_u16(var, var); break; + case 1: gen_helper_neon_negl_u32(var, var); break; + case 2: gen_helper_neon_negl_u64(var, var); break; + default: abort(); + } +} + +static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size) +{ + switch (size) { + case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break; + case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break; + default: abort(); + } +} + +static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u) +{ + TCGv_i64 tmp; + + switch ((size << 1) | u) { + case 0: gen_helper_neon_mull_s8(dest, a, b); break; + case 1: gen_helper_neon_mull_u8(dest, a, b); break; + case 2: gen_helper_neon_mull_s16(dest, a, b); break; + case 3: gen_helper_neon_mull_u16(dest, a, b); break; + case 4: + tmp = gen_muls_i64_i32(a, b); + tcg_gen_mov_i64(dest, tmp); + tcg_temp_free_i64(tmp); + break; + case 5: + tmp = gen_mulu_i64_i32(a, b); + tcg_gen_mov_i64(dest, tmp); + tcg_temp_free_i64(tmp); + break; + default: abort(); + } + + /* gen_helper_neon_mull_[su]{8|16} do not free their parameters. + Don't forget to clean them now. */ + if (size < 2) { + tcg_temp_free_i32(a); + tcg_temp_free_i32(b); + } +} + +static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src) +{ + if (op) { + if (u) { + gen_neon_unarrow_sats(size, dest, src); + } else { + gen_neon_narrow(size, dest, src); + } + } else { + if (u) { + gen_neon_narrow_satu(size, dest, src); + } else { + gen_neon_narrow_sats(size, dest, src); + } + } +} + +/* Symbolic constants for op fields for Neon 3-register same-length. + * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B + * table A7-9. 
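+ * The decoder below assembles op as ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
+ * e.g. integer VADD has bits [11:8] = 0b1000 and bit 4 = 0, giving op == 16
+ * (NEON_3R_VADD_VSUB, with U then selecting VADD versus VSUB).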
+ */ +#define NEON_3R_VHADD 0 +#define NEON_3R_VQADD 1 +#define NEON_3R_VRHADD 2 +#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */ +#define NEON_3R_VHSUB 4 +#define NEON_3R_VQSUB 5 +#define NEON_3R_VCGT 6 +#define NEON_3R_VCGE 7 +#define NEON_3R_VSHL 8 +#define NEON_3R_VQSHL 9 +#define NEON_3R_VRSHL 10 +#define NEON_3R_VQRSHL 11 +#define NEON_3R_VMAX 12 +#define NEON_3R_VMIN 13 +#define NEON_3R_VABD 14 +#define NEON_3R_VABA 15 +#define NEON_3R_VADD_VSUB 16 +#define NEON_3R_VTST_VCEQ 17 +#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */ +#define NEON_3R_VMUL 19 +#define NEON_3R_VPMAX 20 +#define NEON_3R_VPMIN 21 +#define NEON_3R_VQDMULH_VQRDMULH 22 +#define NEON_3R_VPADD 23 +#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */ +#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */ +#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */ +#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */ +#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */ +#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */ +#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */ + +static const uint8_t neon_3r_sizes[] = { + [NEON_3R_VHADD] = 0x7, + [NEON_3R_VQADD] = 0xf, + [NEON_3R_VRHADD] = 0x7, + [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */ + [NEON_3R_VHSUB] = 0x7, + [NEON_3R_VQSUB] = 0xf, + [NEON_3R_VCGT] = 0x7, + [NEON_3R_VCGE] = 0x7, + [NEON_3R_VSHL] = 0xf, + [NEON_3R_VQSHL] = 0xf, + [NEON_3R_VRSHL] = 0xf, + [NEON_3R_VQRSHL] = 0xf, + [NEON_3R_VMAX] = 0x7, + [NEON_3R_VMIN] = 0x7, + [NEON_3R_VABD] = 0x7, + [NEON_3R_VABA] = 0x7, + [NEON_3R_VADD_VSUB] = 0xf, + [NEON_3R_VTST_VCEQ] = 0x7, + [NEON_3R_VML] = 0x7, + [NEON_3R_VMUL] = 0x7, + [NEON_3R_VPMAX] = 0x7, + [NEON_3R_VPMIN] = 0x7, + [NEON_3R_VQDMULH_VQRDMULH] = 0x6, + [NEON_3R_VPADD] = 0x7, + [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */ + [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */ + [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */ + [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */ + [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */ + [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */ + [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */ +}; + +/* Symbolic constants for op fields for Neon 2-register miscellaneous. + * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B + * table A7-13. 
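+ * The decoder below assembles op as ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
+ * e.g. VSWP has bits [17:16] = 0b10 and bits [10:7] = 0b0000, giving op == 32
+ * (NEON_2RM_VSWP).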
+ */ +#define NEON_2RM_VREV64 0 +#define NEON_2RM_VREV32 1 +#define NEON_2RM_VREV16 2 +#define NEON_2RM_VPADDL 4 +#define NEON_2RM_VPADDL_U 5 +#define NEON_2RM_VCLS 8 +#define NEON_2RM_VCLZ 9 +#define NEON_2RM_VCNT 10 +#define NEON_2RM_VMVN 11 +#define NEON_2RM_VPADAL 12 +#define NEON_2RM_VPADAL_U 13 +#define NEON_2RM_VQABS 14 +#define NEON_2RM_VQNEG 15 +#define NEON_2RM_VCGT0 16 +#define NEON_2RM_VCGE0 17 +#define NEON_2RM_VCEQ0 18 +#define NEON_2RM_VCLE0 19 +#define NEON_2RM_VCLT0 20 +#define NEON_2RM_VABS 22 +#define NEON_2RM_VNEG 23 +#define NEON_2RM_VCGT0_F 24 +#define NEON_2RM_VCGE0_F 25 +#define NEON_2RM_VCEQ0_F 26 +#define NEON_2RM_VCLE0_F 27 +#define NEON_2RM_VCLT0_F 28 +#define NEON_2RM_VABS_F 30 +#define NEON_2RM_VNEG_F 31 +#define NEON_2RM_VSWP 32 +#define NEON_2RM_VTRN 33 +#define NEON_2RM_VUZP 34 +#define NEON_2RM_VZIP 35 +#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */ +#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */ +#define NEON_2RM_VSHLL 38 +#define NEON_2RM_VCVT_F16_F32 44 +#define NEON_2RM_VCVT_F32_F16 46 +#define NEON_2RM_VRECPE 56 +#define NEON_2RM_VRSQRTE 57 +#define NEON_2RM_VRECPE_F 58 +#define NEON_2RM_VRSQRTE_F 59 +#define NEON_2RM_VCVT_FS 60 +#define NEON_2RM_VCVT_FU 61 +#define NEON_2RM_VCVT_SF 62 +#define NEON_2RM_VCVT_UF 63 + +static int neon_2rm_is_float_op(int op) +{ + /* Return true if this neon 2reg-misc op is float-to-float */ + return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F || + op >= NEON_2RM_VRECPE_F); +} + +/* Each entry in this array has bit n set if the insn allows + * size value n (otherwise it will UNDEF). Since unallocated + * op values will have no bits set they always UNDEF. + */ +static const uint8_t neon_2rm_sizes[] = { + [NEON_2RM_VREV64] = 0x7, + [NEON_2RM_VREV32] = 0x3, + [NEON_2RM_VREV16] = 0x1, + [NEON_2RM_VPADDL] = 0x7, + [NEON_2RM_VPADDL_U] = 0x7, + [NEON_2RM_VCLS] = 0x7, + [NEON_2RM_VCLZ] = 0x7, + [NEON_2RM_VCNT] = 0x1, + [NEON_2RM_VMVN] = 0x1, + [NEON_2RM_VPADAL] = 0x7, + [NEON_2RM_VPADAL_U] = 0x7, + [NEON_2RM_VQABS] = 0x7, + [NEON_2RM_VQNEG] = 0x7, + [NEON_2RM_VCGT0] = 0x7, + [NEON_2RM_VCGE0] = 0x7, + [NEON_2RM_VCEQ0] = 0x7, + [NEON_2RM_VCLE0] = 0x7, + [NEON_2RM_VCLT0] = 0x7, + [NEON_2RM_VABS] = 0x7, + [NEON_2RM_VNEG] = 0x7, + [NEON_2RM_VCGT0_F] = 0x4, + [NEON_2RM_VCGE0_F] = 0x4, + [NEON_2RM_VCEQ0_F] = 0x4, + [NEON_2RM_VCLE0_F] = 0x4, + [NEON_2RM_VCLT0_F] = 0x4, + [NEON_2RM_VABS_F] = 0x4, + [NEON_2RM_VNEG_F] = 0x4, + [NEON_2RM_VSWP] = 0x1, + [NEON_2RM_VTRN] = 0x7, + [NEON_2RM_VUZP] = 0x7, + [NEON_2RM_VZIP] = 0x7, + [NEON_2RM_VMOVN] = 0x7, + [NEON_2RM_VQMOVN] = 0x7, + [NEON_2RM_VSHLL] = 0x7, + [NEON_2RM_VCVT_F16_F32] = 0x2, + [NEON_2RM_VCVT_F32_F16] = 0x2, + [NEON_2RM_VRECPE] = 0x4, + [NEON_2RM_VRSQRTE] = 0x4, + [NEON_2RM_VRECPE_F] = 0x4, + [NEON_2RM_VRSQRTE_F] = 0x4, + [NEON_2RM_VCVT_FS] = 0x4, + [NEON_2RM_VCVT_FU] = 0x4, + [NEON_2RM_VCVT_SF] = 0x4, + [NEON_2RM_VCVT_UF] = 0x4, +}; + +/* Translate a NEON data processing instruction. Return nonzero if the + instruction is invalid. + We process data in a mixture of 32-bit and 64-bit chunks. + Mostly we use 32-bit chunks so we can use normal scalar instructions. 
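+   A D register is handled as two 32-bit chunks and a Q register as four,
+   which is why the per-element loops below make (q ? 4 : 2) passes.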
*/ + +static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn) +{ + int op; + int q; + int rd, rn, rm; + int size; + int shift; + int pass; + int count; + int pairwise; + int u; + uint32_t imm, mask; + TCGv tmp, tmp2, tmp3, tmp4, tmp5; + TCGv_i64 tmp64; + + if (!s->vfp_enabled) + return 1; + q = (insn & (1 << 6)) != 0; + u = (insn >> 24) & 1; + VFP_DREG_D(rd, insn); + VFP_DREG_N(rn, insn); + VFP_DREG_M(rm, insn); + size = (insn >> 20) & 3; + if ((insn & (1 << 23)) == 0) { + /* Three register same length. */ + op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1); + /* Catch invalid op and bad size combinations: UNDEF */ + if ((neon_3r_sizes[op] & (1 << size)) == 0) { + return 1; + } + /* All insns of this form UNDEF for either this condition or the + * superset of cases "Q==1"; we catch the latter later. + */ + if (q && ((rd | rn | rm) & 1)) { + return 1; + } + if (size == 3 && op != NEON_3R_LOGIC) { + /* 64-bit element instructions. */ + for (pass = 0; pass < (q ? 2 : 1); pass++) { + neon_load_reg64(cpu_V0, rn + pass); + neon_load_reg64(cpu_V1, rm + pass); + switch (op) { + case NEON_3R_VQADD: + if (u) { + gen_helper_neon_qadd_u64(cpu_V0, cpu_env, + cpu_V0, cpu_V1); + } else { + gen_helper_neon_qadd_s64(cpu_V0, cpu_env, + cpu_V0, cpu_V1); + } + break; + case NEON_3R_VQSUB: + if (u) { + gen_helper_neon_qsub_u64(cpu_V0, cpu_env, + cpu_V0, cpu_V1); + } else { + gen_helper_neon_qsub_s64(cpu_V0, cpu_env, + cpu_V0, cpu_V1); + } + break; + case NEON_3R_VSHL: + if (u) { + gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0); + } else { + gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0); + } + break; + case NEON_3R_VQSHL: + if (u) { + gen_helper_neon_qshl_u64(cpu_V0, cpu_env, + cpu_V1, cpu_V0); + } else { + gen_helper_neon_qshl_s64(cpu_V0, cpu_env, + cpu_V1, cpu_V0); + } + break; + case NEON_3R_VRSHL: + if (u) { + gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0); + } else { + gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0); + } + break; + case NEON_3R_VQRSHL: + if (u) { + gen_helper_neon_qrshl_u64(cpu_V0, cpu_env, + cpu_V1, cpu_V0); + } else { + gen_helper_neon_qrshl_s64(cpu_V0, cpu_env, + cpu_V1, cpu_V0); + } + break; + case NEON_3R_VADD_VSUB: + if (u) { + tcg_gen_sub_i64(CPU_V001); + } else { + tcg_gen_add_i64(CPU_V001); + } + break; + default: + abort(); + } + neon_store_reg64(cpu_V0, rd + pass); + } + return 0; + } + pairwise = 0; + switch (op) { + case NEON_3R_VSHL: + case NEON_3R_VQSHL: + case NEON_3R_VRSHL: + case NEON_3R_VQRSHL: + { + int rtmp; + /* Shift instruction operands are reversed. */ + rtmp = rn; + rn = rm; + rm = rtmp; + } + break; + case NEON_3R_VPADD: + if (u) { + return 1; + } + /* Fall through */ + case NEON_3R_VPMAX: + case NEON_3R_VPMIN: + pairwise = 1; + break; + case NEON_3R_FLOAT_ARITH: + pairwise = (u && size < 2); /* if VPADD (float) */ + break; + case NEON_3R_FLOAT_MINMAX: + pairwise = u; /* if VPMIN/VPMAX (float) */ + break; + case NEON_3R_FLOAT_CMP: + if (!u && size) { + /* no encoding for U=0 C=1x */ + return 1; + } + break; + case NEON_3R_FLOAT_ACMP: + if (!u) { + return 1; + } + break; + case NEON_3R_VRECPS_VRSQRTS: + if (u) { + return 1; + } + break; + case NEON_3R_VMUL: + if (u && (size != 0)) { + /* UNDEF on invalid size for polynomial subcase */ + return 1; + } + break; + case NEON_3R_VFM: + if (!arm_feature(env, ARM_FEATURE_VFP4) || u) { + return 1; + } + break; + default: + break; + } + + if (pairwise && q) { + /* All the pairwise insns UNDEF if Q is set */ + return 1; + } + + for (pass = 0; pass < (q ? 
4 : 2); pass++) {
+
+        if (pairwise) {
+            /* Pairwise. */
+            if (pass < 1) {
+                tmp = neon_load_reg(rn, 0);
+                tmp2 = neon_load_reg(rn, 1);
+            } else {
+                tmp = neon_load_reg(rm, 0);
+                tmp2 = neon_load_reg(rm, 1);
+            }
+        } else {
+            /* Elementwise. */
+            tmp = neon_load_reg(rn, pass);
+            tmp2 = neon_load_reg(rm, pass);
+        }
+        switch (op) {
+        case NEON_3R_VHADD:
+            GEN_NEON_INTEGER_OP(hadd);
+            break;
+        case NEON_3R_VQADD:
+            GEN_NEON_INTEGER_OP_ENV(qadd);
+            break;
+        case NEON_3R_VRHADD:
+            GEN_NEON_INTEGER_OP(rhadd);
+            break;
+        case NEON_3R_LOGIC: /* Logic ops. */
+            switch ((u << 2) | size) {
+            case 0: /* VAND */
+                tcg_gen_and_i32(tmp, tmp, tmp2);
+                break;
+            case 1: /* BIC */
+                tcg_gen_andc_i32(tmp, tmp, tmp2);
+                break;
+            case 2: /* VORR */
+                tcg_gen_or_i32(tmp, tmp, tmp2);
+                break;
+            case 3: /* VORN */
+                tcg_gen_orc_i32(tmp, tmp, tmp2);
+                break;
+            case 4: /* VEOR */
+                tcg_gen_xor_i32(tmp, tmp, tmp2);
+                break;
+            case 5: /* VBSL */
+                tmp3 = neon_load_reg(rd, pass);
+                gen_neon_bsl(tmp, tmp, tmp2, tmp3);
+                tcg_temp_free_i32(tmp3);
+                break;
+            case 6: /* VBIT */
+                tmp3 = neon_load_reg(rd, pass);
+                gen_neon_bsl(tmp, tmp, tmp3, tmp2);
+                tcg_temp_free_i32(tmp3);
+                break;
+            case 7: /* VBIF */
+                tmp3 = neon_load_reg(rd, pass);
+                gen_neon_bsl(tmp, tmp3, tmp, tmp2);
+                tcg_temp_free_i32(tmp3);
+                break;
+            }
+            break;
+        case NEON_3R_VHSUB:
+            GEN_NEON_INTEGER_OP(hsub);
+            break;
+        case NEON_3R_VQSUB:
+            GEN_NEON_INTEGER_OP_ENV(qsub);
+            break;
+        case NEON_3R_VCGT:
+            GEN_NEON_INTEGER_OP(cgt);
+            break;
+        case NEON_3R_VCGE:
+            GEN_NEON_INTEGER_OP(cge);
+            break;
+        case NEON_3R_VSHL:
+            GEN_NEON_INTEGER_OP(shl);
+            break;
+        case NEON_3R_VQSHL:
+            GEN_NEON_INTEGER_OP_ENV(qshl);
+            break;
+        case NEON_3R_VRSHL:
+            GEN_NEON_INTEGER_OP(rshl);
+            break;
+        case NEON_3R_VQRSHL:
+            GEN_NEON_INTEGER_OP_ENV(qrshl);
+            break;
+        case NEON_3R_VMAX:
+            GEN_NEON_INTEGER_OP(max);
+            break;
+        case NEON_3R_VMIN:
+            GEN_NEON_INTEGER_OP(min);
+            break;
+        case NEON_3R_VABD:
+            GEN_NEON_INTEGER_OP(abd);
+            break;
+        case NEON_3R_VABA:
+            GEN_NEON_INTEGER_OP(abd);
+            tcg_temp_free_i32(tmp2);
+            tmp2 = neon_load_reg(rd, pass);
+            gen_neon_add(size, tmp, tmp2);
+            break;
+        case NEON_3R_VADD_VSUB:
+            if (!u) { /* VADD */
+                gen_neon_add(size, tmp, tmp2);
+            } else { /* VSUB */
+                switch (size) {
+                case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
+                case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
+                case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
+                default: abort();
+                }
+            }
+            break;
+        case NEON_3R_VTST_VCEQ:
+            if (!u) { /* VTST */
+                switch (size) {
+                case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
+                case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
+                case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
+                default: abort();
+                }
+            } else { /* VCEQ */
+                switch (size) {
+                case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
+                case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
+                case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
+                default: abort();
+                }
+            }
+            break;
+        case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
+            switch (size) {
+            case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
+            case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
+            case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
+            default: abort();
+            }
+            tcg_temp_free_i32(tmp2);
+            tmp2 = neon_load_reg(rd, pass);
+            if (u) { /* VMLS */
+                gen_neon_rsb(size, tmp, tmp2);
+            } else { /* VMLA */
+                gen_neon_add(size, tmp, tmp2);
+            }
+            break;
+        case NEON_3R_VMUL:
+            if (u) { /* polynomial */
+                gen_helper_neon_mul_p8(tmp, tmp, tmp2);
+            } else { /* Integer */
+                switch (size) {
+                case 0:
gen_helper_neon_mul_u8(tmp, tmp, tmp2); break; + case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break; + case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break; + default: abort(); + } + } + break; + case NEON_3R_VPMAX: + GEN_NEON_INTEGER_OP(pmax); + break; + case NEON_3R_VPMIN: + GEN_NEON_INTEGER_OP(pmin); + break; + case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */ + if (!u) { /* VQDMULH */ + switch (size) { + case 1: + gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); + break; + case 2: + gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); + break; + default: abort(); + } + } else { /* VQRDMULH */ + switch (size) { + case 1: + gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); + break; + case 2: + gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); + break; + default: abort(); + } + } + break; + case NEON_3R_VPADD: + switch (size) { + case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break; + case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break; + case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break; + default: abort(); + } + break; + case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */ + { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + switch ((u << 2) | size) { + case 0: /* VADD */ + case 4: /* VPADD */ + gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus); + break; + case 2: /* VSUB */ + gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus); + break; + case 6: /* VABD */ + gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus); + break; + default: + abort(); + } + tcg_temp_free_ptr(fpstatus); + break; + } + case NEON_3R_FLOAT_MULTIPLY: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus); + if (!u) { + tcg_temp_free_i32(tmp2); + tmp2 = neon_load_reg(rd, pass); + if (size == 0) { + gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus); + } else { + gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus); + } + } + tcg_temp_free_ptr(fpstatus); + break; + } + case NEON_3R_FLOAT_CMP: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + if (!u) { + gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus); + } else { + if (size == 0) { + gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus); + } else { + gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus); + } + } + tcg_temp_free_ptr(fpstatus); + break; + } + case NEON_3R_FLOAT_ACMP: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + if (size == 0) { + gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus); + } else { + gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus); + } + tcg_temp_free_ptr(fpstatus); + break; + } + case NEON_3R_FLOAT_MINMAX: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + if (size == 0) { + gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus); + } else { + gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus); + } + tcg_temp_free_ptr(fpstatus); + break; + } + case NEON_3R_VRECPS_VRSQRTS: + if (size == 0) + gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env); + else + gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env); + break; + case NEON_3R_VFM: + { + /* VFMA, VFMS: fused multiply-add */ + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + TCGv_i32 tmp3 = neon_load_reg(rd, pass); + if (size) { + /* VFMS */ + gen_helper_vfp_negs(tmp, tmp); + } + gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus); + tcg_temp_free_i32(tmp3); + tcg_temp_free_ptr(fpstatus); + break; + } + default: + abort(); + } + tcg_temp_free_i32(tmp2); + + /* Save the result. For elementwise operations we can put it + straight into the destination register. For pairwise operations + we have to be careful to avoid clobbering the source operands. 
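+       e.g. in VPADD d0, d1, d0 the pass-0 write to d0 would corrupt the rm
+       operand that pass 1 still has to read, so when rd == rm each result is
+       staged in the scratch area and copied into rd only after the loop.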
*/
+            if (pairwise && rd == rm) {
+                neon_store_scratch(pass, tmp);
+            } else {
+                neon_store_reg(rd, pass, tmp);
+            }
+
+        } /* for pass */
+        if (pairwise && rd == rm) {
+            for (pass = 0; pass < (q ? 4 : 2); pass++) {
+                tmp = neon_load_scratch(pass);
+                neon_store_reg(rd, pass, tmp);
+            }
+        }
+        /* End of 3 register same size operations. */
+    } else if (insn & (1 << 4)) {
+        if ((insn & 0x00380080) != 0) {
+            /* Two registers and shift. */
+            op = (insn >> 8) & 0xf;
+            if (insn & (1 << 7)) {
+                /* 64-bit shift. */
+                if (op > 7) {
+                    return 1;
+                }
+                size = 3;
+            } else {
+                size = 2;
+                while ((insn & (1 << (size + 19))) == 0)
+                    size--;
+            }
+            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
+            /* To avoid excessive duplication of ops we implement shift
+               by immediate using the variable shift operations. */
+            if (op < 8) {
+                /* Shift by immediate:
+                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
+                if (q && ((rd | rm) & 1)) {
+                    return 1;
+                }
+                if (!u && (op == 4 || op == 6)) {
+                    return 1;
+                }
+                /* Right shifts are encoded as N - shift, where N is the
+                   element size in bits. */
+                if (op <= 4)
+                    shift = shift - (1 << (size + 3));
+                if (size == 3) {
+                    count = q + 1;
+                } else {
+                    count = q ? 4 : 2;
+                }
+                switch (size) {
+                case 0:
+                    imm = (uint8_t) shift;
+                    imm |= imm << 8;
+                    imm |= imm << 16;
+                    break;
+                case 1:
+                    imm = (uint16_t) shift;
+                    imm |= imm << 16;
+                    break;
+                case 2:
+                case 3:
+                    imm = shift;
+                    break;
+                default:
+                    abort();
+                }
+
+                for (pass = 0; pass < count; pass++) {
+                    if (size == 3) {
+                        neon_load_reg64(cpu_V0, rm + pass);
+                        tcg_gen_movi_i64(cpu_V1, imm);
+                        switch (op) {
+                        case 0: /* VSHR */
+                        case 1: /* VSRA */
+                            if (u)
+                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
+                            else
+                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
+                            break;
+                        case 2: /* VRSHR */
+                        case 3: /* VRSRA */
+                            if (u)
+                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
+                            else
+                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
+                            break;
+                        case 4: /* VSRI */
+                        case 5: /* VSHL, VSLI */
+                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
+                            break;
+                        case 6: /* VQSHLU */
+                            gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
+                                                      cpu_V0, cpu_V1);
+                            break;
+                        case 7: /* VQSHL */
+                            if (u) {
+                                gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
+                                                         cpu_V0, cpu_V1);
+                            } else {
+                                gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
+                                                         cpu_V0, cpu_V1);
+                            }
+                            break;
+                        }
+                        if (op == 1 || op == 3) {
+                            /* Accumulate. */
+                            neon_load_reg64(cpu_V1, rd + pass);
+                            tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
+                        } else if (op == 4 || (op == 5 && u)) {
+                            /* Insert */
+                            neon_load_reg64(cpu_V1, rd + pass);
+                            uint64_t mask;
+                            if (shift < -63 || shift > 63) {
+                                mask = 0;
+                            } else {
+                                if (op == 4) {
+                                    mask = 0xffffffffffffffffull >> -shift;
+                                } else {
+                                    mask = 0xffffffffffffffffull << shift;
+                                }
+                            }
+                            tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
+                            tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
+                        }
+                        neon_store_reg64(cpu_V0, rd + pass);
+                    } else { /* size < 3 */
+                        /* Operands in T0 and T1.
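+                           Shift by immediate reuses the variable-shift
+                           helpers: each 32-bit element goes into tmp and the
+                           immediate (already negated for right shifts, per
+                           the N - shift encoding above) into tmp2.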
*/ + tmp = neon_load_reg(rm, pass); + tmp2 = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp2, imm); + switch (op) { + case 0: /* VSHR */ + case 1: /* VSRA */ + GEN_NEON_INTEGER_OP(shl); + break; + case 2: /* VRSHR */ + case 3: /* VRSRA */ + GEN_NEON_INTEGER_OP(rshl); + break; + case 4: /* VSRI */ + case 5: /* VSHL, VSLI */ + switch (size) { + case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break; + case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break; + case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break; + default: abort(); + } + break; + case 6: /* VQSHLU */ + switch (size) { + case 0: + gen_helper_neon_qshlu_s8(tmp, cpu_env, + tmp, tmp2); + break; + case 1: + gen_helper_neon_qshlu_s16(tmp, cpu_env, + tmp, tmp2); + break; + case 2: + gen_helper_neon_qshlu_s32(tmp, cpu_env, + tmp, tmp2); + break; + default: + abort(); + } + break; + case 7: /* VQSHL */ + GEN_NEON_INTEGER_OP_ENV(qshl); + break; + } + tcg_temp_free_i32(tmp2); + + if (op == 1 || op == 3) { + /* Accumulate. */ + tmp2 = neon_load_reg(rd, pass); + gen_neon_add(size, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } else if (op == 4 || (op == 5 && u)) { + /* Insert */ + switch (size) { + case 0: + if (op == 4) + mask = 0xff >> -shift; + else + mask = (uint8_t)(0xff << shift); + mask |= mask << 8; + mask |= mask << 16; + break; + case 1: + if (op == 4) + mask = 0xffff >> -shift; + else + mask = (uint16_t)(0xffff << shift); + mask |= mask << 16; + break; + case 2: + if (shift < -31 || shift > 31) { + mask = 0; + } else { + if (op == 4) + mask = 0xffffffffu >> -shift; + else + mask = 0xffffffffu << shift; + } + break; + default: + abort(); + } + tmp2 = neon_load_reg(rd, pass); + tcg_gen_andi_i32(tmp, tmp, mask); + tcg_gen_andi_i32(tmp2, tmp2, ~mask); + tcg_gen_or_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + neon_store_reg(rd, pass, tmp); + } + } /* for pass */ + } else if (op < 10) { + /* Shift by immediate and narrow: + VSHRN, VRSHRN, VQSHRN, VQRSHRN. */ + int input_unsigned = (op == 8) ? 
!u : u;
+                if (rm & 1) {
+                    return 1;
+                }
+                shift = shift - (1 << (size + 3));
+                size++;
+                if (size == 3) {
+                    tmp64 = tcg_const_i64(shift);
+                    neon_load_reg64(cpu_V0, rm);
+                    neon_load_reg64(cpu_V1, rm + 1);
+                    for (pass = 0; pass < 2; pass++) {
+                        TCGv_i64 in;
+                        if (pass == 0) {
+                            in = cpu_V0;
+                        } else {
+                            in = cpu_V1;
+                        }
+                        if (q) {
+                            if (input_unsigned) {
+                                gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
+                            } else {
+                                gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
+                            }
+                        } else {
+                            if (input_unsigned) {
+                                gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
+                            } else {
+                                gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
+                            }
+                        }
+                        tmp = tcg_temp_new_i32();
+                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
+                        neon_store_reg(rd, pass, tmp);
+                    } /* for pass */
+                    tcg_temp_free_i64(tmp64);
+                } else {
+                    if (size == 1) {
+                        imm = (uint16_t)shift;
+                        imm |= imm << 16;
+                    } else {
+                        /* size == 2 */
+                        imm = (uint32_t)shift;
+                    }
+                    tmp2 = tcg_const_i32(imm);
+                    tmp4 = neon_load_reg(rm + 1, 0);
+                    tmp5 = neon_load_reg(rm + 1, 1);
+                    for (pass = 0; pass < 2; pass++) {
+                        if (pass == 0) {
+                            tmp = neon_load_reg(rm, 0);
+                        } else {
+                            tmp = tmp4;
+                        }
+                        gen_neon_shift_narrow(size, tmp, tmp2, q,
+                                              input_unsigned);
+                        if (pass == 0) {
+                            tmp3 = neon_load_reg(rm, 1);
+                        } else {
+                            tmp3 = tmp5;
+                        }
+                        gen_neon_shift_narrow(size, tmp3, tmp2, q,
+                                              input_unsigned);
+                        tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
+                        tcg_temp_free_i32(tmp);
+                        tcg_temp_free_i32(tmp3);
+                        tmp = tcg_temp_new_i32();
+                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
+                        neon_store_reg(rd, pass, tmp);
+                    } /* for pass */
+                    tcg_temp_free_i32(tmp2);
+                }
+            } else if (op == 10) {
+                /* VSHLL, VMOVL */
+                if (q || (rd & 1)) {
+                    return 1;
+                }
+                tmp = neon_load_reg(rm, 0);
+                tmp2 = neon_load_reg(rm, 1);
+                for (pass = 0; pass < 2; pass++) {
+                    if (pass == 1)
+                        tmp = tmp2;
+
+                    gen_neon_widen(cpu_V0, tmp, size, u);
+
+                    if (shift != 0) {
+                        /* The shift is less than the width of the source
+                           type, so we can just shift the whole register. */
+                        tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
+                        /* Widen the result of shift: we need to clear
+                         * the potential overflow bits resulting from
+                         * left bits of the narrow input appearing as
+                         * right bits of the left neighbouring narrow
+                         * input. */
+                        if (size < 2 || !u) {
+                            uint64_t imm64;
+                            if (size == 0) {
+                                imm = (0xffu >> (8 - shift));
+                                imm |= imm << 16;
+                            } else if (size == 1) {
+                                imm = 0xffff >> (16 - shift);
+                            } else {
+                                /* size == 2 */
+                                imm = 0xffffffff >> (32 - shift);
+                            }
+                            if (size < 2) {
+                                imm64 = imm | (((uint64_t)imm) << 32);
+                            } else {
+                                imm64 = imm;
+                            }
+                            tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
+                        }
+                    }
+                    neon_store_reg64(cpu_V0, rd + pass);
+                }
+            } else if (op >= 14) {
+                /* VCVT fixed-point. */
+                if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
+                    return 1;
+                }
+                /* We have already masked out the must-be-1 top bit of imm6,
+                 * hence this 32-shift where the ARM ARM has 64-imm6.
+                 */
+                shift = 32 - shift;
+                for (pass = 0; pass < (q ? 4 : 2); pass++) {
+                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
+                    if (!(op & 1)) {
+                        if (u)
+                            gen_vfp_ulto(0, shift, 1);
+                        else
+                            gen_vfp_slto(0, shift, 1);
+                    } else {
+                        if (u)
+                            gen_vfp_toul(0, shift, 1);
+                        else
+                            gen_vfp_tosl(0, shift, 1);
+                    }
+                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
+                }
+            } else {
+                return 1;
+            }
+        } else { /* (insn & 0x00380080) == 0 */
+            int invert;
+            if (q && (rd & 1)) {
+                return 1;
+            }
+
+            op = (insn >> 8) & 0xf;
+            /* One register and immediate.
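+               The 8-bit immediate is expanded according to op (cmode); e.g.
+               op == 12 produces the 0x__ff pattern, so an immediate of 0xab
+               expands to 0x0000abff, replicated into each 32-bit element.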
*/ + imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf); + invert = (insn & (1 << 5)) != 0; + /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE. + * We choose to not special-case this and will behave as if a + * valid constant encoding of 0 had been given. + */ + switch (op) { + case 0: case 1: + /* no-op */ + break; + case 2: case 3: + imm <<= 8; + break; + case 4: case 5: + imm <<= 16; + break; + case 6: case 7: + imm <<= 24; + break; + case 8: case 9: + imm |= imm << 16; + break; + case 10: case 11: + imm = (imm << 8) | (imm << 24); + break; + case 12: + imm = (imm << 8) | 0xff; + break; + case 13: + imm = (imm << 16) | 0xffff; + break; + case 14: + imm |= (imm << 8) | (imm << 16) | (imm << 24); + if (invert) + imm = ~imm; + break; + case 15: + if (invert) { + return 1; + } + imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19) + | ((imm & 0x40) ? (0x1f << 25) : (1 << 30)); + break; + } + if (invert) + imm = ~imm; + + for (pass = 0; pass < (q ? 4 : 2); pass++) { + if (op & 1 && op < 12) { + tmp = neon_load_reg(rd, pass); + if (invert) { + /* The immediate value has already been inverted, so + BIC becomes AND. */ + tcg_gen_andi_i32(tmp, tmp, imm); + } else { + tcg_gen_ori_i32(tmp, tmp, imm); + } + } else { + /* VMOV, VMVN. */ + tmp = tcg_temp_new_i32(); + if (op == 14 && invert) { + int n; + uint32_t val; + val = 0; + for (n = 0; n < 4; n++) { + if (imm & (1 << (n + (pass & 1) * 4))) + val |= 0xff << (n * 8); + } + tcg_gen_movi_i32(tmp, val); + } else { + tcg_gen_movi_i32(tmp, imm); + } + } + neon_store_reg(rd, pass, tmp); + } + } + } else { /* (insn & 0x00800010 == 0x00800000) */ + if (size != 3) { + op = (insn >> 8) & 0xf; + if ((insn & (1 << 6)) == 0) { + /* Three registers of different lengths. */ + int src1_wide; + int src2_wide; + int prewiden; + /* undefreq: bit 0 : UNDEF if size != 0 + * bit 1 : UNDEF if size == 0 + * bit 2 : UNDEF if U == 1 + * Note that [1:0] set implies 'always UNDEF' + */ + int undefreq; + /* prewiden, src1_wide, src2_wide, undefreq */ + static const int neon_3reg_wide[16][4] = { + {1, 0, 0, 0}, /* VADDL */ + {1, 1, 0, 0}, /* VADDW */ + {1, 0, 0, 0}, /* VSUBL */ + {1, 1, 0, 0}, /* VSUBW */ + {0, 1, 1, 0}, /* VADDHN */ + {0, 0, 0, 0}, /* VABAL */ + {0, 1, 1, 0}, /* VSUBHN */ + {0, 0, 0, 0}, /* VABDL */ + {0, 0, 0, 0}, /* VMLAL */ + {0, 0, 0, 6}, /* VQDMLAL */ + {0, 0, 0, 0}, /* VMLSL */ + {0, 0, 0, 6}, /* VQDMLSL */ + {0, 0, 0, 0}, /* Integer VMULL */ + {0, 0, 0, 2}, /* VQDMULL */ + {0, 0, 0, 5}, /* Polynomial VMULL */ + {0, 0, 0, 3}, /* Reserved: always UNDEF */ + }; + + prewiden = neon_3reg_wide[op][0]; + src1_wide = neon_3reg_wide[op][1]; + src2_wide = neon_3reg_wide[op][2]; + undefreq = neon_3reg_wide[op][3]; + + if (((undefreq & 1) && (size != 0)) || + ((undefreq & 2) && (size == 0)) || + ((undefreq & 4) && u)) { + return 1; + } + if ((src1_wide && (rn & 1)) || + (src2_wide && (rm & 1)) || + (!src2_wide && (rd & 1))) { + return 1; + } + + /* Avoid overlapping operands. Wide source operands are + always aligned so will never overlap with wide + destinations in problematic ways. 
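+                   e.g. in VMULL q0, d0, d1 the wide destination overlaps the
+                   narrow source d0, so the high half of the overlapping
+                   source is parked in the scratch area before pass 0 (the
+                   rd == rn / rd == rm checks below).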
*/ + if (rd == rm && !src2_wide) { + tmp = neon_load_reg(rm, 1); + neon_store_scratch(2, tmp); + } else if (rd == rn && !src1_wide) { + tmp = neon_load_reg(rn, 1); + neon_store_scratch(2, tmp); + } + TCGV_UNUSED(tmp3); + for (pass = 0; pass < 2; pass++) { + if (src1_wide) { + neon_load_reg64(cpu_V0, rn + pass); + TCGV_UNUSED(tmp); + } else { + if (pass == 1 && rd == rn) { + tmp = neon_load_scratch(2); + } else { + tmp = neon_load_reg(rn, pass); + } + if (prewiden) { + gen_neon_widen(cpu_V0, tmp, size, u); + } + } + if (src2_wide) { + neon_load_reg64(cpu_V1, rm + pass); + TCGV_UNUSED(tmp2); + } else { + if (pass == 1 && rd == rm) { + tmp2 = neon_load_scratch(2); + } else { + tmp2 = neon_load_reg(rm, pass); + } + if (prewiden) { + gen_neon_widen(cpu_V1, tmp2, size, u); + } + } + switch (op) { + case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */ + gen_neon_addl(size); + break; + case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */ + gen_neon_subl(size); + break; + case 5: case 7: /* VABAL, VABDL */ + switch ((size << 1) | u) { + case 0: + gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2); + break; + case 1: + gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2); + break; + case 2: + gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2); + break; + case 3: + gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2); + break; + case 4: + gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2); + break; + case 5: + gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2); + break; + default: abort(); + } + tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp); + break; + case 8: case 9: case 10: case 11: case 12: case 13: + /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */ + gen_neon_mull(cpu_V0, tmp, tmp2, size, u); + break; + case 14: /* Polynomial VMULL */ + gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2); + tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp); + break; + default: /* 15 is RESERVED: caught earlier */ + abort(); + } + if (op == 13) { + /* VQDMULL */ + gen_neon_addl_saturate(cpu_V0, cpu_V0, size); + neon_store_reg64(cpu_V0, rd + pass); + } else if (op == 5 || (op >= 8 && op <= 11)) { + /* Accumulate. */ + neon_load_reg64(cpu_V1, rd + pass); + switch (op) { + case 10: /* VMLSL */ + gen_neon_negl(cpu_V0, size); + /* Fall through */ + case 5: case 8: /* VABAL, VMLAL */ + gen_neon_addl(size); + break; + case 9: case 11: /* VQDMLAL, VQDMLSL */ + gen_neon_addl_saturate(cpu_V0, cpu_V0, size); + if (op == 11) { + gen_neon_negl(cpu_V0, size); + } + gen_neon_addl_saturate(cpu_V0, cpu_V1, size); + break; + default: + abort(); + } + neon_store_reg64(cpu_V0, rd + pass); + } else if (op == 4 || op == 6) { + /* Narrowing operation. */ + tmp = tcg_temp_new_i32(); + if (!u) { + switch (size) { + case 0: + gen_helper_neon_narrow_high_u8(tmp, cpu_V0); + break; + case 1: + gen_helper_neon_narrow_high_u16(tmp, cpu_V0); + break; + case 2: + tcg_gen_shri_i64(cpu_V0, cpu_V0, 32); + tcg_gen_trunc_i64_i32(tmp, cpu_V0); + break; + default: abort(); + } + } else { + switch (size) { + case 0: + gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0); + break; + case 1: + gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0); + break; + case 2: + tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31); + tcg_gen_shri_i64(cpu_V0, cpu_V0, 32); + tcg_gen_trunc_i64_i32(tmp, cpu_V0); + break; + default: abort(); + } + } + if (pass == 0) { + tmp3 = tmp; + } else { + neon_store_reg(rd, 0, tmp3); + neon_store_reg(rd, 1, tmp); + } + } else { + /* Write back the result. */ + neon_store_reg64(cpu_V0, rd + pass); + } + } + } else { + /* Two registers and a scalar. 
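+                   neon_get_scalar() fetches the selected element of the
+                   scalar register (for 16-bit elements it is duplicated into
+                   both halves of a 32-bit chunk) and every element of the
+                   other source is multiplied by it; e.g. VMLA d0, d1, d2[1]
+                   accumulates d1[i] * d2[1] into d0[i].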
NB that for ops of this form
+                  * the ARM ARM labels bit 24 as Q, but it is in our variable
+                  * 'u', not 'q'.
+                  */
+                if (size == 0) {
+                    return 1;
+                }
+                switch (op) {
+                case 1: /* Floating point VMLA scalar */
+                case 5: /* Floating point VMLS scalar */
+                case 9: /* Floating point VMUL scalar */
+                    if (size == 1) {
+                        return 1;
+                    }
+                    /* fall through */
+                case 0: /* Integer VMLA scalar */
+                case 4: /* Integer VMLS scalar */
+                case 8: /* Integer VMUL scalar */
+                case 12: /* VQDMULH scalar */
+                case 13: /* VQRDMULH scalar */
+                    if (u && ((rd | rn) & 1)) {
+                        return 1;
+                    }
+                    tmp = neon_get_scalar(size, rm);
+                    neon_store_scratch(0, tmp);
+                    for (pass = 0; pass < (u ? 4 : 2); pass++) {
+                        tmp = neon_load_scratch(0);
+                        tmp2 = neon_load_reg(rn, pass);
+                        if (op == 12) {
+                            if (size == 1) {
+                                gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
+                            } else {
+                                gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
+                            }
+                        } else if (op == 13) {
+                            if (size == 1) {
+                                gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
+                            } else {
+                                gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
+                            }
+                        } else if (op & 1) {
+                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+                            gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
+                            tcg_temp_free_ptr(fpstatus);
+                        } else {
+                            switch (size) {
+                            case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
+                            case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
+                            case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
+                            default: abort();
+                            }
+                        }
+                        tcg_temp_free_i32(tmp2);
+                        if (op < 8) {
+                            /* Accumulate. */
+                            tmp2 = neon_load_reg(rd, pass);
+                            switch (op) {
+                            case 0:
+                                gen_neon_add(size, tmp, tmp2);
+                                break;
+                            case 1:
+                            {
+                                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+                                gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
+                                tcg_temp_free_ptr(fpstatus);
+                                break;
+                            }
+                            case 4:
+                                gen_neon_rsb(size, tmp, tmp2);
+                                break;
+                            case 5:
+                            {
+                                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+                                gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
+                                tcg_temp_free_ptr(fpstatus);
+                                break;
+                            }
+                            default:
+                                abort();
+                            }
+                            tcg_temp_free_i32(tmp2);
+                        }
+                        neon_store_reg(rd, pass, tmp);
+                    }
+                    break;
+                case 3: /* VQDMLAL scalar */
+                case 7: /* VQDMLSL scalar */
+                case 11: /* VQDMULL scalar */
+                    if (u == 1) {
+                        return 1;
+                    }
+                    /* fall through */
+                case 2: /* VMLAL scalar */
+                case 6: /* VMLSL scalar */
+                case 10: /* VMULL scalar */
+                    if (rd & 1) {
+                        return 1;
+                    }
+                    tmp2 = neon_get_scalar(size, rm);
+                    /* We need a copy of tmp2 because gen_neon_mull
+                     * deletes it during pass 0. */
+                    tmp4 = tcg_temp_new_i32();
+                    tcg_gen_mov_i32(tmp4, tmp2);
+                    tmp3 = neon_load_reg(rn, 1);
+
+                    for (pass = 0; pass < 2; pass++) {
+                        if (pass == 0) {
+                            tmp = neon_load_reg(rn, 0);
+                        } else {
+                            tmp = tmp3;
+                            tmp2 = tmp4;
+                        }
+                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
+                        if (op != 11) {
+                            neon_load_reg64(cpu_V1, rd + pass);
+                        }
+                        switch (op) {
+                        case 6:
+                            gen_neon_negl(cpu_V0, size);
+                            /* Fall through */
+                        case 2:
+                            gen_neon_addl(size);
+                            break;
+                        case 3: case 7:
+                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
+                            if (op == 7) {
+                                gen_neon_negl(cpu_V0, size);
+                            }
+                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
+                            break;
+                        case 10:
+                            /* no-op */
+                            break;
+                        case 11:
+                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
+                            break;
+                        default:
+                            abort();
+                        }
+                        neon_store_reg64(cpu_V0, rd + pass);
+                    }
+
+                    break;
+                default: /* 14 and 15 are RESERVED */
+                    return 1;
+                }
+            }
+        } else { /* size == 3 */
+            if (!u) {
+                /* Extract.
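+               VEXT forms a byte window over the concatenation rm:rn; e.g.
+               VEXT.8 d0, d1, d2, #3 computes (d2:d1) >> 24, i.e. the top
+               five bytes of d1 followed by the low three bytes of d2.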
*/ + imm = (insn >> 8) & 0xf; + + if (imm > 7 && !q) + return 1; + + if (q && ((rd | rn | rm) & 1)) { + return 1; + } + + if (imm == 0) { + neon_load_reg64(cpu_V0, rn); + if (q) { + neon_load_reg64(cpu_V1, rn + 1); + } + } else if (imm == 8) { + neon_load_reg64(cpu_V0, rn + 1); + if (q) { + neon_load_reg64(cpu_V1, rm); + } + } else if (q) { + tmp64 = tcg_temp_new_i64(); + if (imm < 8) { + neon_load_reg64(cpu_V0, rn); + neon_load_reg64(tmp64, rn + 1); + } else { + neon_load_reg64(cpu_V0, rn + 1); + neon_load_reg64(tmp64, rm); + } + tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8); + tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8)); + tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1); + if (imm < 8) { + neon_load_reg64(cpu_V1, rm); + } else { + neon_load_reg64(cpu_V1, rm + 1); + imm -= 8; + } + tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8)); + tcg_gen_shri_i64(tmp64, tmp64, imm * 8); + tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64); + tcg_temp_free_i64(tmp64); + } else { + /* BUGFIX */ + neon_load_reg64(cpu_V0, rn); + tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8); + neon_load_reg64(cpu_V1, rm); + tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8)); + tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1); + } + neon_store_reg64(cpu_V0, rd); + if (q) { + neon_store_reg64(cpu_V1, rd + 1); + } + } else if ((insn & (1 << 11)) == 0) { + /* Two register misc. */ + op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf); + size = (insn >> 18) & 3; + /* UNDEF for unknown op values and bad op-size combinations */ + if ((neon_2rm_sizes[op] & (1 << size)) == 0) { + return 1; + } + if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) && + q && ((rm | rd) & 1)) { + return 1; + } + switch (op) { + case NEON_2RM_VREV64: + for (pass = 0; pass < (q ? 2 : 1); pass++) { + tmp = neon_load_reg(rm, pass * 2); + tmp2 = neon_load_reg(rm, pass * 2 + 1); + switch (size) { + case 0: tcg_gen_bswap32_i32(tmp, tmp); break; + case 1: gen_swap_half(tmp); break; + case 2: /* no-op */ break; + default: abort(); + } + neon_store_reg(rd, pass * 2 + 1, tmp); + if (size == 2) { + neon_store_reg(rd, pass * 2, tmp2); + } else { + switch (size) { + case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break; + case 1: gen_swap_half(tmp2); break; + default: abort(); + } + neon_store_reg(rd, pass * 2, tmp2); + } + } + break; + case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U: + case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U: + for (pass = 0; pass < q + 1; pass++) { + tmp = neon_load_reg(rm, pass * 2); + gen_neon_widen(cpu_V0, tmp, size, op & 1); + tmp = neon_load_reg(rm, pass * 2 + 1); + gen_neon_widen(cpu_V1, tmp, size, op & 1); + switch (size) { + case 0: gen_helper_neon_paddl_u16(CPU_V001); break; + case 1: gen_helper_neon_paddl_u32(CPU_V001); break; + case 2: tcg_gen_add_i64(CPU_V001); break; + default: abort(); + } + if (op >= NEON_2RM_VPADAL) { + /* Accumulate. */ + neon_load_reg64(cpu_V1, rd + pass); + gen_neon_addl(size); + } + neon_store_reg64(cpu_V0, rd + pass); + } + break; + case NEON_2RM_VTRN: + if (size == 2) { + int n; + for (n = 0; n < (q ? 
4 : 2); n += 2) { + tmp = neon_load_reg(rm, n); + tmp2 = neon_load_reg(rd, n + 1); + neon_store_reg(rm, n, tmp2); + neon_store_reg(rd, n + 1, tmp); + } + } else { + goto elementwise; + } + break; + case NEON_2RM_VUZP: + if (gen_neon_unzip(rd, rm, size, q)) { + return 1; + } + break; + case NEON_2RM_VZIP: + if (gen_neon_zip(rd, rm, size, q)) { + return 1; + } + break; + case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN: + /* also VQMOVUN; op field and mnemonics don't line up */ + if (rm & 1) { + return 1; + } + TCGV_UNUSED(tmp2); + for (pass = 0; pass < 2; pass++) { + neon_load_reg64(cpu_V0, rm + pass); + tmp = tcg_temp_new_i32(); + gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size, + tmp, cpu_V0); + if (pass == 0) { + tmp2 = tmp; + } else { + neon_store_reg(rd, 0, tmp2); + neon_store_reg(rd, 1, tmp); + } + } + break; + case NEON_2RM_VSHLL: + if (q || (rd & 1)) { + return 1; + } + tmp = neon_load_reg(rm, 0); + tmp2 = neon_load_reg(rm, 1); + for (pass = 0; pass < 2; pass++) { + if (pass == 1) + tmp = tmp2; + gen_neon_widen(cpu_V0, tmp, size, 1); + tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size); + neon_store_reg64(cpu_V0, rd + pass); + } + break; + case NEON_2RM_VCVT_F16_F32: + if (!arm_feature(env, ARM_FEATURE_VFP_FP16) || + q || (rm & 1)) { + return 1; + } + tmp = tcg_temp_new_i32(); + tmp2 = tcg_temp_new_i32(); + tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0)); + gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env); + tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1)); + gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env); + tcg_gen_shli_i32(tmp2, tmp2, 16); + tcg_gen_or_i32(tmp2, tmp2, tmp); + tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2)); + gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env); + tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3)); + neon_store_reg(rd, 0, tmp2); + tmp2 = tcg_temp_new_i32(); + gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env); + tcg_gen_shli_i32(tmp2, tmp2, 16); + tcg_gen_or_i32(tmp2, tmp2, tmp); + neon_store_reg(rd, 1, tmp2); + tcg_temp_free_i32(tmp); + break; + case NEON_2RM_VCVT_F32_F16: + if (!arm_feature(env, ARM_FEATURE_VFP_FP16) || + q || (rd & 1)) { + return 1; + } + tmp3 = tcg_temp_new_i32(); + tmp = neon_load_reg(rm, 0); + tmp2 = neon_load_reg(rm, 1); + tcg_gen_ext16u_i32(tmp3, tmp); + gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env); + tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0)); + tcg_gen_shri_i32(tmp3, tmp, 16); + gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env); + tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1)); + tcg_temp_free_i32(tmp); + tcg_gen_ext16u_i32(tmp3, tmp2); + gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env); + tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2)); + tcg_gen_shri_i32(tmp3, tmp2, 16); + gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env); + tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3)); + tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp3); + break; + default: + elementwise: + for (pass = 0; pass < (q ? 
4 : 2); pass++) { + if (neon_2rm_is_float_op(op)) { + tcg_gen_ld_f32(cpu_F0s, cpu_env, + neon_reg_offset(rm, pass)); + TCGV_UNUSED(tmp); + } else { + tmp = neon_load_reg(rm, pass); + } + switch (op) { + case NEON_2RM_VREV32: + switch (size) { + case 0: tcg_gen_bswap32_i32(tmp, tmp); break; + case 1: gen_swap_half(tmp); break; + default: abort(); + } + break; + case NEON_2RM_VREV16: + gen_rev16(tmp); + break; + case NEON_2RM_VCLS: + switch (size) { + case 0: gen_helper_neon_cls_s8(tmp, tmp); break; + case 1: gen_helper_neon_cls_s16(tmp, tmp); break; + case 2: gen_helper_neon_cls_s32(tmp, tmp); break; + default: abort(); + } + break; + case NEON_2RM_VCLZ: + switch (size) { + case 0: gen_helper_neon_clz_u8(tmp, tmp); break; + case 1: gen_helper_neon_clz_u16(tmp, tmp); break; + case 2: gen_helper_clz(tmp, tmp); break; + default: abort(); + } + break; + case NEON_2RM_VCNT: + gen_helper_neon_cnt_u8(tmp, tmp); + break; + case NEON_2RM_VMVN: + tcg_gen_not_i32(tmp, tmp); + break; + case NEON_2RM_VQABS: + switch (size) { + case 0: + gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); + break; + case 1: + gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); + break; + case 2: + gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); + break; + default: abort(); + } + break; + case NEON_2RM_VQNEG: + switch (size) { + case 0: + gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); + break; + case 1: + gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); + break; + case 2: + gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); + break; + default: abort(); + } + break; + case NEON_2RM_VCGT0: case NEON_2RM_VCLE0: + tmp2 = tcg_const_i32(0); + switch(size) { + case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break; + case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break; + case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break; + default: abort(); + } + tcg_temp_free(tmp2); + if (op == NEON_2RM_VCLE0) { + tcg_gen_not_i32(tmp, tmp); + } + break; + case NEON_2RM_VCGE0: case NEON_2RM_VCLT0: + tmp2 = tcg_const_i32(0); + switch(size) { + case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break; + case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break; + case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break; + default: abort(); + } + tcg_temp_free(tmp2); + if (op == NEON_2RM_VCLT0) { + tcg_gen_not_i32(tmp, tmp); + } + break; + case NEON_2RM_VCEQ0: + tmp2 = tcg_const_i32(0); + switch(size) { + case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break; + case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break; + case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break; + default: abort(); + } + tcg_temp_free(tmp2); + break; + case NEON_2RM_VABS: + switch(size) { + case 0: gen_helper_neon_abs_s8(tmp, tmp); break; + case 1: gen_helper_neon_abs_s16(tmp, tmp); break; + case 2: tcg_gen_abs_i32(tmp, tmp); break; + default: abort(); + } + break; + case NEON_2RM_VNEG: + tmp2 = tcg_const_i32(0); + gen_neon_rsb(size, tmp, tmp2); + tcg_temp_free(tmp2); + break; + case NEON_2RM_VCGT0_F: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + tmp2 = tcg_const_i32(0); + gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus); + tcg_temp_free(tmp2); + tcg_temp_free_ptr(fpstatus); + break; + } + case NEON_2RM_VCGE0_F: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + tmp2 = tcg_const_i32(0); + gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus); + tcg_temp_free(tmp2); + tcg_temp_free_ptr(fpstatus); + break; + } + case NEON_2RM_VCEQ0_F: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + tmp2 = tcg_const_i32(0); + gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus); + tcg_temp_free(tmp2); + 
tcg_temp_free_ptr(fpstatus); + break; + } + case NEON_2RM_VCLE0_F: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + tmp2 = tcg_const_i32(0); + gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus); + tcg_temp_free(tmp2); + tcg_temp_free_ptr(fpstatus); + break; + } + case NEON_2RM_VCLT0_F: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + tmp2 = tcg_const_i32(0); + gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus); + tcg_temp_free(tmp2); + tcg_temp_free_ptr(fpstatus); + break; + } + case NEON_2RM_VABS_F: + gen_vfp_abs(0); + break; + case NEON_2RM_VNEG_F: + gen_vfp_neg(0); + break; + case NEON_2RM_VSWP: + tmp2 = neon_load_reg(rd, pass); + neon_store_reg(rm, pass, tmp2); + break; + case NEON_2RM_VTRN: + tmp2 = neon_load_reg(rd, pass); + switch (size) { + case 0: gen_neon_trn_u8(tmp, tmp2); break; + case 1: gen_neon_trn_u16(tmp, tmp2); break; + default: abort(); + } + neon_store_reg(rm, pass, tmp2); + break; + case NEON_2RM_VRECPE: + gen_helper_recpe_u32(tmp, tmp, cpu_env); + break; + case NEON_2RM_VRSQRTE: + gen_helper_rsqrte_u32(tmp, tmp, cpu_env); + break; + case NEON_2RM_VRECPE_F: + gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env); + break; + case NEON_2RM_VRSQRTE_F: + gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env); + break; + case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */ + gen_vfp_sito(0, 1); + break; + case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */ + gen_vfp_uito(0, 1); + break; + case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */ + gen_vfp_tosiz(0, 1); + break; + case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */ + gen_vfp_touiz(0, 1); + break; + default: + /* Reserved op values were caught by the + * neon_2rm_sizes[] check earlier. + */ + abort(); + } + if (neon_2rm_is_float_op(op)) { + tcg_gen_st_f32(cpu_F0s, cpu_env, + neon_reg_offset(rd, pass)); + } else { + neon_store_reg(rd, pass, tmp); + } + } + break; + } + } else if ((insn & (1 << 10)) == 0) { + /* VTBL, VTBX. */ + int n = ((insn >> 8) & 3) + 1; + if ((rn + n) > 32) { + /* This is UNPREDICTABLE; we choose to UNDEF to avoid the + * helper function running off the end of the register file. + */ + return 1; + } + n <<= 3; + if (insn & (1 << 6)) { + tmp = neon_load_reg(rd, 0); + } else { + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, 0); + } + tmp2 = neon_load_reg(rm, 0); + tmp4 = tcg_const_i32(rn); + tmp5 = tcg_const_i32(n); + gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5); + tcg_temp_free_i32(tmp); + if (insn & (1 << 6)) { + tmp = neon_load_reg(rd, 1); + } else { + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, 0); + } + tmp3 = neon_load_reg(rm, 1); + gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5); + tcg_temp_free_i32(tmp5); + tcg_temp_free_i32(tmp4); + neon_store_reg(rd, 0, tmp2); + neon_store_reg(rd, 1, tmp3); + tcg_temp_free_i32(tmp); + } else if ((insn & 0x380) == 0) { + /* VDUP */ + if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) { + return 1; + } + if (insn & (1 << 19)) { + tmp = neon_load_reg(rm, 1); + } else { + tmp = neon_load_reg(rm, 0); + } + if (insn & (1 << 16)) { + gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8); + } else if (insn & (1 << 17)) { + if ((insn >> 18) & 1) + gen_neon_dup_high16(tmp); + else + gen_neon_dup_low16(tmp); + } + for (pass = 0; pass < (q ? 
4 : 2); pass++) { + tmp2 = tcg_temp_new_i32(); + tcg_gen_mov_i32(tmp2, tmp); + neon_store_reg(rd, pass, tmp2); + } + tcg_temp_free_i32(tmp); + } else { + return 1; + } + } + } + return 0; +} + +static int disas_cp14_read(CPUARMState * env, DisasContext *s, uint32_t insn) +{ + int crn = (insn >> 16) & 0xf; + int crm = insn & 0xf; + int op1 = (insn >> 21) & 7; + int op2 = (insn >> 5) & 7; + int rt = (insn >> 12) & 0xf; + TCGv tmp; + + /* Minimal set of debug registers, since we don't support debug */ + if (op1 == 0 && crn == 0 && op2 == 0) { + switch (crm) { + case 0: + /* DBGDIDR: just RAZ. In particular this means the + * "debug architecture version" bits will read as + * a reserved value, which should cause Linux to + * not try to use the debug hardware. + */ + tmp = tcg_const_i32(0); + store_reg(s, rt, tmp); + return 0; + case 1: + case 2: + /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we + * don't implement memory mapped debug components + */ + if (ENABLE_ARCH_7) { + tmp = tcg_const_i32(0); + store_reg(s, rt, tmp); + return 0; + } + break; + default: + break; + } + } + + if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { + if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) { + /* TEECR */ + if (IS_USER(s)) + return 1; + tmp = load_cpu_field(teecr); + store_reg(s, rt, tmp); + return 0; + } + if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) { + /* TEEHBR */ + if (IS_USER(s) && (env->teecr & 1)) + return 1; + tmp = load_cpu_field(teehbr); + store_reg(s, rt, tmp); + return 0; + } + } + return 1; +} + +static int disas_cp14_write(CPUARMState * env, DisasContext *s, uint32_t insn) +{ + int crn = (insn >> 16) & 0xf; + int crm = insn & 0xf; + int op1 = (insn >> 21) & 7; + int op2 = (insn >> 5) & 7; + int rt = (insn >> 12) & 0xf; + TCGv tmp; + + if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { + if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) { + /* TEECR */ + if (IS_USER(s)) + return 1; + tmp = load_reg(s, rt); + gen_helper_set_teecr(cpu_env, tmp); + tcg_temp_free_i32(tmp); + return 0; + } + if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) { + /* TEEHBR */ + if (IS_USER(s) && (env->teecr & 1)) + return 1; + tmp = load_reg(s, rt); + store_cpu_field(tmp, teehbr); + return 0; + } + } + return 1; +} + +static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn) +{ + int cpnum; + + cpnum = (insn >> 8) & 0xf; + if (arm_feature(env, ARM_FEATURE_XSCALE) + && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum))) + return 1; + + switch (cpnum) { + case 0: + case 1: + if (arm_feature(env, ARM_FEATURE_IWMMXT)) { + return disas_iwmmxt_insn(env, s, insn); + } else if (arm_feature(env, ARM_FEATURE_XSCALE)) { + return disas_dsp_insn(env, s, insn); + } + return 1; + case 10: + case 11: + return disas_vfp_insn (env, s, insn); + case 14: + /* Coprocessors 7-15 are architecturally reserved by ARM. + Unfortunately Intel decided to ignore this. */ + if (arm_feature(env, ARM_FEATURE_XSCALE)) + goto board; + if (insn & (1 << 20)) + return disas_cp14_read(env, s, insn); + else + return disas_cp14_write(env, s, insn); + case 15: + return disas_cp15_insn (env, s, insn); + default: + board: + /* Unknown coprocessor. See if the board has hooked it. */ + return disas_cp_insn (env, s, insn); + } +} + + +/* Store a 64-bit value to a register pair. Clobbers val. 
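+   (val is shifted in place to extract the high half.)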
 */
+static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
+{
+    TCGv tmp;
+    tmp = tcg_temp_new_i32();
+    tcg_gen_trunc_i64_i32(tmp, val);
+    store_reg(s, rlow, tmp);
+    tmp = tcg_temp_new_i32();
+    tcg_gen_shri_i64(val, val, 32);
+    tcg_gen_trunc_i64_i32(tmp, val);
+    store_reg(s, rhigh, tmp);
+}
+
+/* load a 32-bit value from a register and perform a 64-bit accumulate.  */
+static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
+{
+    TCGv_i64 tmp;
+    TCGv tmp2;
+
+    /* Load value and extend to 64 bits.  */
+    tmp = tcg_temp_new_i64();
+    tmp2 = load_reg(s, rlow);
+    tcg_gen_extu_i32_i64(tmp, tmp2);
+    tcg_temp_free_i32(tmp2);
+    tcg_gen_add_i64(val, val, tmp);
+    tcg_temp_free_i64(tmp);
+}
+
+/* load and add a 64-bit value from a register pair.  */
+static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
+{
+    TCGv_i64 tmp;
+    TCGv tmpl;
+    TCGv tmph;
+
+    /* Load 64-bit value rd:rn.  */
+    tmpl = load_reg(s, rlow);
+    tmph = load_reg(s, rhigh);
+    tmp = tcg_temp_new_i64();
+    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
+    tcg_temp_free_i32(tmpl);
+    tcg_temp_free_i32(tmph);
+    tcg_gen_add_i64(val, val, tmp);
+    tcg_temp_free_i64(tmp);
+}
+
+/* Set N and Z flags from a 64-bit value.  */
+static void gen_logicq_cc(TCGv_i64 val)
+{
+    TCGv tmp = tcg_temp_new_i32();
+    gen_helper_logicq_cc(tmp, val);
+    gen_logic_CC(tmp);
+    tcg_temp_free_i32(tmp);
+}
+
+/* Load/Store exclusive instructions are implemented by remembering
+   the value/address loaded, and seeing if these are the same
+   when the store is performed.  This should be sufficient to implement
+   the architecturally mandated semantics, and avoids having to monitor
+   regular stores.
+
+   In system emulation mode only one CPU will be running at once, so
+   this sequence is effectively atomic.  In user emulation mode we
+   throw an exception and handle the atomic operation elsewhere.
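+
+   For example, for a LDREX/STREX pair this records the address and the
+   loaded value at the LDREX; the STREX then stores only if both still
+   match, writing 0 to Rd on success and 1 on failure.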
*/ +static void gen_load_exclusive(DisasContext *s, int rt, int rt2, + TCGv addr, int size) +{ + TCGv tmp; + + switch (size) { + case 0: + tmp = gen_ld8u(addr, IS_USER(s)); + break; + case 1: + tmp = gen_ld16u(addr, IS_USER(s)); + break; + case 2: + case 3: + tmp = gen_ld32(addr, IS_USER(s)); + break; + default: + abort(); + } + tcg_gen_mov_i32(cpu_exclusive_val, tmp); + store_reg(s, rt, tmp); + if (size == 3) { + TCGv tmp2 = tcg_temp_new_i32(); + tcg_gen_addi_i32(tmp2, addr, 4); + tmp = gen_ld32(tmp2, IS_USER(s)); + tcg_temp_free_i32(tmp2); + tcg_gen_mov_i32(cpu_exclusive_high, tmp); + store_reg(s, rt2, tmp); + } + tcg_gen_mov_i32(cpu_exclusive_addr, addr); +} + +static void gen_clrex(DisasContext *s) +{ + tcg_gen_movi_i32(cpu_exclusive_addr, -1); +} + +#ifdef CONFIG_USER_ONLY +static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, + TCGv addr, int size) +{ + tcg_gen_mov_i32(cpu_exclusive_test, addr); + tcg_gen_movi_i32(cpu_exclusive_info, + size | (rd << 4) | (rt << 8) | (rt2 << 12)); + gen_exception_insn(s, 4, EXCP_STREX); +} +#else +static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, + TCGv addr, int size) +{ + TCGv tmp; + int done_label; + int fail_label; + + /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) { + [addr] = {Rt}; + {Rd} = 0; + } else { + {Rd} = 1; + } */ + fail_label = gen_new_label(); + done_label = gen_new_label(); + tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label); + switch (size) { + case 0: + tmp = gen_ld8u(addr, IS_USER(s)); + break; + case 1: + tmp = gen_ld16u(addr, IS_USER(s)); + break; + case 2: + case 3: + tmp = gen_ld32(addr, IS_USER(s)); + break; + default: + abort(); + } + tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label); + tcg_temp_free_i32(tmp); + if (size == 3) { + TCGv tmp2 = tcg_temp_new_i32(); + tcg_gen_addi_i32(tmp2, addr, 4); + tmp = gen_ld32(tmp2, IS_USER(s)); + tcg_temp_free_i32(tmp2); + tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label); + tcg_temp_free_i32(tmp); + } + tmp = load_reg(s, rt); + switch (size) { + case 0: + gen_st8(tmp, addr, IS_USER(s)); + break; + case 1: + gen_st16(tmp, addr, IS_USER(s)); + break; + case 2: + case 3: + gen_st32(tmp, addr, IS_USER(s)); + break; + default: + abort(); + } + if (size == 3) { + tcg_gen_addi_i32(addr, addr, 4); + tmp = load_reg(s, rt2); + gen_st32(tmp, addr, IS_USER(s)); + } + tcg_gen_movi_i32(cpu_R[rd], 0); + tcg_gen_br(done_label); + gen_set_label(fail_label); + tcg_gen_movi_i32(cpu_R[rd], 1); + gen_set_label(done_label); + tcg_gen_movi_i32(cpu_exclusive_addr, -1); +} +#endif + +static void disas_arm_insn(CPUARMState * env, DisasContext *s) +{ + unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh; + TCGv tmp; + TCGv tmp2; + TCGv tmp3; + TCGv addr; + TCGv_i64 tmp64; + + insn = arm_ldl_code(s->pc, s->bswap_code); + s->pc += 4; + + /* M variants do not implement ARM mode. */ + if (IS_M(env)) + goto illegal_op; + cond = insn >> 28; + if (cond == 0xf){ + /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we + * choose to UNDEF. In ARMv5 and above the space is used + * for miscellaneous unconditional instructions. + */ + ARCH(5); + + /* Unconditional instructions. */ + if (((insn >> 25) & 7) == 1) { + /* NEON Data processing. */ + if (!arm_feature(env, ARM_FEATURE_NEON)) + goto illegal_op; + + if (disas_neon_data_insn(env, s, insn)) + goto illegal_op; + return; + } + if ((insn & 0x0f100000) == 0x04000000) { + /* NEON load/store. 
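+               (Element and structure loads/stores, VLDn/VSTn.)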
*/ + if (!arm_feature(env, ARM_FEATURE_NEON)) + goto illegal_op; + + if (disas_neon_ls_insn(env, s, insn)) + goto illegal_op; + return; + } + if (((insn & 0x0f30f000) == 0x0510f000) || + ((insn & 0x0f30f010) == 0x0710f000)) { + if ((insn & (1 << 22)) == 0) { + /* PLDW; v7MP */ + if (!arm_feature(env, ARM_FEATURE_V7MP)) { + goto illegal_op; + } + } + /* Otherwise PLD; v5TE+ */ + ARCH(5TE); + return; + } + if (((insn & 0x0f70f000) == 0x0450f000) || + ((insn & 0x0f70f010) == 0x0650f000)) { + ARCH(7); + return; /* PLI; V7 */ + } + if (((insn & 0x0f700000) == 0x04100000) || + ((insn & 0x0f700010) == 0x06100000)) { + if (!arm_feature(env, ARM_FEATURE_V7MP)) { + goto illegal_op; + } + return; /* v7MP: Unallocated memory hint: must NOP */ + } + + if ((insn & 0x0ffffdff) == 0x01010000) { + ARCH(6); + /* setend */ + if (((insn >> 9) & 1) != s->bswap_code) { + /* Dynamic endianness switching not implemented. */ + goto illegal_op; + } + return; + } else if ((insn & 0x0fffff00) == 0x057ff000) { + switch ((insn >> 4) & 0xf) { + case 1: /* clrex */ + ARCH(6K); + gen_clrex(s); + return; + case 4: /* dsb */ + case 5: /* dmb */ + case 6: /* isb */ + ARCH(7); + /* We don't emulate caches so these are a no-op. */ + return; + default: + goto illegal_op; + } + } else if ((insn & 0x0e5fffe0) == 0x084d0500) { + /* srs */ + int32_t offset; + if (IS_USER(s)) + goto illegal_op; + ARCH(6); + op1 = (insn & 0x1f); + addr = tcg_temp_new_i32(); + tmp = tcg_const_i32(op1); + gen_helper_get_r13_banked(addr, cpu_env, tmp); + tcg_temp_free_i32(tmp); + i = (insn >> 23) & 3; + switch (i) { + case 0: offset = -4; break; /* DA */ + case 1: offset = 0; break; /* IA */ + case 2: offset = -8; break; /* DB */ + case 3: offset = 4; break; /* IB */ + default: abort(); + } + if (offset) + tcg_gen_addi_i32(addr, addr, offset); + tmp = load_reg(s, 14); + gen_st32(tmp, addr, 0); + tmp = load_cpu_field(spsr); + tcg_gen_addi_i32(addr, addr, 4); + gen_st32(tmp, addr, 0); + if (insn & (1 << 21)) { + /* Base writeback. */ + switch (i) { + case 0: offset = -8; break; + case 1: offset = 4; break; + case 2: offset = -4; break; + case 3: offset = 0; break; + default: abort(); + } + if (offset) + tcg_gen_addi_i32(addr, addr, offset); + tmp = tcg_const_i32(op1); + gen_helper_set_r13_banked(cpu_env, tmp, addr); + tcg_temp_free_i32(tmp); + tcg_temp_free_i32(addr); + } else { + tcg_temp_free_i32(addr); + } + return; + } else if ((insn & 0x0e50ffe0) == 0x08100a00) { + /* rfe */ + int32_t offset; + if (IS_USER(s)) + goto illegal_op; + ARCH(6); + rn = (insn >> 16) & 0xf; + addr = load_reg(s, rn); + i = (insn >> 23) & 3; + switch (i) { + case 0: offset = -4; break; /* DA */ + case 1: offset = 0; break; /* IA */ + case 2: offset = -8; break; /* DB */ + case 3: offset = 4; break; /* IB */ + default: abort(); + } + if (offset) + tcg_gen_addi_i32(addr, addr, offset); + /* Load PC into tmp and CPSR into tmp2. */ + tmp = gen_ld32(addr, 0); + tcg_gen_addi_i32(addr, addr, 4); + tmp2 = gen_ld32(addr, 0); + if (insn & (1 << 21)) { + /* Base writeback. 
 */
+                switch (i) {
+                case 0: offset = -8; break;
+                case 1: offset = 4; break;
+                case 2: offset = -4; break;
+                case 3: offset = 0; break;
+                default: abort();
+                }
+                if (offset)
+                    tcg_gen_addi_i32(addr, addr, offset);
+                store_reg(s, rn, addr);
+            } else {
+                tcg_temp_free_i32(addr);
+            }
+            gen_rfe(s, tmp, tmp2);
+            return;
+        } else if ((insn & 0x0e000000) == 0x0a000000) {
+            /* branch link and change to thumb (blx <offset>) */
+            int32_t offset;
+
+            val = (uint32_t)s->pc;
+            tmp = tcg_temp_new_i32();
+            tcg_gen_movi_i32(tmp, val);
+            store_reg(s, 14, tmp);
+            /* Sign-extend the 24-bit offset */
+            offset = (((int32_t)insn) << 8) >> 8;
+            /* offset * 4 + bit24 * 2 + (thumb bit) */
+            val += (offset << 2) | ((insn >> 23) & 2) | 1;
+            /* pipeline offset */
+            val += 4;
+            /* protected by ARCH(5); above, near the start of uncond block */
+            gen_bx_im(s, val);
+            return;
+        } else if ((insn & 0x0e000f00) == 0x0c000100) {
+            if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
+                /* iWMMXt register transfer.  */
+                if (env->cp15.c15_cpar & (1 << 1))
+                    if (!disas_iwmmxt_insn(env, s, insn))
+                        return;
+            }
+        } else if ((insn & 0x0fe00000) == 0x0c400000) {
+            /* Coprocessor double register transfer.  */
+            ARCH(5TE);
+        } else if ((insn & 0x0f000010) == 0x0e000010) {
+            /* Additional coprocessor register transfer.  */
+        } else if ((insn & 0x0ff10020) == 0x01000000) {
+            uint32_t mask;
+            uint32_t val;
+            /* cps (privileged) */
+            if (IS_USER(s))
+                return;
+            mask = val = 0;
+            if (insn & (1 << 19)) {
+                if (insn & (1 << 8))
+                    mask |= CPSR_A;
+                if (insn & (1 << 7))
+                    mask |= CPSR_I;
+                if (insn & (1 << 6))
+                    mask |= CPSR_F;
+                if (insn & (1 << 18))
+                    val |= mask;
+            }
+            if (insn & (1 << 17)) {
+                mask |= CPSR_M;
+                val |= (insn & 0x1f);
+            }
+            if (mask) {
+                gen_set_psr_im(s, mask, 0, val);
+            }
+            return;
+        }
+        goto illegal_op;
+    }
+    if (cond != 0xe) {
+        /* if not always execute, we generate a conditional jump to
+           next instruction */
+        s->condlabel = gen_new_label();
+        gen_test_cc(cond ^ 1, s->condlabel);
+        s->condjmp = 1;
+    }
+    if ((insn & 0x0f900000) == 0x03000000) {
+        if ((insn & (1 << 21)) == 0) {
+            ARCH(6T2);
+            rd = (insn >> 12) & 0xf;
+            val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
+            if ((insn & (1 << 22)) == 0) {
+                /* MOVW */
+                tmp = tcg_temp_new_i32();
+                tcg_gen_movi_i32(tmp, val);
+            } else {
+                /* MOVT */
+                tmp = load_reg(s, rd);
+                tcg_gen_ext16u_i32(tmp, tmp);
+                tcg_gen_ori_i32(tmp, tmp, val << 16);
+            }
+            store_reg(s, rd, tmp);
+        } else {
+            if (((insn >> 12) & 0xf) != 0xf)
+                goto illegal_op;
+            if (((insn >> 16) & 0xf) == 0) {
+                gen_nop_hint(s, insn & 0xff);
+            } else {
+                /* CPSR = immediate */
+                val = insn & 0xff;
+                shift = ((insn >> 8) & 0xf) * 2;
+                if (shift)
+                    val = (val >> shift) | (val << (32 - shift));
+                i = ((insn & (1 << 22)) != 0);
+                if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
+                    goto illegal_op;
+            }
+        }
+    } else if ((insn & 0x0f900000) == 0x01000000
+               && (insn & 0x00000090) != 0x00000090) {
+        /* miscellaneous instructions */
+        op1 = (insn >> 21) & 3;
+        sh = (insn >> 4) & 0xf;
+        rm = insn & 0xf;
+        switch (sh) {
+        case 0x0: /* move program status register */
+            if (op1 & 1) {
+                /* PSR = reg */
+                tmp = load_reg(s, rm);
+                i = ((op1 & 2) != 0);
+                if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
+                    goto illegal_op;
+            } else {
+                /* reg = PSR */
+                rd = (insn >> 12) & 0xf;
+                if (op1 & 2) {
+                    if (IS_USER(s))
+                        goto illegal_op;
+                    tmp = load_cpu_field(spsr);
+                } else {
+                    tmp = tcg_temp_new_i32();
+                    gen_helper_cpsr_read(tmp);
+                }
+                store_reg(s, rd, tmp);
+            }
+            break;
+        case 0x1:
+            if (op1 == 1) {
+                /*
branch/exchange thumb (bx). */ + ARCH(4T); + tmp = load_reg(s, rm); + gen_bx(s, tmp); + } else if (op1 == 3) { + /* clz */ + ARCH(5); + rd = (insn >> 12) & 0xf; + tmp = load_reg(s, rm); + gen_helper_clz(tmp, tmp); + store_reg(s, rd, tmp); + } else { + goto illegal_op; + } + break; + case 0x2: + if (op1 == 1) { + ARCH(5J); /* bxj */ + /* Trivial implementation equivalent to bx. */ + tmp = load_reg(s, rm); + gen_bx(s, tmp); + } else { + goto illegal_op; + } + break; + case 0x3: + if (op1 != 1) + goto illegal_op; + + ARCH(5); + /* branch link/exchange thumb (blx) */ + tmp = load_reg(s, rm); + tmp2 = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp2, s->pc); + store_reg(s, 14, tmp2); + gen_bx(s, tmp); + break; + case 0x5: /* saturating add/subtract */ + ARCH(5TE); + rd = (insn >> 12) & 0xf; + rn = (insn >> 16) & 0xf; + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rn); + if (op1 & 2) + gen_helper_double_saturate(tmp2, tmp2); + if (op1 & 1) + gen_helper_sub_saturate(tmp, tmp, tmp2); + else + gen_helper_add_saturate(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + break; + case 7: + /* SMC instruction (op1 == 3) + and undefined instructions (op1 == 0 || op1 == 2) + will trap */ + if (op1 != 1) { + goto illegal_op; + } + /* bkpt */ + ARCH(5); + gen_exception_insn(s, 4, EXCP_BKPT); + break; + case 0x8: /* signed multiply */ + case 0xa: + case 0xc: + case 0xe: + ARCH(5TE); + rs = (insn >> 8) & 0xf; + rn = (insn >> 12) & 0xf; + rd = (insn >> 16) & 0xf; + if (op1 == 1) { + /* (32 * 16) >> 16 */ + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); + if (sh & 4) + tcg_gen_sari_i32(tmp2, tmp2, 16); + else + gen_sxth(tmp2); + tmp64 = gen_muls_i64_i32(tmp, tmp2); + tcg_gen_shri_i64(tmp64, tmp64, 16); + tmp = tcg_temp_new_i32(); + tcg_gen_trunc_i64_i32(tmp, tmp64); + tcg_temp_free_i64(tmp64); + if ((sh & 2) == 0) { + tmp2 = load_reg(s, rn); + gen_helper_add_setq(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + store_reg(s, rd, tmp); + } else { + /* 16 * 16 */ + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); + gen_mulxy(tmp, tmp2, sh & 2, sh & 4); + tcg_temp_free_i32(tmp2); + if (op1 == 2) { + tmp64 = tcg_temp_new_i64(); + tcg_gen_ext_i32_i64(tmp64, tmp); + tcg_temp_free_i32(tmp); + gen_addq(s, tmp64, rn, rd); + gen_storeq_reg(s, rn, rd, tmp64); + tcg_temp_free_i64(tmp64); + } else { + if (op1 == 0) { + tmp2 = load_reg(s, rn); + gen_helper_add_setq(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + store_reg(s, rd, tmp); + } + } + break; + default: + goto illegal_op; + } + } else if (((insn & 0x0e000000) == 0 && + (insn & 0x00000090) != 0x90) || + ((insn & 0x0e000000) == (1 << 25))) { + int set_cc, logic_cc, shiftop; + + op1 = (insn >> 21) & 0xf; + set_cc = (insn >> 20) & 1; + logic_cc = table_logic_cc[op1] & set_cc; + + /* data processing instruction */ + if (insn & (1 << 25)) { + /* immediate operand */ + val = insn & 0xff; + shift = ((insn >> 8) & 0xf) * 2; + if (shift) { + val = (val >> shift) | (val << (32 - shift)); + } + tmp2 = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp2, val); + if (logic_cc && shift) { + gen_set_CF_bit31(tmp2); + } + } else { + /* register */ + rm = (insn) & 0xf; + tmp2 = load_reg(s, rm); + shiftop = (insn >> 5) & 3; + if (!(insn & (1 << 4))) { + shift = (insn >> 7) & 0x1f; + gen_arm_shift_im(tmp2, shiftop, shift, logic_cc); + } else { + rs = (insn >> 8) & 0xf; + tmp = load_reg(s, rs); + gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc); + } + } + if (op1 != 0x0f && op1 != 0x0d) { + rn = (insn >> 16) & 0xf; + tmp = load_reg(s, rn); + } else { + TCGV_UNUSED(tmp); + } + 
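+            /* op1 is the 4-bit data processing opcode: AND, EOR, SUB,
+             * RSB, ADD, ADC, SBC, RSC, TST, TEQ, CMP, CMN, ORR, MOV,
+             * BIC, MVN.  The test/compare forms (0x8..0xb) only update
+             * the flags and discard the result below.
+             */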
rd = (insn >> 12) & 0xf; + switch(op1) { + case 0x00: + tcg_gen_and_i32(tmp, tmp, tmp2); + if (logic_cc) { + gen_logic_CC(tmp); + } + store_reg_bx(env, s, rd, tmp); + break; + case 0x01: + tcg_gen_xor_i32(tmp, tmp, tmp2); + if (logic_cc) { + gen_logic_CC(tmp); + } + store_reg_bx(env, s, rd, tmp); + break; + case 0x02: + if (set_cc && rd == 15) { + /* SUBS r15, ... is used for exception return. */ + if (IS_USER(s)) { + goto illegal_op; + } + gen_helper_sub_cc(tmp, tmp, tmp2); + gen_exception_return(s, tmp); + } else { + if (set_cc) { + gen_helper_sub_cc(tmp, tmp, tmp2); + } else { + tcg_gen_sub_i32(tmp, tmp, tmp2); + } + store_reg_bx(env, s, rd, tmp); + } + break; + case 0x03: + if (set_cc) { + gen_helper_sub_cc(tmp, tmp2, tmp); + } else { + tcg_gen_sub_i32(tmp, tmp2, tmp); + } + store_reg_bx(env, s, rd, tmp); + break; + case 0x04: + if (set_cc) { + gen_helper_add_cc(tmp, tmp, tmp2); + } else { + tcg_gen_add_i32(tmp, tmp, tmp2); + } + store_reg_bx(env, s, rd, tmp); + break; + case 0x05: + if (set_cc) { + gen_helper_adc_cc(tmp, tmp, tmp2); + } else { + gen_add_carry(tmp, tmp, tmp2); + } + store_reg_bx(env, s, rd, tmp); + break; + case 0x06: + if (set_cc) { + gen_helper_sbc_cc(tmp, tmp, tmp2); + } else { + gen_sub_carry(tmp, tmp, tmp2); + } + store_reg_bx(env, s, rd, tmp); + break; + case 0x07: + if (set_cc) { + gen_helper_sbc_cc(tmp, tmp2, tmp); + } else { + gen_sub_carry(tmp, tmp2, tmp); + } + store_reg_bx(env, s, rd, tmp); + break; + case 0x08: + if (set_cc) { + tcg_gen_and_i32(tmp, tmp, tmp2); + gen_logic_CC(tmp); + } + tcg_temp_free_i32(tmp); + break; + case 0x09: + if (set_cc) { + tcg_gen_xor_i32(tmp, tmp, tmp2); + gen_logic_CC(tmp); + } + tcg_temp_free_i32(tmp); + break; + case 0x0a: + if (set_cc) { + gen_helper_sub_cc(tmp, tmp, tmp2); + } + tcg_temp_free_i32(tmp); + break; + case 0x0b: + if (set_cc) { + gen_helper_add_cc(tmp, tmp, tmp2); + } + tcg_temp_free_i32(tmp); + break; + case 0x0c: + tcg_gen_or_i32(tmp, tmp, tmp2); + if (logic_cc) { + gen_logic_CC(tmp); + } + store_reg_bx(env, s, rd, tmp); + break; + case 0x0d: + if (logic_cc && rd == 15) { + /* MOVS r15, ... is used for exception return. 
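+                   (gen_exception_return also restores CPSR from SPSR.)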
*/ + if (IS_USER(s)) { + goto illegal_op; + } + gen_exception_return(s, tmp2); + } else { + if (logic_cc) { + gen_logic_CC(tmp2); + } + store_reg_bx(env, s, rd, tmp2); + } + break; + case 0x0e: + tcg_gen_andc_i32(tmp, tmp, tmp2); + if (logic_cc) { + gen_logic_CC(tmp); + } + store_reg_bx(env, s, rd, tmp); + break; + default: + case 0x0f: + tcg_gen_not_i32(tmp2, tmp2); + if (logic_cc) { + gen_logic_CC(tmp2); + } + store_reg_bx(env, s, rd, tmp2); + break; + } + if (op1 != 0x0f && op1 != 0x0d) { + tcg_temp_free_i32(tmp2); + } + } else { + /* other instructions */ + op1 = (insn >> 24) & 0xf; + switch(op1) { + case 0x0: + case 0x1: + /* multiplies, extra load/stores */ + sh = (insn >> 5) & 3; + if (sh == 0) { + if (op1 == 0x0) { + rd = (insn >> 16) & 0xf; + rn = (insn >> 12) & 0xf; + rs = (insn >> 8) & 0xf; + rm = (insn) & 0xf; + op1 = (insn >> 20) & 0xf; + switch (op1) { + case 0: case 1: case 2: case 3: case 6: + /* 32 bit mul */ + tmp = load_reg(s, rs); + tmp2 = load_reg(s, rm); + tcg_gen_mul_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + if (insn & (1 << 22)) { + /* Subtract (mls) */ + ARCH(6T2); + tmp2 = load_reg(s, rn); + tcg_gen_sub_i32(tmp, tmp2, tmp); + tcg_temp_free_i32(tmp2); + } else if (insn & (1 << 21)) { + /* Add */ + tmp2 = load_reg(s, rn); + tcg_gen_add_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + if (insn & (1 << 20)) + gen_logic_CC(tmp); + store_reg(s, rd, tmp); + break; + case 4: + /* 64 bit mul double accumulate (UMAAL) */ + ARCH(6); + tmp = load_reg(s, rs); + tmp2 = load_reg(s, rm); + tmp64 = gen_mulu_i64_i32(tmp, tmp2); + gen_addq_lo(s, tmp64, rn); + gen_addq_lo(s, tmp64, rd); + gen_storeq_reg(s, rn, rd, tmp64); + tcg_temp_free_i64(tmp64); + break; + case 8: case 9: case 10: case 11: + case 12: case 13: case 14: case 15: + /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */ + tmp = load_reg(s, rs); + tmp2 = load_reg(s, rm); + if (insn & (1 << 22)) { + tmp64 = gen_muls_i64_i32(tmp, tmp2); + } else { + tmp64 = gen_mulu_i64_i32(tmp, tmp2); + } + if (insn & (1 << 21)) { /* mult accumulate */ + gen_addq(s, tmp64, rn, rd); + } + if (insn & (1 << 20)) { + gen_logicq_cc(tmp64); + } + gen_storeq_reg(s, rn, rd, tmp64); + tcg_temp_free_i64(tmp64); + break; + default: + goto illegal_op; + } + } else { + rn = (insn >> 16) & 0xf; + rd = (insn >> 12) & 0xf; + if (insn & (1 << 23)) { + /* load/store exclusive */ + op1 = (insn >> 21) & 0x3; + if (op1) + ARCH(6K); + else + ARCH(6); + addr = tcg_temp_local_new_i32(); + load_reg_var(s, addr, rn); + if (insn & (1 << 20)) { + switch (op1) { + case 0: /* ldrex */ + gen_load_exclusive(s, rd, 15, addr, 2); + break; + case 1: /* ldrexd */ + gen_load_exclusive(s, rd, rd + 1, addr, 3); + break; + case 2: /* ldrexb */ + gen_load_exclusive(s, rd, 15, addr, 0); + break; + case 3: /* ldrexh */ + gen_load_exclusive(s, rd, 15, addr, 1); + break; + default: + abort(); + } + } else { + rm = insn & 0xf; + switch (op1) { + case 0: /* strex */ + gen_store_exclusive(s, rd, rm, 15, addr, 2); + break; + case 1: /* strexd */ + gen_store_exclusive(s, rd, rm, rm + 1, addr, 3); + break; + case 2: /* strexb */ + gen_store_exclusive(s, rd, rm, 15, addr, 0); + break; + case 3: /* strexh */ + gen_store_exclusive(s, rd, rm, 15, addr, 1); + break; + default: + abort(); + } + } + tcg_temp_free(addr); + } else { + /* SWP instruction */ + rm = (insn) & 0xf; + + /* ??? This is not really atomic. However we know + we never have multiple CPUs running in parallel, + so it is good enough. 
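+                               SWP is also deprecated from ARMv6 on in
+                               favour of the exclusive operations above.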
 */
+                        addr = load_reg(s, rn);
+                        tmp = load_reg(s, rm);
+                        if (insn & (1 << 22)) {
+                            tmp2 = gen_ld8u(addr, IS_USER(s));
+                            gen_st8(tmp, addr, IS_USER(s));
+                        } else {
+                            tmp2 = gen_ld32(addr, IS_USER(s));
+                            gen_st32(tmp, addr, IS_USER(s));
+                        }
+                        tcg_temp_free_i32(addr);
+                        store_reg(s, rd, tmp2);
+                    }
+                }
+            } else {
+                int address_offset;
+                int load;
+                /* Misc load/store */
+                rn = (insn >> 16) & 0xf;
+                rd = (insn >> 12) & 0xf;
+                addr = load_reg(s, rn);
+                if (insn & (1 << 24))
+                    gen_add_datah_offset(s, insn, 0, addr);
+                address_offset = 0;
+                if (insn & (1 << 20)) {
+                    /* load */
+                    switch(sh) {
+                    case 1:
+                        tmp = gen_ld16u(addr, IS_USER(s));
+                        break;
+                    case 2:
+                        tmp = gen_ld8s(addr, IS_USER(s));
+                        break;
+                    default:
+                    case 3:
+                        tmp = gen_ld16s(addr, IS_USER(s));
+                        break;
+                    }
+                    load = 1;
+                } else if (sh & 2) {
+                    ARCH(5TE);
+                    /* doubleword */
+                    if (sh & 1) {
+                        /* store */
+                        tmp = load_reg(s, rd);
+                        gen_st32(tmp, addr, IS_USER(s));
+                        tcg_gen_addi_i32(addr, addr, 4);
+                        tmp = load_reg(s, rd + 1);
+                        gen_st32(tmp, addr, IS_USER(s));
+                        load = 0;
+                    } else {
+                        /* load */
+                        tmp = gen_ld32(addr, IS_USER(s));
+                        store_reg(s, rd, tmp);
+                        tcg_gen_addi_i32(addr, addr, 4);
+                        tmp = gen_ld32(addr, IS_USER(s));
+                        rd++;
+                        load = 1;
+                    }
+                    address_offset = -4;
+                } else {
+                    /* store */
+                    tmp = load_reg(s, rd);
+                    gen_st16(tmp, addr, IS_USER(s));
+                    load = 0;
+                }
+                /* Perform base writeback before the loaded value to
+                   ensure correct behavior with overlapping index registers.
+                   ldrd with base writeback is undefined if the
+                   destination and index registers overlap.  */
+                if (!(insn & (1 << 24))) {
+                    gen_add_datah_offset(s, insn, address_offset, addr);
+                    store_reg(s, rn, addr);
+                } else if (insn & (1 << 21)) {
+                    if (address_offset)
+                        tcg_gen_addi_i32(addr, addr, address_offset);
+                    store_reg(s, rn, addr);
+                } else {
+                    tcg_temp_free_i32(addr);
+                }
+                if (load) {
+                    /* Complete the load.  */
+                    store_reg(s, rd, tmp);
+                }
+            }
+            break;
+        case 0x4:
+        case 0x5:
+            goto do_ldst;
+        case 0x6:
+        case 0x7:
+            if (insn & (1 << 4)) {
+                ARCH(6);
+                /* Armv6 Media instructions.  */
+                rm = insn & 0xf;
+                rn = (insn >> 16) & 0xf;
+                rd = (insn >> 12) & 0xf;
+                rs = (insn >> 8) & 0xf;
+                switch ((insn >> 23) & 3) {
+                case 0: /* Parallel add/subtract.  */
+                    op1 = (insn >> 20) & 7;
+                    tmp = load_reg(s, rn);
+                    tmp2 = load_reg(s, rm);
+                    sh = (insn >> 5) & 7;
+                    if ((op1 & 3) == 0 || sh == 5 || sh == 6)
+                        goto illegal_op;
+                    gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
+                    tcg_temp_free_i32(tmp2);
+                    store_reg(s, rd, tmp);
+                    break;
+                case 1:
+                    if ((insn & 0x00700020) == 0) {
+                        /* Halfword pack.
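+                           (PKHBT/PKHTB.)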
*/ + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + shift = (insn >> 7) & 0x1f; + if (insn & (1 << 6)) { + /* pkhtb */ + if (shift == 0) + shift = 31; + tcg_gen_sari_i32(tmp2, tmp2, shift); + tcg_gen_andi_i32(tmp, tmp, 0xffff0000); + tcg_gen_ext16u_i32(tmp2, tmp2); + } else { + /* pkhbt */ + if (shift) + tcg_gen_shli_i32(tmp2, tmp2, shift); + tcg_gen_ext16u_i32(tmp, tmp); + tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000); + } + tcg_gen_or_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + } else if ((insn & 0x00200020) == 0x00200000) { + /* [us]sat */ + tmp = load_reg(s, rm); + shift = (insn >> 7) & 0x1f; + if (insn & (1 << 6)) { + if (shift == 0) + shift = 31; + tcg_gen_sari_i32(tmp, tmp, shift); + } else { + tcg_gen_shli_i32(tmp, tmp, shift); + } + sh = (insn >> 16) & 0x1f; + tmp2 = tcg_const_i32(sh); + if (insn & (1 << 22)) + gen_helper_usat(tmp, tmp, tmp2); + else + gen_helper_ssat(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + } else if ((insn & 0x00300fe0) == 0x00200f20) { + /* [us]sat16 */ + tmp = load_reg(s, rm); + sh = (insn >> 16) & 0x1f; + tmp2 = tcg_const_i32(sh); + if (insn & (1 << 22)) + gen_helper_usat16(tmp, tmp, tmp2); + else + gen_helper_ssat16(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + } else if ((insn & 0x00700fe0) == 0x00000fa0) { + /* Select bytes. */ + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + tmp3 = tcg_temp_new_i32(); + tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE)); + gen_helper_sel_flags(tmp, tmp3, tmp, tmp2); + tcg_temp_free_i32(tmp3); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + } else if ((insn & 0x000003e0) == 0x00000060) { + tmp = load_reg(s, rm); + shift = (insn >> 10) & 3; + /* ??? In many cases it's not necessary to do a + rotate, a shift is sufficient. */ + if (shift != 0) + tcg_gen_rotri_i32(tmp, tmp, shift * 8); + op1 = (insn >> 20) & 7; + switch (op1) { + case 0: gen_sxtb16(tmp); break; + case 2: gen_sxtb(tmp); break; + case 3: gen_sxth(tmp); break; + case 4: gen_uxtb16(tmp); break; + case 6: gen_uxtb(tmp); break; + case 7: gen_uxth(tmp); break; + default: goto illegal_op; + } + if (rn != 15) { + tmp2 = load_reg(s, rn); + if ((op1 & 3) == 0) { + gen_add16(tmp, tmp2); + } else { + tcg_gen_add_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + } + store_reg(s, rd, tmp); + } else if ((insn & 0x003f0f60) == 0x003f0f20) { + /* rev */ + tmp = load_reg(s, rm); + if (insn & (1 << 22)) { + if (insn & (1 << 7)) { + gen_revsh(tmp); + } else { + ARCH(6T2); + gen_helper_rbit(tmp, tmp); + } + } else { + if (insn & (1 << 7)) + gen_rev16(tmp); + else + tcg_gen_bswap32_i32(tmp, tmp); + } + store_reg(s, rd, tmp); + } else { + goto illegal_op; + } + break; + case 2: /* Multiplies (Type 3). */ + switch ((insn >> 20) & 0x7) { + case 5: + if (((insn >> 6) ^ (insn >> 7)) & 1) { + /* op2 not 00x or 11x : UNDEF */ + goto illegal_op; + } + /* Signed multiply most significant [accumulate]. 
+ (SMMUL, SMMLA, SMMLS) */ + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); + tmp64 = gen_muls_i64_i32(tmp, tmp2); + + if (rd != 15) { + tmp = load_reg(s, rd); + if (insn & (1 << 6)) { + tmp64 = gen_subq_msw(tmp64, tmp); + } else { + tmp64 = gen_addq_msw(tmp64, tmp); + } + } + if (insn & (1 << 5)) { + tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u); + } + tcg_gen_shri_i64(tmp64, tmp64, 32); + tmp = tcg_temp_new_i32(); + tcg_gen_trunc_i64_i32(tmp, tmp64); + tcg_temp_free_i64(tmp64); + store_reg(s, rn, tmp); + break; + case 0: + case 4: + /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */ + if (insn & (1 << 7)) { + goto illegal_op; + } + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); + if (insn & (1 << 5)) + gen_swap_half(tmp2); + gen_smul_dual(tmp, tmp2); + if (insn & (1 << 6)) { + /* This subtraction cannot overflow. */ + tcg_gen_sub_i32(tmp, tmp, tmp2); + } else { + /* This addition cannot overflow 32 bits; + * however it may overflow considered as a signed + * operation, in which case we must set the Q flag. + */ + gen_helper_add_setq(tmp, tmp, tmp2); + } + tcg_temp_free_i32(tmp2); + if (insn & (1 << 22)) { + /* smlald, smlsld */ + tmp64 = tcg_temp_new_i64(); + tcg_gen_ext_i32_i64(tmp64, tmp); + tcg_temp_free_i32(tmp); + gen_addq(s, tmp64, rd, rn); + gen_storeq_reg(s, rd, rn, tmp64); + tcg_temp_free_i64(tmp64); + } else { + /* smuad, smusd, smlad, smlsd */ + if (rd != 15) + { + tmp2 = load_reg(s, rd); + gen_helper_add_setq(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + store_reg(s, rn, tmp); + } + break; + case 1: + case 3: + /* SDIV, UDIV */ + if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) { + goto illegal_op; + } + if (((insn >> 5) & 7) || (rd != 15)) { + goto illegal_op; + } + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); + if (insn & (1 << 21)) { + gen_helper_udiv(tmp, tmp, tmp2); + } else { + gen_helper_sdiv(tmp, tmp, tmp2); + } + tcg_temp_free_i32(tmp2); + store_reg(s, rn, tmp); + break; + default: + goto illegal_op; + } + break; + case 3: + op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7); + switch (op1) { + case 0: /* Unsigned sum of absolute differences. */ + ARCH(6); + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); + gen_helper_usad8(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + if (rd != 15) { + tmp2 = load_reg(s, rd); + tcg_gen_add_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + store_reg(s, rn, tmp); + break; + case 0x20: case 0x24: case 0x28: case 0x2c: + /* Bitfield insert/clear. 
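+                       (BFI, or BFC when rm == 15, which the code below
+                       turns into a zero source.)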
*/ + ARCH(6T2); + shift = (insn >> 7) & 0x1f; + i = (insn >> 16) & 0x1f; + i = i + 1 - shift; + if (rm == 15) { + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, 0); + } else { + tmp = load_reg(s, rm); + } + if (i != 32) { + tmp2 = load_reg(s, rd); + gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1); + tcg_temp_free_i32(tmp2); + } + store_reg(s, rd, tmp); + break; + case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */ + case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */ + ARCH(6T2); + tmp = load_reg(s, rm); + shift = (insn >> 7) & 0x1f; + i = ((insn >> 16) & 0x1f) + 1; + if (shift + i > 32) + goto illegal_op; + if (i < 32) { + if (op1 & 0x20) { + gen_ubfx(tmp, shift, (1u << i) - 1); + } else { + gen_sbfx(tmp, shift, i); + } + } + store_reg(s, rd, tmp); + break; + default: + goto illegal_op; + } + break; + } + break; + } + do_ldst: + /* Check for undefined extension instructions + * per the ARM Bible IE: + * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx + */ + sh = (0xf << 20) | (0xf << 4); + if (op1 == 0x7 && ((insn & sh) == sh)) + { + goto illegal_op; + } + /* load/store byte/word */ + rn = (insn >> 16) & 0xf; + rd = (insn >> 12) & 0xf; + tmp2 = load_reg(s, rn); + i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000); + if (insn & (1 << 24)) + gen_add_data_offset(s, insn, tmp2); + if (insn & (1 << 20)) { + /* load */ + if (insn & (1 << 22)) { + tmp = gen_ld8u(tmp2, i); + } else { + tmp = gen_ld32(tmp2, i); + } + } else { + /* store */ + tmp = load_reg(s, rd); + if (insn & (1 << 22)) + gen_st8(tmp, tmp2, i); + else + gen_st32(tmp, tmp2, i); + } + if (!(insn & (1 << 24))) { + gen_add_data_offset(s, insn, tmp2); + store_reg(s, rn, tmp2); + } else if (insn & (1 << 21)) { + store_reg(s, rn, tmp2); + } else { + tcg_temp_free_i32(tmp2); + } + if (insn & (1 << 20)) { + /* Complete the load. */ + store_reg_from_load(env, s, rd, tmp); + } + break; + case 0x08: + case 0x09: + { + int j, n, user, loaded_base; + TCGv loaded_var; + /* load/store multiple words */ + /* XXX: store correct base if write back */ + user = 0; + if (insn & (1 << 22)) { + if (IS_USER(s)) + goto illegal_op; /* only usable in supervisor mode */ + + if ((insn & (1 << 15)) == 0) + user = 1; + } + rn = (insn >> 16) & 0xf; + addr = load_reg(s, rn); + + /* compute total size */ + loaded_base = 0; + TCGV_UNUSED(loaded_var); + n = 0; + for(i=0;i<16;i++) { + if (insn & (1 << i)) + n++; + } + /* XXX: test invalid n == 0 case ? 
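+               (An empty register list is UNPREDICTABLE in the
+               architecture.)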
*/ + if (insn & (1 << 23)) { + if (insn & (1 << 24)) { + /* pre increment */ + tcg_gen_addi_i32(addr, addr, 4); + } else { + /* post increment */ + } + } else { + if (insn & (1 << 24)) { + /* pre decrement */ + tcg_gen_addi_i32(addr, addr, -(n * 4)); + } else { + /* post decrement */ + if (n != 1) + tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); + } + } + j = 0; + for(i=0;i<16;i++) { + if (insn & (1 << i)) { + if (insn & (1 << 20)) { + /* load */ + tmp = gen_ld32(addr, IS_USER(s)); + if (user) { + tmp2 = tcg_const_i32(i); + gen_helper_set_user_reg(tmp2, tmp); + tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp); + } else if (i == rn) { + loaded_var = tmp; + loaded_base = 1; + } else { + store_reg_from_load(env, s, i, tmp); + } + } else { + /* store */ + if (i == 15) { + /* special case: r15 = PC + 8 */ + val = (long)s->pc + 4; + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, val); + } else if (user) { + tmp = tcg_temp_new_i32(); + tmp2 = tcg_const_i32(i); + gen_helper_get_user_reg(tmp, tmp2); + tcg_temp_free_i32(tmp2); + } else { + tmp = load_reg(s, i); + } + gen_st32(tmp, addr, IS_USER(s)); + } + j++; + /* no need to add after the last transfer */ + if (j != n) + tcg_gen_addi_i32(addr, addr, 4); + } + } + if (insn & (1 << 21)) { + /* write back */ + if (insn & (1 << 23)) { + if (insn & (1 << 24)) { + /* pre increment */ + } else { + /* post increment */ + tcg_gen_addi_i32(addr, addr, 4); + } + } else { + if (insn & (1 << 24)) { + /* pre decrement */ + if (n != 1) + tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); + } else { + /* post decrement */ + tcg_gen_addi_i32(addr, addr, -(n * 4)); + } + } + store_reg(s, rn, addr); + } else { + tcg_temp_free_i32(addr); + } + if (loaded_base) { + store_reg(s, rn, loaded_var); + } + if ((insn & (1 << 22)) && !user) { + /* Restore CPSR from SPSR. */ + tmp = load_cpu_field(spsr); + gen_set_cpsr(tmp, 0xffffffff); + tcg_temp_free_i32(tmp); + s->is_jmp = DISAS_UPDATE; + } + } + break; + case 0xa: + case 0xb: + { + int32_t offset; + + /* branch (and link) */ + val = (int32_t)s->pc; + if (insn & (1 << 24)) { + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, val); + store_reg(s, 14, tmp); + } + offset = (((int32_t)insn << 8) >> 8); + val += (offset << 2) + 4; + gen_jmp(s, val); + } + break; + case 0xc: + case 0xd: + case 0xe: + /* Coprocessor. */ + if (disas_coproc_insn(env, s, insn)) + goto illegal_op; + break; + case 0xf: + /* swi */ + gen_set_pc_im(s->pc); + s->is_jmp = DISAS_SWI; + break; + default: + illegal_op: + gen_exception_insn(s, 4, EXCP_UDEF); + break; + } + } +} + +/* Return true if this is a Thumb-2 logical op. */ +static int +thumb2_logic_op(int op) +{ + return (op < 8); +} + +/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero + then set condition code flags based on the result of the operation. + If SHIFTER_OUT is nonzero then set the carry flag for logical operations + to the high bit of T1. + Returns zero if the opcode is valid. 
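+   Opcodes 5, 6, 7, 9, 12 and 15 have no register-shifted Thumb-2
+   encoding and make this return nonzero.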
 */
+
+static int
+gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
+{
+    int logic_cc;
+
+    logic_cc = 0;
+    switch (op) {
+    case 0: /* and */
+        tcg_gen_and_i32(t0, t0, t1);
+        logic_cc = conds;
+        break;
+    case 1: /* bic */
+        tcg_gen_andc_i32(t0, t0, t1);
+        logic_cc = conds;
+        break;
+    case 2: /* orr */
+        tcg_gen_or_i32(t0, t0, t1);
+        logic_cc = conds;
+        break;
+    case 3: /* orn */
+        tcg_gen_orc_i32(t0, t0, t1);
+        logic_cc = conds;
+        break;
+    case 4: /* eor */
+        tcg_gen_xor_i32(t0, t0, t1);
+        logic_cc = conds;
+        break;
+    case 8: /* add */
+        if (conds)
+            gen_helper_add_cc(t0, t0, t1);
+        else
+            tcg_gen_add_i32(t0, t0, t1);
+        break;
+    case 10: /* adc */
+        if (conds)
+            gen_helper_adc_cc(t0, t0, t1);
+        else
+            gen_adc(t0, t1);
+        break;
+    case 11: /* sbc */
+        if (conds)
+            gen_helper_sbc_cc(t0, t0, t1);
+        else
+            gen_sub_carry(t0, t0, t1);
+        break;
+    case 13: /* sub */
+        if (conds)
+            gen_helper_sub_cc(t0, t0, t1);
+        else
+            tcg_gen_sub_i32(t0, t0, t1);
+        break;
+    case 14: /* rsb */
+        if (conds)
+            gen_helper_sub_cc(t0, t1, t0);
+        else
+            tcg_gen_sub_i32(t0, t1, t0);
+        break;
+    default: /* 5, 6, 7, 9, 12, 15. */
+        return 1;
+    }
+    if (logic_cc) {
+        gen_logic_CC(t0);
+        if (shifter_out)
+            gen_set_CF_bit31(t1);
+    }
+    return 0;
+}
+
+/* Translate a 32-bit thumb instruction.  Returns nonzero if the instruction
+   is not legal.  */
+static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
+{
+    uint32_t insn, imm, shift, offset;
+    uint32_t rd, rn, rm, rs;
+    TCGv tmp;
+    TCGv tmp2;
+    TCGv tmp3;
+    TCGv addr;
+    TCGv_i64 tmp64;
+    int op;
+    int shiftop;
+    int conds;
+    int logic_cc;
+
+    if (!(arm_feature(env, ARM_FEATURE_THUMB2)
+          || arm_feature (env, ARM_FEATURE_M))) {
+        /* Thumb-1 cores may need to treat bl and blx as a pair of
+           16-bit instructions to get correct prefetch abort behavior.  */
+        insn = insn_hw1;
+        if ((insn & (1 << 12)) == 0) {
+            ARCH(5);
+            /* Second half of blx.  */
+            offset = ((insn & 0x7ff) << 1);
+            tmp = load_reg(s, 14);
+            tcg_gen_addi_i32(tmp, tmp, offset);
+            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
+
+            tmp2 = tcg_temp_new_i32();
+            tcg_gen_movi_i32(tmp2, s->pc | 1);
+            store_reg(s, 14, tmp2);
+            gen_bx(s, tmp);
+            return 0;
+        }
+        if (insn & (1 << 11)) {
+            /* Second half of bl.  */
+            offset = ((insn & 0x7ff) << 1) | 1;
+            tmp = load_reg(s, 14);
+            tcg_gen_addi_i32(tmp, tmp, offset);
+
+            tmp2 = tcg_temp_new_i32();
+            tcg_gen_movi_i32(tmp2, s->pc | 1);
+            store_reg(s, 14, tmp2);
+            gen_bx(s, tmp);
+            return 0;
+        }
+        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
+            /* Instruction spans a page boundary.  Implement it as two
+               16-bit instructions in case the second half causes a
+               prefetch abort.  */
+            offset = ((int32_t)insn << 21) >> 9;
+            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
+            return 0;
+        }
+        /* Fall through to 32-bit decode.  */
+    }
+
+    insn = arm_lduw_code(s->pc, s->bswap_code);
+    s->pc += 2;
+    insn |= (uint32_t)insn_hw1 << 16;
+
+    if ((insn & 0xf800e800) != 0xf000e800) {
+        ARCH(6T2);
+    }
+
+    rn = (insn >> 16) & 0xf;
+    rs = (insn >> 12) & 0xf;
+    rd = (insn >> 8) & 0xf;
+    rm = insn & 0xf;
+    switch ((insn >> 25) & 0xf) {
+    case 0: case 1: case 2: case 3:
+        /* 16-bit instructions.  Should never happen.  */
+        abort();
+    case 4:
+        if (insn & (1 << 22)) {
+            /* Other load/store, table branch.  */
+            if (insn & 0x01200000) {
+                /* Load/store doubleword.
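+                   (LDRD/STRD; rn == 15 gives the PC-relative literal
+                   form handled just below.)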
*/ + if (rn == 15) { + addr = tcg_temp_new_i32(); + tcg_gen_movi_i32(addr, s->pc & ~3); + } else { + addr = load_reg(s, rn); + } + offset = (insn & 0xff) * 4; + if ((insn & (1 << 23)) == 0) + offset = -offset; + if (insn & (1 << 24)) { + tcg_gen_addi_i32(addr, addr, offset); + offset = 0; + } + if (insn & (1 << 20)) { + /* ldrd */ + tmp = gen_ld32(addr, IS_USER(s)); + store_reg(s, rs, tmp); + tcg_gen_addi_i32(addr, addr, 4); + tmp = gen_ld32(addr, IS_USER(s)); + store_reg(s, rd, tmp); + } else { + /* strd */ + tmp = load_reg(s, rs); + gen_st32(tmp, addr, IS_USER(s)); + tcg_gen_addi_i32(addr, addr, 4); + tmp = load_reg(s, rd); + gen_st32(tmp, addr, IS_USER(s)); + } + if (insn & (1 << 21)) { + /* Base writeback. */ + if (rn == 15) + goto illegal_op; + tcg_gen_addi_i32(addr, addr, offset - 4); + store_reg(s, rn, addr); + } else { + tcg_temp_free_i32(addr); + } + } else if ((insn & (1 << 23)) == 0) { + /* Load/store exclusive word. */ + addr = tcg_temp_local_new(); + load_reg_var(s, addr, rn); + tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2); + if (insn & (1 << 20)) { + gen_load_exclusive(s, rs, 15, addr, 2); + } else { + gen_store_exclusive(s, rd, rs, 15, addr, 2); + } + tcg_temp_free(addr); + } else if ((insn & (1 << 6)) == 0) { + /* Table Branch. */ + if (rn == 15) { + addr = tcg_temp_new_i32(); + tcg_gen_movi_i32(addr, s->pc); + } else { + addr = load_reg(s, rn); + } + tmp = load_reg(s, rm); + tcg_gen_add_i32(addr, addr, tmp); + if (insn & (1 << 4)) { + /* tbh */ + tcg_gen_add_i32(addr, addr, tmp); + tcg_temp_free_i32(tmp); + tmp = gen_ld16u(addr, IS_USER(s)); + } else { /* tbb */ + tcg_temp_free_i32(tmp); + tmp = gen_ld8u(addr, IS_USER(s)); + } + tcg_temp_free_i32(addr); + tcg_gen_shli_i32(tmp, tmp, 1); + tcg_gen_addi_i32(tmp, tmp, s->pc); + store_reg(s, 15, tmp); + } else { + /* Load/store exclusive byte/halfword/doubleword. */ + ARCH(7); + op = (insn >> 4) & 0x3; + if (op == 2) { + goto illegal_op; + } + addr = tcg_temp_local_new(); + load_reg_var(s, addr, rn); + if (insn & (1 << 20)) { + gen_load_exclusive(s, rs, rd, addr, op); + } else { + gen_store_exclusive(s, rm, rs, rd, addr, op); + } + tcg_temp_free(addr); + } + } else { + /* Load/store multiple, RFE, SRS. */ + if (((insn >> 23) & 1) == ((insn >> 24) & 1)) { + /* Not available in user mode. */ + if (IS_USER(s)) + goto illegal_op; + if (insn & (1 << 20)) { + /* rfe */ + addr = load_reg(s, rn); + if ((insn & (1 << 24)) == 0) + tcg_gen_addi_i32(addr, addr, -8); + /* Load PC into tmp and CPSR into tmp2. */ + tmp = gen_ld32(addr, 0); + tcg_gen_addi_i32(addr, addr, 4); + tmp2 = gen_ld32(addr, 0); + if (insn & (1 << 21)) { + /* Base writeback. 
*/ + if (insn & (1 << 24)) { + tcg_gen_addi_i32(addr, addr, 4); + } else { + tcg_gen_addi_i32(addr, addr, -4); + } + store_reg(s, rn, addr); + } else { + tcg_temp_free_i32(addr); + } + gen_rfe(s, tmp, tmp2); + } else { + /* srs */ + op = (insn & 0x1f); + addr = tcg_temp_new_i32(); + tmp = tcg_const_i32(op); + gen_helper_get_r13_banked(addr, cpu_env, tmp); + tcg_temp_free_i32(tmp); + if ((insn & (1 << 24)) == 0) { + tcg_gen_addi_i32(addr, addr, -8); + } + tmp = load_reg(s, 14); + gen_st32(tmp, addr, 0); + tcg_gen_addi_i32(addr, addr, 4); + tmp = tcg_temp_new_i32(); + gen_helper_cpsr_read(tmp); + gen_st32(tmp, addr, 0); + if (insn & (1 << 21)) { + if ((insn & (1 << 24)) == 0) { + tcg_gen_addi_i32(addr, addr, -4); + } else { + tcg_gen_addi_i32(addr, addr, 4); + } + tmp = tcg_const_i32(op); + gen_helper_set_r13_banked(cpu_env, tmp, addr); + tcg_temp_free_i32(tmp); + } else { + tcg_temp_free_i32(addr); + } + } + } else { + int i, loaded_base = 0; + TCGv loaded_var; + /* Load/store multiple. */ + addr = load_reg(s, rn); + offset = 0; + for (i = 0; i < 16; i++) { + if (insn & (1 << i)) + offset += 4; + } + if (insn & (1 << 24)) { + tcg_gen_addi_i32(addr, addr, -offset); + } + + TCGV_UNUSED(loaded_var); + for (i = 0; i < 16; i++) { + if ((insn & (1 << i)) == 0) + continue; + if (insn & (1 << 20)) { + /* Load. */ + tmp = gen_ld32(addr, IS_USER(s)); + if (i == 15) { + gen_bx(s, tmp); + } else if (i == rn) { + loaded_var = tmp; + loaded_base = 1; + } else { + store_reg(s, i, tmp); + } + } else { + /* Store. */ + tmp = load_reg(s, i); + gen_st32(tmp, addr, IS_USER(s)); + } + tcg_gen_addi_i32(addr, addr, 4); + } + if (loaded_base) { + store_reg(s, rn, loaded_var); + } + if (insn & (1 << 21)) { + /* Base register writeback. */ + if (insn & (1 << 24)) { + tcg_gen_addi_i32(addr, addr, -offset); + } + /* Fault if writeback register is in register list. */ + if (insn & (1 << rn)) + goto illegal_op; + store_reg(s, rn, addr); + } else { + tcg_temp_free_i32(addr); + } + } + } + break; + case 5: + + op = (insn >> 21) & 0xf; + if (op == 6) { + /* Halfword pack. */ + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3); + if (insn & (1 << 5)) { + /* pkhtb */ + if (shift == 0) + shift = 31; + tcg_gen_sari_i32(tmp2, tmp2, shift); + tcg_gen_andi_i32(tmp, tmp, 0xffff0000); + tcg_gen_ext16u_i32(tmp2, tmp2); + } else { + /* pkhbt */ + if (shift) + tcg_gen_shli_i32(tmp2, tmp2, shift); + tcg_gen_ext16u_i32(tmp, tmp); + tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000); + } + tcg_gen_or_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + } else { + /* Data processing register constant shift. */ + if (rn == 15) { + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, 0); + } else { + tmp = load_reg(s, rn); + } + tmp2 = load_reg(s, rm); + + shiftop = (insn >> 4) & 3; + shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c); + conds = (insn & (1 << 20)) != 0; + logic_cc = (conds && thumb2_logic_op(op)); + gen_arm_shift_im(tmp2, shiftop, shift, logic_cc); + if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2)) + goto illegal_op; + tcg_temp_free_i32(tmp2); + if (rd != 15) { + store_reg(s, rd, tmp); + } else { + tcg_temp_free_i32(tmp); + } + } + break; + case 13: /* Misc data processing. */ + op = ((insn >> 22) & 6) | ((insn >> 7) & 1); + if (op < 4 && (insn & 0xf000) != 0xf000) + goto illegal_op; + switch (op) { + case 0: /* Register controlled shift. 
*/ + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + if ((insn & 0x70) != 0) + goto illegal_op; + op = (insn >> 21) & 3; + logic_cc = (insn & (1 << 20)) != 0; + gen_arm_shift_reg(tmp, op, tmp2, logic_cc); + if (logic_cc) + gen_logic_CC(tmp); + store_reg_bx(env, s, rd, tmp); + break; + case 1: /* Sign/zero extend. */ + tmp = load_reg(s, rm); + shift = (insn >> 4) & 3; + /* ??? In many cases it's not necessary to do a + rotate, a shift is sufficient. */ + if (shift != 0) + tcg_gen_rotri_i32(tmp, tmp, shift * 8); + op = (insn >> 20) & 7; + switch (op) { + case 0: gen_sxth(tmp); break; + case 1: gen_uxth(tmp); break; + case 2: gen_sxtb16(tmp); break; + case 3: gen_uxtb16(tmp); break; + case 4: gen_sxtb(tmp); break; + case 5: gen_uxtb(tmp); break; + default: goto illegal_op; + } + if (rn != 15) { + tmp2 = load_reg(s, rn); + if ((op >> 1) == 1) { + gen_add16(tmp, tmp2); + } else { + tcg_gen_add_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + } + store_reg(s, rd, tmp); + break; + case 2: /* SIMD add/subtract. */ + op = (insn >> 20) & 7; + shift = (insn >> 4) & 7; + if ((op & 3) == 3 || (shift & 3) == 3) + goto illegal_op; + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + gen_thumb2_parallel_addsub(op, shift, tmp, tmp2); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + break; + case 3: /* Other data processing. */ + op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7); + if (op < 4) { + /* Saturating add/subtract. */ + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + if (op & 1) + gen_helper_double_saturate(tmp, tmp); + if (op & 2) + gen_helper_sub_saturate(tmp, tmp2, tmp); + else + gen_helper_add_saturate(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } else { + tmp = load_reg(s, rn); + switch (op) { + case 0x0a: /* rbit */ + gen_helper_rbit(tmp, tmp); + break; + case 0x08: /* rev */ + tcg_gen_bswap32_i32(tmp, tmp); + break; + case 0x09: /* rev16 */ + gen_rev16(tmp); + break; + case 0x0b: /* revsh */ + gen_revsh(tmp); + break; + case 0x10: /* sel */ + tmp2 = load_reg(s, rm); + tmp3 = tcg_temp_new_i32(); + tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE)); + gen_helper_sel_flags(tmp, tmp3, tmp, tmp2); + tcg_temp_free_i32(tmp3); + tcg_temp_free_i32(tmp2); + break; + case 0x18: /* clz */ + gen_helper_clz(tmp, tmp); + break; + default: + goto illegal_op; + } + } + store_reg(s, rd, tmp); + break; + case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */ + op = (insn >> 4) & 0xf; + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + switch ((insn >> 20) & 7) { + case 0: /* 32 x 32 -> 32 */ + tcg_gen_mul_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + if (rs != 15) { + tmp2 = load_reg(s, rs); + if (op) + tcg_gen_sub_i32(tmp, tmp2, tmp); + else + tcg_gen_add_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + break; + case 1: /* 16 x 16 -> 32 */ + gen_mulxy(tmp, tmp2, op & 2, op & 1); + tcg_temp_free_i32(tmp2); + if (rs != 15) { + tmp2 = load_reg(s, rs); + gen_helper_add_setq(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + break; + case 2: /* Dual multiply add. */ + case 4: /* Dual multiply subtract. */ + if (op) + gen_swap_half(tmp2); + gen_smul_dual(tmp, tmp2); + if (insn & (1 << 22)) { + /* This subtraction cannot overflow. */ + tcg_gen_sub_i32(tmp, tmp, tmp2); + } else { + /* This addition cannot overflow 32 bits; + * however it may overflow considered as a signed + * operation, in which case we must set the Q flag. 
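+                             * E.g. 0x40000000 + 0x40000000 wraps to
+                             * 0x80000000 as a signed sum, so the helper
+                             * raises QF.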
+ */ + gen_helper_add_setq(tmp, tmp, tmp2); + } + tcg_temp_free_i32(tmp2); + if (rs != 15) + { + tmp2 = load_reg(s, rs); + gen_helper_add_setq(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + break; + case 3: /* 32 * 16 -> 32msb */ + if (op) + tcg_gen_sari_i32(tmp2, tmp2, 16); + else + gen_sxth(tmp2); + tmp64 = gen_muls_i64_i32(tmp, tmp2); + tcg_gen_shri_i64(tmp64, tmp64, 16); + tmp = tcg_temp_new_i32(); + tcg_gen_trunc_i64_i32(tmp, tmp64); + tcg_temp_free_i64(tmp64); + if (rs != 15) + { + tmp2 = load_reg(s, rs); + gen_helper_add_setq(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + break; + case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */ + tmp64 = gen_muls_i64_i32(tmp, tmp2); + if (rs != 15) { + tmp = load_reg(s, rs); + if (insn & (1 << 20)) { + tmp64 = gen_addq_msw(tmp64, tmp); + } else { + tmp64 = gen_subq_msw(tmp64, tmp); + } + } + if (insn & (1 << 4)) { + tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u); + } + tcg_gen_shri_i64(tmp64, tmp64, 32); + tmp = tcg_temp_new_i32(); + tcg_gen_trunc_i64_i32(tmp, tmp64); + tcg_temp_free_i64(tmp64); + break; + case 7: /* Unsigned sum of absolute differences. */ + gen_helper_usad8(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + if (rs != 15) { + tmp2 = load_reg(s, rs); + tcg_gen_add_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + break; + } + store_reg(s, rd, tmp); + break; + case 6: case 7: /* 64-bit multiply, Divide. */ + op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70); + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + if ((op & 0x50) == 0x10) { + /* sdiv, udiv */ + if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) { + goto illegal_op; + } + if (op & 0x20) + gen_helper_udiv(tmp, tmp, tmp2); + else + gen_helper_sdiv(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + } else if ((op & 0xe) == 0xc) { + /* Dual multiply accumulate long. */ + if (op & 1) + gen_swap_half(tmp2); + gen_smul_dual(tmp, tmp2); + if (op & 0x10) { + tcg_gen_sub_i32(tmp, tmp, tmp2); + } else { + tcg_gen_add_i32(tmp, tmp, tmp2); + } + tcg_temp_free_i32(tmp2); + /* BUGFIX */ + tmp64 = tcg_temp_new_i64(); + tcg_gen_ext_i32_i64(tmp64, tmp); + tcg_temp_free_i32(tmp); + gen_addq(s, tmp64, rs, rd); + gen_storeq_reg(s, rs, rd, tmp64); + tcg_temp_free_i64(tmp64); + } else { + if (op & 0x20) { + /* Unsigned 64-bit multiply */ + tmp64 = gen_mulu_i64_i32(tmp, tmp2); + } else { + if (op & 8) { + /* smlalxy */ + gen_mulxy(tmp, tmp2, op & 2, op & 1); + tcg_temp_free_i32(tmp2); + tmp64 = tcg_temp_new_i64(); + tcg_gen_ext_i32_i64(tmp64, tmp); + tcg_temp_free_i32(tmp); + } else { + /* Signed 64-bit multiply */ + tmp64 = gen_muls_i64_i32(tmp, tmp2); + } + } + if (op & 4) { + /* umaal */ + gen_addq_lo(s, tmp64, rs); + gen_addq_lo(s, tmp64, rd); + } else if (op & 0x40) { + /* 64-bit accumulate. */ + gen_addq(s, tmp64, rs, rd); + } + gen_storeq_reg(s, rs, rd, tmp64); + tcg_temp_free_i64(tmp64); + } + break; + } + break; + case 6: case 7: case 14: case 15: + /* Coprocessor. */ + if (((insn >> 24) & 3) == 3) { + /* Translate into the equivalent ARM encoding. */ + insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28); + if (disas_neon_data_insn(env, s, insn)) + goto illegal_op; + } else { + if (insn & (1 << 28)) + goto illegal_op; + if (disas_coproc_insn (env, s, insn)) + goto illegal_op; + } + break; + case 8: case 9: case 10: case 11: + if (insn & (1 << 15)) { + /* Branches, misc control. */ + if (insn & 0x5000) { + /* Unconditional branch. */ + /* signextend(hw1[10:0]) -> offset[:12]. 
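+                       The reassembled value is a signed 25-bit offset,
+                       i.e. a branch range of +/-16MB.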
*/ + offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff; + /* hw1[10:0] -> offset[11:1]. */ + offset |= (insn & 0x7ff) << 1; + /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22] + offset[24:22] already have the same value because of the + sign extension above. */ + offset ^= ((~insn) & (1 << 13)) << 10; + offset ^= ((~insn) & (1 << 11)) << 11; + + if (insn & (1 << 14)) { + /* Branch and link. */ + tcg_gen_movi_i32(cpu_R[14], s->pc | 1); + } + + offset += s->pc; + if (insn & (1 << 12)) { + /* b/bl */ + gen_jmp(s, offset); + } else { + /* blx */ + offset &= ~(uint32_t)2; + /* thumb2 bx, no need to check */ + gen_bx_im(s, offset); + } + } else if (((insn >> 23) & 7) == 7) { + /* Misc control */ + if (insn & (1 << 13)) + goto illegal_op; + + if (insn & (1 << 26)) { + /* Secure monitor call (v6Z) */ + goto illegal_op; /* not implemented. */ + } else { + op = (insn >> 20) & 7; + switch (op) { + case 0: /* msr cpsr. */ + if (IS_M(env)) { + tmp = load_reg(s, rn); + addr = tcg_const_i32(insn & 0xff); + gen_helper_v7m_msr(cpu_env, addr, tmp); + tcg_temp_free_i32(addr); + tcg_temp_free_i32(tmp); + gen_lookup_tb(s); + break; + } + /* fall through */ + case 1: /* msr spsr. */ + if (IS_M(env)) + goto illegal_op; + tmp = load_reg(s, rn); + if (gen_set_psr(s, + msr_mask(env, s, (insn >> 8) & 0xf, op == 1), + op == 1, tmp)) + goto illegal_op; + break; + case 2: /* cps, nop-hint. */ + if (((insn >> 8) & 7) == 0) { + gen_nop_hint(s, insn & 0xff); + } + /* Implemented as NOP in user mode. */ + if (IS_USER(s)) + break; + offset = 0; + imm = 0; + if (insn & (1 << 10)) { + if (insn & (1 << 7)) + offset |= CPSR_A; + if (insn & (1 << 6)) + offset |= CPSR_I; + if (insn & (1 << 5)) + offset |= CPSR_F; + if (insn & (1 << 9)) + imm = CPSR_A | CPSR_I | CPSR_F; + } + if (insn & (1 << 8)) { + offset |= 0x1f; + imm |= (insn & 0x1f); + } + if (offset) { + gen_set_psr_im(s, offset, 0, imm); + } + break; + case 3: /* Special control operations. */ + ARCH(7); + op = (insn >> 4) & 0xf; + switch (op) { + case 2: /* clrex */ + gen_clrex(s); + break; + case 4: /* dsb */ + case 5: /* dmb */ + case 6: /* isb */ + /* These execute as NOPs. */ + break; + default: + goto illegal_op; + } + break; + case 4: /* bxj */ + /* Trivial implementation equivalent to bx. */ + tmp = load_reg(s, rn); + gen_bx(s, tmp); + break; + case 5: /* Exception return. */ + if (IS_USER(s)) { + goto illegal_op; + } + if (rn != 14 || rd != 15) { + goto illegal_op; + } + tmp = load_reg(s, rn); + tcg_gen_subi_i32(tmp, tmp, insn & 0xff); + gen_exception_return(s, tmp); + break; + case 6: /* mrs cpsr. */ + tmp = tcg_temp_new_i32(); + if (IS_M(env)) { + addr = tcg_const_i32(insn & 0xff); + gen_helper_v7m_mrs(tmp, cpu_env, addr); + tcg_temp_free_i32(addr); + } else { + gen_helper_cpsr_read(tmp); + } + store_reg(s, rd, tmp); + break; + case 7: /* mrs spsr. */ + /* Not accessible in user mode. */ + if (IS_USER(s) || IS_M(env)) + goto illegal_op; + tmp = load_cpu_field(spsr); + store_reg(s, rd, tmp); + break; + } + } + } else { + /* Conditional branch. */ + op = (insn >> 22) & 0xf; + /* Generate a conditional jump to next instruction. */ + s->condlabel = gen_new_label(); + gen_test_cc(op ^ 1, s->condlabel); + s->condjmp = 1; + + /* offset[11:1] = insn[10:0] */ + offset = (insn & 0x7ff) << 1; + /* offset[17:12] = insn[21:16]. */ + offset |= (insn & 0x003f0000) >> 4; + /* offset[31:20] = insn[26]. */ + offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11; + /* offset[18] = insn[13]. */ + offset |= (insn & (1 << 13)) << 5; + /* offset[19] = insn[11]. 
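+                   The result is a signed 21-bit offset, giving the
+                   conditional branch a range of +/-1MB.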
*/ + offset |= (insn & (1 << 11)) << 8; + + /* jump to the offset */ + gen_jmp(s, s->pc + offset); + } + } else { + /* Data processing immediate. */ + if (insn & (1 << 25)) { + if (insn & (1 << 24)) { + if (insn & (1 << 20)) + goto illegal_op; + /* Bitfield/Saturate. */ + op = (insn >> 21) & 7; + imm = insn & 0x1f; + shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c); + if (rn == 15) { + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, 0); + } else { + tmp = load_reg(s, rn); + } + switch (op) { + case 2: /* Signed bitfield extract. */ + imm++; + if (shift + imm > 32) + goto illegal_op; + if (imm < 32) + gen_sbfx(tmp, shift, imm); + break; + case 6: /* Unsigned bitfield extract. */ + imm++; + if (shift + imm > 32) + goto illegal_op; + if (imm < 32) + gen_ubfx(tmp, shift, (1u << imm) - 1); + break; + case 3: /* Bitfield insert/clear. */ + if (imm < shift) + goto illegal_op; + imm = imm + 1 - shift; + if (imm != 32) { + tmp2 = load_reg(s, rd); + gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1); + tcg_temp_free_i32(tmp2); + } + break; + case 7: + goto illegal_op; + default: /* Saturate. */ + if (shift) { + if (op & 1) + tcg_gen_sari_i32(tmp, tmp, shift); + else + tcg_gen_shli_i32(tmp, tmp, shift); + } + tmp2 = tcg_const_i32(imm); + if (op & 4) { + /* Unsigned. */ + if ((op & 1) && shift == 0) + gen_helper_usat16(tmp, tmp, tmp2); + else + gen_helper_usat(tmp, tmp, tmp2); + } else { + /* Signed. */ + if ((op & 1) && shift == 0) + gen_helper_ssat16(tmp, tmp, tmp2); + else + gen_helper_ssat(tmp, tmp, tmp2); + } + tcg_temp_free_i32(tmp2); + break; + } + store_reg(s, rd, tmp); + } else { + imm = ((insn & 0x04000000) >> 15) + | ((insn & 0x7000) >> 4) | (insn & 0xff); + if (insn & (1 << 22)) { + /* 16-bit immediate. */ + imm |= (insn >> 4) & 0xf000; + if (insn & (1 << 23)) { + /* movt */ + tmp = load_reg(s, rd); + tcg_gen_ext16u_i32(tmp, tmp); + tcg_gen_ori_i32(tmp, tmp, imm << 16); + } else { + /* movw */ + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, imm); + } + } else { + /* Add/sub 12-bit immediate. */ + if (rn == 15) { + offset = s->pc & ~(uint32_t)3; + if (insn & (1 << 23)) + offset -= imm; + else + offset += imm; + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, offset); + } else { + tmp = load_reg(s, rn); + if (insn & (1 << 23)) + tcg_gen_subi_i32(tmp, tmp, imm); + else + tcg_gen_addi_i32(tmp, tmp, imm); + } + } + store_reg(s, rd, tmp); + } + } else { + int shifter_out = 0; + /* modified 12-bit immediate. */ + shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12); + imm = (insn & 0xff); + switch (shift) { + case 0: /* XY */ + /* Nothing to do. */ + break; + case 1: /* 00XY00XY */ + imm |= imm << 16; + break; + case 2: /* XY00XY00 */ + imm |= imm << 16; + imm <<= 8; + break; + case 3: /* XYXYXYXY */ + imm |= imm << 16; + imm |= imm << 8; + break; + default: /* Rotated constant. */ + shift = (shift << 1) | (imm >> 7); + imm |= 0x80; + imm = imm << (32 - shift); + shifter_out = 1; + break; + } + tmp2 = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp2, imm); + rn = (insn >> 16) & 0xf; + if (rn == 15) { + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, 0); + } else { + tmp = load_reg(s, rn); + } + op = (insn >> 21) & 0xf; + if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0, + shifter_out, tmp, tmp2)) + goto illegal_op; + tcg_temp_free_i32(tmp2); + rd = (insn >> 8) & 0xf; + if (rd != 15) { + store_reg(s, rd, tmp); + } else { + tcg_temp_free_i32(tmp); + } + } + } + break; + case 12: /* Load/store single data item. 
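+ The op value decoded below packs the access size into op[1:0]
+ (0 = byte, 1 = halfword, 2 = word) and sign extension into op[2],
+ which is how the load/store switches further down dispatch to
+ gen_ld8u/gen_ld8s/gen_ld16u/gen_ld16s/gen_ld32.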
*/ + { + int postinc = 0; + int writeback = 0; + int user; + if ((insn & 0x01100000) == 0x01000000) { + if (disas_neon_ls_insn(env, s, insn)) + goto illegal_op; + break; + } + op = ((insn >> 21) & 3) | ((insn >> 22) & 4); + if (rs == 15) { + if (!(insn & (1 << 20))) { + goto illegal_op; + } + if (op != 2) { + /* Byte or halfword load space with dest == r15 : memory hints. + * Catch them early so we don't emit pointless addressing code. + * This space is a mix of: + * PLD/PLDW/PLI, which we implement as NOPs (note that unlike + * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP + * cores) + * unallocated hints, which must be treated as NOPs + * UNPREDICTABLE space, which we NOP or UNDEF depending on + * which is easiest for the decoding logic + * Some space which must UNDEF + */ + int op1 = (insn >> 23) & 3; + int op2 = (insn >> 6) & 0x3f; + if (op & 2) { + goto illegal_op; + } + if (rn == 15) { + /* UNPREDICTABLE, unallocated hint or + * PLD/PLDW/PLI (literal) + */ + return 0; + } + if (op1 & 1) { + return 0; /* PLD/PLDW/PLI or unallocated hint */ + } + if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) { + return 0; /* PLD/PLDW/PLI or unallocated hint */ + } + /* UNDEF space, or an UNPREDICTABLE */ + return 1; + } + } + user = IS_USER(s); + if (rn == 15) { + addr = tcg_temp_new_i32(); + /* PC relative. */ + /* s->pc has already been incremented by 4. */ + imm = s->pc & 0xfffffffc; + if (insn & (1 << 23)) + imm += insn & 0xfff; + else + imm -= insn & 0xfff; + tcg_gen_movi_i32(addr, imm); + } else { + addr = load_reg(s, rn); + if (insn & (1 << 23)) { + /* Positive offset. */ + imm = insn & 0xfff; + tcg_gen_addi_i32(addr, addr, imm); + } else { + imm = insn & 0xff; + switch ((insn >> 8) & 0xf) { + case 0x0: /* Shifted Register. */ + shift = (insn >> 4) & 0xf; + if (shift > 3) { + tcg_temp_free_i32(addr); + goto illegal_op; + } + tmp = load_reg(s, rm); + if (shift) + tcg_gen_shli_i32(tmp, tmp, shift); + tcg_gen_add_i32(addr, addr, tmp); + tcg_temp_free_i32(tmp); + break; + case 0xc: /* Negative offset. */ + tcg_gen_addi_i32(addr, addr, -imm); + break; + case 0xe: /* User privilege. */ + tcg_gen_addi_i32(addr, addr, imm); + user = 1; + break; + case 0x9: /* Post-decrement. */ + imm = -imm; + /* Fall through. */ + case 0xb: /* Post-increment. */ + postinc = 1; + writeback = 1; + break; + case 0xd: /* Pre-decrement. */ + imm = -imm; + /* Fall through. */ + case 0xf: /* Pre-increment. */ + tcg_gen_addi_i32(addr, addr, imm); + writeback = 1; + break; + default: + tcg_temp_free_i32(addr); + goto illegal_op; + } + } + } + if (insn & (1 << 20)) { + /* Load. */ + switch (op) { + case 0: tmp = gen_ld8u(addr, user); break; + case 4: tmp = gen_ld8s(addr, user); break; + case 1: tmp = gen_ld16u(addr, user); break; + case 5: tmp = gen_ld16s(addr, user); break; + case 2: tmp = gen_ld32(addr, user); break; + default: + tcg_temp_free_i32(addr); + goto illegal_op; + } + if (rs == 15) { + gen_bx(s, tmp); + } else { + store_reg(s, rs, tmp); + } + } else { + /* Store. 
*/ + tmp = load_reg(s, rs); + switch (op) { + case 0: gen_st8(tmp, addr, user); break; + case 1: gen_st16(tmp, addr, user); break; + case 2: gen_st32(tmp, addr, user); break; + default: + tcg_temp_free_i32(addr); + goto illegal_op; + } + } + if (postinc) + tcg_gen_addi_i32(addr, addr, imm); + if (writeback) { + store_reg(s, rn, addr); + } else { + tcg_temp_free_i32(addr); + } + } + break; + default: + goto illegal_op; + } + return 0; +illegal_op: + return 1; +} + +static void disas_thumb_insn(CPUARMState *env, DisasContext *s) +{ + uint32_t val, insn, op, rm, rn, rd, shift, cond; + int32_t offset; + int i; + TCGv tmp; + TCGv tmp2; + TCGv addr; + + if (s->condexec_mask) { + cond = s->condexec_cond; + if (cond != 0x0e) { /* Skip conditional when condition is AL. */ + s->condlabel = gen_new_label(); + gen_test_cc(cond ^ 1, s->condlabel); + s->condjmp = 1; + } + } + + insn = arm_lduw_code(s->pc, s->bswap_code); + s->pc += 2; + + switch (insn >> 12) { + case 0: case 1: + + rd = insn & 7; + op = (insn >> 11) & 3; + if (op == 3) { + /* add/subtract */ + rn = (insn >> 3) & 7; + tmp = load_reg(s, rn); + if (insn & (1 << 10)) { + /* immediate */ + tmp2 = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp2, (insn >> 6) & 7); + } else { + /* reg */ + rm = (insn >> 6) & 7; + tmp2 = load_reg(s, rm); + } + if (insn & (1 << 9)) { + if (s->condexec_mask) + tcg_gen_sub_i32(tmp, tmp, tmp2); + else + gen_helper_sub_cc(tmp, tmp, tmp2); + } else { + if (s->condexec_mask) + tcg_gen_add_i32(tmp, tmp, tmp2); + else + gen_helper_add_cc(tmp, tmp, tmp2); + } + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + } else { + /* shift immediate */ + rm = (insn >> 3) & 7; + shift = (insn >> 6) & 0x1f; + tmp = load_reg(s, rm); + gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0); + if (!s->condexec_mask) + gen_logic_CC(tmp); + store_reg(s, rd, tmp); + } + break; + case 2: case 3: + /* arithmetic large immediate */ + op = (insn >> 11) & 3; + rd = (insn >> 8) & 0x7; + if (op == 0) { /* mov */ + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, insn & 0xff); + if (!s->condexec_mask) + gen_logic_CC(tmp); + store_reg(s, rd, tmp); + } else { + tmp = load_reg(s, rd); + tmp2 = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp2, insn & 0xff); + switch (op) { + case 1: /* cmp */ + gen_helper_sub_cc(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp); + tcg_temp_free_i32(tmp2); + break; + case 2: /* add */ + if (s->condexec_mask) + tcg_gen_add_i32(tmp, tmp, tmp2); + else + gen_helper_add_cc(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + break; + case 3: /* sub */ + if (s->condexec_mask) + tcg_gen_sub_i32(tmp, tmp, tmp2); + else + gen_helper_sub_cc(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + break; + } + } + break; + case 4: + if (insn & (1 << 11)) { + rd = (insn >> 8) & 7; + /* load pc-relative. Bit 1 of PC is ignored. 
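+ The effective base is Align(PC, 4): s->pc has already been
+ advanced past this insn, so s->pc + 2 below is the architectural
+ PC (instruction address + 4) before bit 1 is masked off.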
*/ + val = s->pc + 2 + ((insn & 0xff) * 4); + val &= ~(uint32_t)2; + addr = tcg_temp_new_i32(); + tcg_gen_movi_i32(addr, val); + tmp = gen_ld32(addr, IS_USER(s)); + tcg_temp_free_i32(addr); + store_reg(s, rd, tmp); + break; + } + if (insn & (1 << 10)) { + /* data processing extended or blx */ + rd = (insn & 7) | ((insn >> 4) & 8); + rm = (insn >> 3) & 0xf; + op = (insn >> 8) & 3; + switch (op) { + case 0: /* add */ + tmp = load_reg(s, rd); + tmp2 = load_reg(s, rm); + tcg_gen_add_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + break; + case 1: /* cmp */ + tmp = load_reg(s, rd); + tmp2 = load_reg(s, rm); + gen_helper_sub_cc(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp); + break; + case 2: /* mov/cpy */ + tmp = load_reg(s, rm); + store_reg(s, rd, tmp); + break; + case 3:/* branch [and link] exchange thumb register */ + tmp = load_reg(s, rm); + if (insn & (1 << 7)) { + ARCH(5); + val = (uint32_t)s->pc | 1; + tmp2 = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp2, val); + store_reg(s, 14, tmp2); + } + /* already thumb, no need to check */ + gen_bx(s, tmp); + break; + } + break; + } + + /* data processing register */ + rd = insn & 7; + rm = (insn >> 3) & 7; + op = (insn >> 6) & 0xf; + if (op == 2 || op == 3 || op == 4 || op == 7) { + /* the shift/rotate ops want the operands backwards */ + val = rm; + rm = rd; + rd = val; + val = 1; + } else { + val = 0; + } + + if (op == 9) { /* neg */ + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, 0); + } else if (op != 0xf) { /* mvn doesn't read its first operand */ + tmp = load_reg(s, rd); + } else { + TCGV_UNUSED(tmp); + } + + tmp2 = load_reg(s, rm); + switch (op) { + case 0x0: /* and */ + tcg_gen_and_i32(tmp, tmp, tmp2); + if (!s->condexec_mask) + gen_logic_CC(tmp); + break; + case 0x1: /* eor */ + tcg_gen_xor_i32(tmp, tmp, tmp2); + if (!s->condexec_mask) + gen_logic_CC(tmp); + break; + case 0x2: /* lsl */ + if (s->condexec_mask) { + gen_helper_shl(tmp2, tmp2, tmp); + } else { + gen_helper_shl_cc(tmp2, tmp2, tmp); + gen_logic_CC(tmp2); + } + break; + case 0x3: /* lsr */ + if (s->condexec_mask) { + gen_helper_shr(tmp2, tmp2, tmp); + } else { + gen_helper_shr_cc(tmp2, tmp2, tmp); + gen_logic_CC(tmp2); + } + break; + case 0x4: /* asr */ + if (s->condexec_mask) { + gen_helper_sar(tmp2, tmp2, tmp); + } else { + gen_helper_sar_cc(tmp2, tmp2, tmp); + gen_logic_CC(tmp2); + } + break; + case 0x5: /* adc */ + if (s->condexec_mask) + gen_adc(tmp, tmp2); + else + gen_helper_adc_cc(tmp, tmp, tmp2); + break; + case 0x6: /* sbc */ + if (s->condexec_mask) + gen_sub_carry(tmp, tmp, tmp2); + else + gen_helper_sbc_cc(tmp, tmp, tmp2); + break; + case 0x7: /* ror */ + if (s->condexec_mask) { + tcg_gen_andi_i32(tmp, tmp, 0x1f); + tcg_gen_rotr_i32(tmp2, tmp2, tmp); + } else { + gen_helper_ror_cc(tmp2, tmp2, tmp); + gen_logic_CC(tmp2); + } + break; + case 0x8: /* tst */ + tcg_gen_and_i32(tmp, tmp, tmp2); + gen_logic_CC(tmp); + rd = 16; + break; + case 0x9: /* neg */ + if (s->condexec_mask) + tcg_gen_neg_i32(tmp, tmp2); + else + gen_helper_sub_cc(tmp, tmp, tmp2); + break; + case 0xa: /* cmp */ + gen_helper_sub_cc(tmp, tmp, tmp2); + rd = 16; + break; + case 0xb: /* cmn */ + gen_helper_add_cc(tmp, tmp, tmp2); + rd = 16; + break; + case 0xc: /* orr */ + tcg_gen_or_i32(tmp, tmp, tmp2); + if (!s->condexec_mask) + gen_logic_CC(tmp); + break; + case 0xd: /* mul */ + tcg_gen_mul_i32(tmp, tmp, tmp2); + if (!s->condexec_mask) + gen_logic_CC(tmp); + break; + case 0xe: /* bic */ + tcg_gen_andc_i32(tmp, tmp, tmp2); + if (!s->condexec_mask) + 
gen_logic_CC(tmp); + break; + case 0xf: /* mvn */ + tcg_gen_not_i32(tmp2, tmp2); + if (!s->condexec_mask) + gen_logic_CC(tmp2); + val = 1; + rm = rd; + break; + } + if (rd != 16) { + if (val) { + store_reg(s, rm, tmp2); + if (op != 0xf) + tcg_temp_free_i32(tmp); + } else { + store_reg(s, rd, tmp); + tcg_temp_free_i32(tmp2); + } + } else { + tcg_temp_free_i32(tmp); + tcg_temp_free_i32(tmp2); + } + break; + + case 5: + /* load/store register offset. */ + rd = insn & 7; + rn = (insn >> 3) & 7; + rm = (insn >> 6) & 7; + op = (insn >> 9) & 7; + addr = load_reg(s, rn); + tmp = load_reg(s, rm); + tcg_gen_add_i32(addr, addr, tmp); + tcg_temp_free_i32(tmp); + + if (op < 3) /* store */ + tmp = load_reg(s, rd); + + switch (op) { + case 0: /* str */ + gen_st32(tmp, addr, IS_USER(s)); + break; + case 1: /* strh */ + gen_st16(tmp, addr, IS_USER(s)); + break; + case 2: /* strb */ + gen_st8(tmp, addr, IS_USER(s)); + break; + case 3: /* ldrsb */ + tmp = gen_ld8s(addr, IS_USER(s)); + break; + case 4: /* ldr */ + tmp = gen_ld32(addr, IS_USER(s)); + break; + case 5: /* ldrh */ + tmp = gen_ld16u(addr, IS_USER(s)); + break; + case 6: /* ldrb */ + tmp = gen_ld8u(addr, IS_USER(s)); + break; + case 7: /* ldrsh */ + tmp = gen_ld16s(addr, IS_USER(s)); + break; + } + if (op >= 3) /* load */ + store_reg(s, rd, tmp); + tcg_temp_free_i32(addr); + break; + + case 6: + /* load/store word immediate offset */ + rd = insn & 7; + rn = (insn >> 3) & 7; + addr = load_reg(s, rn); + val = (insn >> 4) & 0x7c; + tcg_gen_addi_i32(addr, addr, val); + + if (insn & (1 << 11)) { + /* load */ + tmp = gen_ld32(addr, IS_USER(s)); + store_reg(s, rd, tmp); + } else { + /* store */ + tmp = load_reg(s, rd); + gen_st32(tmp, addr, IS_USER(s)); + } + tcg_temp_free_i32(addr); + break; + + case 7: + /* load/store byte immediate offset */ + rd = insn & 7; + rn = (insn >> 3) & 7; + addr = load_reg(s, rn); + val = (insn >> 6) & 0x1f; + tcg_gen_addi_i32(addr, addr, val); + + if (insn & (1 << 11)) { + /* load */ + tmp = gen_ld8u(addr, IS_USER(s)); + store_reg(s, rd, tmp); + } else { + /* store */ + tmp = load_reg(s, rd); + gen_st8(tmp, addr, IS_USER(s)); + } + tcg_temp_free_i32(addr); + break; + + case 8: + /* load/store halfword immediate offset */ + rd = insn & 7; + rn = (insn >> 3) & 7; + addr = load_reg(s, rn); + val = (insn >> 5) & 0x3e; + tcg_gen_addi_i32(addr, addr, val); + + if (insn & (1 << 11)) { + /* load */ + tmp = gen_ld16u(addr, IS_USER(s)); + store_reg(s, rd, tmp); + } else { + /* store */ + tmp = load_reg(s, rd); + gen_st16(tmp, addr, IS_USER(s)); + } + tcg_temp_free_i32(addr); + break; + + case 9: + /* load/store from stack */ + rd = (insn >> 8) & 7; + addr = load_reg(s, 13); + val = (insn & 0xff) * 4; + tcg_gen_addi_i32(addr, addr, val); + + if (insn & (1 << 11)) { + /* load */ + tmp = gen_ld32(addr, IS_USER(s)); + store_reg(s, rd, tmp); + } else { + /* store */ + tmp = load_reg(s, rd); + gen_st32(tmp, addr, IS_USER(s)); + } + tcg_temp_free_i32(addr); + break; + + case 10: + /* add to high reg */ + rd = (insn >> 8) & 7; + if (insn & (1 << 11)) { + /* SP */ + tmp = load_reg(s, 13); + } else { + /* PC. bit 1 is ignored. 
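+ As with the pc-relative load above, the operand is the
+ word-aligned architectural PC, i.e. (s->pc + 2) & ~2.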
*/ + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2); + } + val = (insn & 0xff) * 4; + tcg_gen_addi_i32(tmp, tmp, val); + store_reg(s, rd, tmp); + break; + + case 11: + /* misc */ + op = (insn >> 8) & 0xf; + switch (op) { + case 0: + /* adjust stack pointer */ + tmp = load_reg(s, 13); + val = (insn & 0x7f) * 4; + if (insn & (1 << 7)) + val = -(int32_t)val; + tcg_gen_addi_i32(tmp, tmp, val); + store_reg(s, 13, tmp); + break; + + case 2: /* sign/zero extend. */ + ARCH(6); + rd = insn & 7; + rm = (insn >> 3) & 7; + tmp = load_reg(s, rm); + switch ((insn >> 6) & 3) { + case 0: gen_sxth(tmp); break; + case 1: gen_sxtb(tmp); break; + case 2: gen_uxth(tmp); break; + case 3: gen_uxtb(tmp); break; + } + store_reg(s, rd, tmp); + break; + case 4: case 5: case 0xc: case 0xd: + /* push/pop */ + addr = load_reg(s, 13); + if (insn & (1 << 8)) + offset = 4; + else + offset = 0; + for (i = 0; i < 8; i++) { + if (insn & (1 << i)) + offset += 4; + } + if ((insn & (1 << 11)) == 0) { + tcg_gen_addi_i32(addr, addr, -offset); + } + for (i = 0; i < 8; i++) { + if (insn & (1 << i)) { + if (insn & (1 << 11)) { + /* pop */ + tmp = gen_ld32(addr, IS_USER(s)); + store_reg(s, i, tmp); + } else { + /* push */ + tmp = load_reg(s, i); + gen_st32(tmp, addr, IS_USER(s)); + } + /* advance to the next address. */ + tcg_gen_addi_i32(addr, addr, 4); + } + } + TCGV_UNUSED(tmp); + if (insn & (1 << 8)) { + if (insn & (1 << 11)) { + /* pop pc */ + tmp = gen_ld32(addr, IS_USER(s)); + /* don't set the pc until the rest of the instruction + has completed */ + } else { + /* push lr */ + tmp = load_reg(s, 14); + gen_st32(tmp, addr, IS_USER(s)); + } + tcg_gen_addi_i32(addr, addr, 4); + } + if ((insn & (1 << 11)) == 0) { + tcg_gen_addi_i32(addr, addr, -offset); + } + /* write back the new stack pointer */ + store_reg(s, 13, addr); + /* set the new PC value */ + if ((insn & 0x0900) == 0x0900) { + store_reg_from_load(env, s, 15, tmp); + } + break; + + case 1: case 3: case 9: case 11: /* czb */ + rm = insn & 7; + tmp = load_reg(s, rm); + s->condlabel = gen_new_label(); + s->condjmp = 1; + if (insn & (1 << 11)) + tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel); + else + tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel); + tcg_temp_free_i32(tmp); + offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3; + val = (uint32_t)s->pc + 2; + val += offset; + gen_jmp(s, val); + break; + + case 15: /* IT, nop-hint. */ + if ((insn & 0xf) == 0) { + gen_nop_hint(s, (insn >> 4) & 0xf); + break; + } + /* If Then. */ + s->condexec_cond = (insn >> 4) & 0xe; + s->condexec_mask = insn & 0x1f; + /* No actual code generated for this insn, just setup state. */ + break; + + case 0xe: /* bkpt */ + ARCH(5); + gen_exception_insn(s, 2, EXCP_BKPT); + break; + + case 0xa: /* rev */ + ARCH(6); + rn = (insn >> 3) & 0x7; + rd = insn & 0x7; + tmp = load_reg(s, rn); + switch ((insn >> 6) & 3) { + case 0: tcg_gen_bswap32_i32(tmp, tmp); break; + case 1: gen_rev16(tmp); break; + case 3: gen_revsh(tmp); break; + default: goto illegal_op; + } + store_reg(s, rd, tmp); + break; + + case 6: + switch ((insn >> 5) & 7) { + case 2: + /* setend */ + ARCH(6); + if (((insn >> 3) & 1) != s->bswap_code) { + /* Dynamic endianness switching not implemented. 
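+ A SETEND selecting the endianness already fixed at translation
+ time (s->bswap_code) falls through below as a NOP; requesting
+ the opposite endianness UNDEFs here instead.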
*/ + goto illegal_op; + } + break; + case 3: + /* cps */ + ARCH(6); + if (IS_USER(s)) { + break; + } + if (IS_M(env)) { + tmp = tcg_const_i32((insn & (1 << 4)) != 0); + /* FAULTMASK */ + if (insn & 1) { + addr = tcg_const_i32(19); + gen_helper_v7m_msr(cpu_env, addr, tmp); + tcg_temp_free_i32(addr); + } + /* PRIMASK */ + if (insn & 2) { + addr = tcg_const_i32(16); + gen_helper_v7m_msr(cpu_env, addr, tmp); + tcg_temp_free_i32(addr); + } + tcg_temp_free_i32(tmp); + gen_lookup_tb(s); + } else { + if (insn & (1 << 4)) { + shift = CPSR_A | CPSR_I | CPSR_F; + } else { + shift = 0; + } + gen_set_psr_im(s, ((insn & 7) << 6), 0, shift); + } + break; + default: + goto undef; + } + break; + + default: + goto undef; + } + break; + + case 12: + { + /* load/store multiple */ + TCGv loaded_var; + TCGV_UNUSED(loaded_var); + rn = (insn >> 8) & 0x7; + addr = load_reg(s, rn); + for (i = 0; i < 8; i++) { + if (insn & (1 << i)) { + if (insn & (1 << 11)) { + /* load */ + tmp = gen_ld32(addr, IS_USER(s)); + if (i == rn) { + loaded_var = tmp; + } else { + store_reg(s, i, tmp); + } + } else { + /* store */ + tmp = load_reg(s, i); + gen_st32(tmp, addr, IS_USER(s)); + } + /* advance to the next address */ + tcg_gen_addi_i32(addr, addr, 4); + } + } + if ((insn & (1 << rn)) == 0) { + /* base reg not in list: base register writeback */ + store_reg(s, rn, addr); + } else { + /* base reg in list: if load, complete it now */ + if (insn & (1 << 11)) { + store_reg(s, rn, loaded_var); + } + tcg_temp_free_i32(addr); + } + break; + } + case 13: + /* conditional branch or swi */ + cond = (insn >> 8) & 0xf; + if (cond == 0xe) + goto undef; + + if (cond == 0xf) { + /* swi */ + gen_set_pc_im(s->pc); + s->is_jmp = DISAS_SWI; + break; + } + /* generate a conditional jump to next instruction */ + s->condlabel = gen_new_label(); + gen_test_cc(cond ^ 1, s->condlabel); + s->condjmp = 1; + + /* jump to the offset */ + val = (uint32_t)s->pc + 2; + offset = ((int32_t)insn << 24) >> 24; + val += offset << 1; + gen_jmp(s, val); + break; + + case 14: + if (insn & (1 << 11)) { + if (disas_thumb2_insn(env, s, insn)) + goto undef32; + break; + } + /* unconditional branch */ + val = (uint32_t)s->pc; + offset = ((int32_t)insn << 21) >> 21; + val += (offset << 1) + 2; + gen_jmp(s, val); + break; + + case 15: + if (disas_thumb2_insn(env, s, insn)) + goto undef32; + break; + } + return; +undef32: + gen_exception_insn(s, 4, EXCP_UDEF); + return; +illegal_op: +undef: + gen_exception_insn(s, 2, EXCP_UDEF); +} + +/* generate intermediate code in gen_opc_buf and gen_opparam_buf for + basic block 'tb'. If search_pc is TRUE, also generate PC + information for each intermediate instruction. 
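+ The search_pc pass re-translates the block purely to fill in
+ gen_opc_pc[] and gen_opc_condexec_bits[], which
+ restore_state_to_opc() later uses to rebuild regs[15] and the
+ IT state when an exception lands mid-TB.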
*/ +static inline void gen_intermediate_code_internal(CPUARMState *env, + TranslationBlock *tb, + int search_pc) +{ + DisasContext dc1, *dc = &dc1; + CPUBreakpoint *bp; + uint16_t *gen_opc_end; + int j, lj; + target_ulong pc_start; + uint32_t next_page_start; + int num_insns; + int max_insns; + + /* generate intermediate code */ + pc_start = tb->pc; + + dc->tb = tb; + + gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; + + dc->is_jmp = DISAS_NEXT; + dc->pc = pc_start; + dc->singlestep_enabled = env->singlestep_enabled; + dc->condjmp = 0; + dc->thumb = ARM_TBFLAG_THUMB(tb->flags); + dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags); + dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1; + dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4; +#if !defined(CONFIG_USER_ONLY) + dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0); +#endif + dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags); + dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags); + dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags); + cpu_F0s = tcg_temp_new_i32(); + cpu_F1s = tcg_temp_new_i32(); + cpu_F0d = tcg_temp_new_i64(); + cpu_F1d = tcg_temp_new_i64(); + cpu_V0 = cpu_F0d; + cpu_V1 = cpu_F1d; + /* FIXME: cpu_M0 can probably be the same as cpu_V0. */ + cpu_M0 = tcg_temp_new_i64(); + next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; + lj = -1; + num_insns = 0; + max_insns = tb->cflags & CF_COUNT_MASK; + if (max_insns == 0) + max_insns = CF_COUNT_MASK; + + //gen_icount_start(); + + tcg_clear_temp_count(); + + /* A note on handling of the condexec (IT) bits: + * + * We want to avoid the overhead of having to write the updated condexec + * bits back to the CPUARMState for every instruction in an IT block. So: + * (1) if the condexec bits are not already zero then we write + * zero back into the CPUARMState now. This avoids complications trying + * to do it at the end of the block. (For example if we don't do this + * it's hard to identify whether we can safely skip writing condexec + * at the end of the TB, which we definitely want to do for the case + * where a TB doesn't do anything with the IT state at all.) + * (2) if we are going to leave the TB then we call gen_set_condexec() + * which will write the correct value into CPUARMState if zero is wrong. + * This is done both for leaving the TB at the end, and for leaving + * it because of an exception we know will happen, which is done in + * gen_exception_insn(). The latter is necessary because we need to + * leave the TB with the PC/IT state just prior to execution of the + * instruction which caused the exception. + * (3) if we leave the TB unexpectedly (eg a data abort on a load) + * then the CPUARMState will be wrong and we need to reset it. + * This is handled in the same way as restoration of the + * PC in these situations: we will be called again with search_pc=1 + * and generate a mapping of the condexec bits for each PC in + * gen_opc_condexec_bits[]. restore_state_to_opc() then uses + * this to restore the condexec bits. + * + * Note that there are no instructions which can read the condexec + * bits, and none which can write non-static values to them, so + * we don't need to care about whether CPUARMState is correct in the + * middle of a TB. + */ + + /* Reset the conditional execution bits immediately. This avoids + complications trying to do it at the end of the block. 
*/ + if (dc->condexec_mask || dc->condexec_cond) + { + TCGv tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, 0); + store_cpu_field(tmp, condexec_bits); + } + do { +#ifdef CONFIG_USER_ONLY + /* Intercept jump to the magic kernel page. */ + if (dc->pc >= 0xffff0000) { + /* We always get here via a jump, so know we are not in a + conditional execution block. */ + gen_exception(EXCP_KERNEL_TRAP); + dc->is_jmp = DISAS_UPDATE; + break; + } +#else + if (dc->pc >= 0xfffffff0 && IS_M(env)) { + /* We always get here via a jump, so know we are not in a + conditional execution block. */ + gen_exception(EXCP_EXCEPTION_EXIT); + dc->is_jmp = DISAS_UPDATE; + break; + } +#endif + + if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) { + QTAILQ_FOREACH(bp, &env->breakpoints, entry) { + if (bp->pc == dc->pc) { + gen_exception_insn(dc, 0, EXCP_DEBUG); + /* Advance PC so that clearing the breakpoint will + invalidate this TB. */ + dc->pc += 2; + goto done_generating; + break; + } + } + } + if (search_pc) { + j = gen_opc_ptr - gen_opc_buf; + if (lj < j) { + lj++; + while (lj < j) + gen_opc_instr_start[lj++] = 0; + } + gen_opc_pc[lj] = dc->pc; + gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1); + gen_opc_instr_start[lj] = 1; + gen_opc_icount[lj] = num_insns; + } + + //if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) + //gen_io_start(); + +// if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) { +// tcg_gen_debug_insn_start(dc->pc); +// } + + if (dc->thumb) { + disas_thumb_insn(env, dc); + if (dc->condexec_mask) { + dc->condexec_cond = (dc->condexec_cond & 0xe) + | ((dc->condexec_mask >> 4) & 1); + dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f; + if (dc->condexec_mask == 0) { + dc->condexec_cond = 0; + } + } + } else { + disas_arm_insn(env, dc); + } + + if (dc->condjmp && !dc->is_jmp) { + gen_set_label(dc->condlabel); + dc->condjmp = 0; + } + + if (tcg_check_temp_count()) { + fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc); + } + + /* Translation stops when a conditional branch is encountered. + * Otherwise the subsequent code could get translated several times. + * Also stop translation when a page boundary is reached. This + * ensures prefetch aborts occur at the right place. */ + num_insns ++; + } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end && + !env->singlestep_enabled && + !singlestep && + dc->pc < next_page_start && + num_insns < max_insns); + + if (tb->cflags & CF_LAST_IO) { + if (dc->condjmp) { + /* FIXME: This can theoretically happen with self-modifying + code. */ + cpu_abort(env, "IO on conditional branch instruction"); + } + //gen_io_end(); + } + + /* At this stage dc->condjmp will only be set when the skipped + instruction was a conditional branch or trap, and the PC has + already been written. */ + if (unlikely(env->singlestep_enabled)) { + /* Make sure the pc is updated, and raise a debug exception. */ + if (dc->condjmp) { + gen_set_condexec(dc); + if (dc->is_jmp == DISAS_SWI) { + gen_exception(EXCP_SWI); + } else { + gen_exception(EXCP_DEBUG); + } + gen_set_label(dc->condlabel); + } + if (dc->condjmp || !dc->is_jmp) { + gen_set_pc_im(dc->pc); + dc->condjmp = 0; + } + gen_set_condexec(dc); + if (dc->is_jmp == DISAS_SWI && !dc->condjmp) { + gen_exception(EXCP_SWI); + } else { + /* FIXME: Single stepping a WFI insn will not halt + the CPU. 
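+ In the singlestep path we raise EXCP_DEBUG below instead of
+ reaching the DISAS_WFI case, so gen_helper_wfi() is never
+ emitted and the core never enters its halted state.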
*/
+ gen_exception(EXCP_DEBUG);
+ }
+ } else {
+ /* While branches must always occur at the end of an IT block,
+ there are a few other things that can cause us to terminate
+ the TB in the middle of an IT block:
+ - Exception generating instructions (bkpt, swi, undefined).
+ - Page boundaries.
+ - Hardware watchpoints.
+ Hardware breakpoints have already been handled and skip this code.
+ */
+ gen_set_condexec(dc);
+ switch(dc->is_jmp) {
+ case DISAS_NEXT:
+ gen_goto_tb(dc, 1, dc->pc);
+ break;
+ default:
+ case DISAS_JUMP:
+ case DISAS_UPDATE:
+ /* indicate that the hash table must be used to find the next TB */
+ tcg_gen_exit_tb(0);
+ break;
+ case DISAS_TB_JUMP:
+ /* nothing more to generate */
+ break;
+ case DISAS_WFI:
+ gen_helper_wfi();
+ break;
+ case DISAS_SWI:
+ gen_exception(EXCP_SWI);
+ break;
+ }
+ if (dc->condjmp) {
+ gen_set_label(dc->condlabel);
+ gen_set_condexec(dc);
+ gen_goto_tb(dc, 1, dc->pc);
+ dc->condjmp = 0;
+ }
+ }
+
+done_generating:
+ //gen_icount_end(tb, num_insns);
+ *gen_opc_ptr = INDEX_op_end;
+
+//#ifdef DEBUG_DISAS
+// if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+// qemu_log("----------------\n");
+// qemu_log("IN: %s\n", lookup_symbol(pc_start));
+// log_target_disas(pc_start, dc->pc - pc_start,
+// dc->thumb | (dc->bswap_code << 1));
+// qemu_log("\n");
+// }
+//#endif
+ if (search_pc) {
+ j = gen_opc_ptr - gen_opc_buf;
+ lj++;
+ while (lj <= j)
+ gen_opc_instr_start[lj++] = 0;
+ } else {
+ tb->size = dc->pc - pc_start;
+ tb->icount = num_insns;
+ }
+}
+
+void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
+{
+ gen_intermediate_code_internal(env, tb, 0);
+}
+
+void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
+{
+ gen_intermediate_code_internal(env, tb, 1);
+}
+
+static const char *cpu_mode_names[16] = {
+ "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
+ "???", "???", "???", "und", "???", "???", "???", "sys"
+};
+
+void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
+{
+ int i;
+#if 0
+ union {
+ uint32_t i;
+ float s;
+ } s0, s1;
+ CPU_DoubleU d;
+ /* ??? This assumes float64 and double have the same layout.
+ Oh well, it's only debug dumps. */
+ union {
+ float64 f64;
+ double d;
+ } d0;
+#endif
+ uint32_t psr;
+
+ for (i = 0; i < 16; i++) {
+ cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
+ if ((i % 4) == 3)
+ cpu_fprintf(f, "\n");
+ else
+ cpu_fprintf(f, " ");
+ }
+ psr = cpsr_read(env);
+ cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
+ psr,
+ psr & (1 << 31) ? 'N' : '-',
+ psr & (1 << 30) ? 'Z' : '-',
+ psr & (1 << 29) ? 'C' : '-',
+ psr & (1 << 28) ? 'V' : '-',
+ psr & CPSR_T ? 'T' : 'A',
+ cpu_mode_names[psr & 0xf], (psr & 0x10) ?
32 : 26); + +#if 0 + for (i = 0; i < 16; i++) { + d.d = env->vfp.regs[i]; + s0.i = d.l.lower; + s1.i = d.l.upper; + d0.f64 = d.d; + cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n", + i * 2, (int)s0.i, s0.s, + i * 2 + 1, (int)s1.i, s1.s, + i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower, + d0.d); + } + cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]); +#endif +} + +void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos) +{ + env->regs[15] = gen_opc_pc[pc_pos]; + env->condexec_bits = gen_opc_condexec_bits[pc_pos]; +} diff --git a/src/translate-all.c b/src/translate-all.c index 0dffebd..ec86479 100644 --- a/src/translate-all.c +++ b/src/translate-all.c @@ -146,7 +146,7 @@ static void *qemu_st_helpers[4] = { static void cpu_gen_init(TCGContext *ctx, tcg_settings_t *settings) { settings->tlb_flags_mask = TLB_FLAGS_MASK; - settings->tlb_mask_offset = offsetof(CPUX86State, tlb_mask); + settings->tlb_mask_offset = offsetof(CPUArchState, tlb_mask); settings->tlb_entry_addend_offset = offsetof(CPUTLBEntry, addend); settings->tlb_entry_addr_read_offset = offsetof(CPUTLBEntry, addr_read); settings->tlb_entry_addr_write_offset = offsetof(CPUTLBEntry, addr_write); @@ -171,11 +171,18 @@ static void cpu_gen_init(TCGContext *ctx, tcg_settings_t *settings) { extern CPUArchState *env; ctx->tcg_struct_size = sizeof(*tcg_ctx); ctx->env_ptr = (uintptr_t) &env; +#if defined(TARGET_I386) || defined(TARGET_X86_64) ctx->env_offset_eip = offsetof(CPUArchState, eip); ctx->env_sizeof_eip = sizeof(env->eip); ctx->env_offset_ccop = offsetof(CPUArchState, cc_op); ctx->env_sizeof_ccop = sizeof(env->cc_op); ctx->env_offset_df = offsetof(CPUArchState, df); +#elif defined(TARGET_ARM) + ctx->env_offset_eip = offsetof(CPUArchState, regs[15]); + ctx->env_sizeof_eip = sizeof(env->regs[15]); +#else +#error Unsupported target architecture +#endif ctx->env_offset_tlb[0] = offsetof(CPUArchState, tlb_table[0]); ctx->env_offset_tlb[1] = offsetof(CPUArchState, tlb_table[1]); @@ -245,9 +252,15 @@ int cpu_gen_code(CPUArchState *env, TranslationBlock *tb) { if (libcpu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) { libcpu_log("----------------\n"); +#if defined(TARGET_I386) || defined(TARGET_X86_64) libcpu_log("OUT %#" PRIx64 " - cs:eip=%#" PRIx64 ":%#" PRIx64 "\n", (uint64_t) tb->pc, (uint64_t) tb->cs_base, (uint64_t) env->eip); - +#elif defined(TARGET_ARM) + libcpu_log("OUT %#" PRIx64 " - sp=%#" PRIx32 " pc=%#" PRIx32 "\n", (uint64_t) tb->pc, (uint32_t) env->regs[13], + (uint32_t) env->regs[15]); +#else +#error Unsupported target architecture +#endif log_host_disas(tb->tc.ptr, gen_code_size); libcpu_log("\n"); } From 9991870bb80b199650b4be4e871369efa09cd037 Mon Sep 17 00:00:00 2001 From: chaojixx Date: Thu, 17 Oct 2019 15:26:05 -0400 Subject: [PATCH 02/59] feat: add cortex-m r/w ioctl and regs and sregs structs Signed-off-by: chaojixx --- include/cpu/arm/kvm_arm.h | 17 +++++++++++++++++ include/cpu/kvm.h | 11 +++++++++++ 2 files changed, 28 insertions(+) diff --git a/include/cpu/arm/kvm_arm.h b/include/cpu/arm/kvm_arm.h index f950029..e05b413 100644 --- a/include/cpu/arm/kvm_arm.h +++ b/include/cpu/arm/kvm_arm.h @@ -67,6 +67,23 @@ struct kvm_regs { unsigned long fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */ }; +struct kvm_m_regs { + __u32 regs[16]; +}; + +struct kvm_m_sregs { + __u32 other_sp; + __u32 vecbase; + __u32 basepri; + __u32 control; + int current_sp; + int exception; + int pending_exception; + __u32 thumb; + void *nvic; +}; + + /* Supported Processor Types */ #define 
KVM_ARM_TARGET_CORTEX_A15 0 #define KVM_ARM_TARGET_CORTEX_A7 1 diff --git a/include/cpu/kvm.h b/include/cpu/kvm.h index 88d66ac..d257d89 100644 --- a/include/cpu/kvm.h +++ b/include/cpu/kvm.h @@ -1103,10 +1103,21 @@ struct kvm_s390_ucas_mapping { * ioctls for vcpu fds */ #define KVM_RUN _IO(KVMIO, 0x80) + + #define KVM_GET_REGS _IOR(KVMIO, 0x81, struct kvm_regs) #define KVM_SET_REGS _IOW(KVMIO, 0x82, struct kvm_regs) #define KVM_GET_SREGS _IOR(KVMIO, 0x83, struct kvm_sregs) #define KVM_SET_SREGS _IOW(KVMIO, 0x84, struct kvm_sregs) + +#define KVM_GET_M_REGS _IOR(KVMIO, 0xc0, struct kvm_m_regs) +#define KVM_SET_M_REGS _IOW(KVMIO, 0xc1, struct kvm_m_regs) +#define KVM_GET_M_SREGS _IOR(KVMIO, 0xc2, struct kvm_m_sregs) +#define KVM_SET_M_SREGS _IOW(KVMIO, 0xc3, struct kvm_m_sregs) + + + + #define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation) #define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt) /* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */ From eececf997f0292e8eb522638cbb87b8fd41dbd91 Mon Sep 17 00:00:00 2001 From: chaojixx Date: Thu, 7 Nov 2019 22:18:02 -0500 Subject: [PATCH 03/59] cpu:fix GVA->HVA replace ldxx_raw with ldxx_code Signed-off-by: chaojixx --- src/bswap.h | 20 ++++++++++++++++---- src/cpu-all.h | 6 ++++-- src/exec-all.h | 22 ++++++++++++++++++++++ src/target-arm/cpu.h | 17 ++++++++++------- 4 files changed, 52 insertions(+), 13 deletions(-) diff --git a/src/bswap.h b/src/bswap.h index 830c03f..227d7b0 100644 --- a/src/bswap.h +++ b/src/bswap.h @@ -267,7 +267,7 @@ typedef union { } CPU_QuadU; static inline int ldub_p(const void *ptr) { - return *(uint8_t *) ptr; + return *(uint8_t *) ptr; } static inline int ldsb_p(const void *ptr) { @@ -277,11 +277,23 @@ static inline int ldsb_p(const void *ptr) { static inline void stb_p(void *ptr, int v) { *(uint8_t *) ptr = v; } - +#if defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED) +static inline int lduw_le_p(const void *ptr) +{ +#ifdef _ARCH_PPC + int val; + __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr)); + return val; +#else + const uint8_t *p = ptr; + return p[0] | (p[1] << 8); +#endif +} +#else static inline int lduw_le_p(const void *ptr) { - return *(uint16_t *) ptr; + return *(uint16_t *) ptr; } - +#endif static inline int ldsw_le_p(const void *ptr) { return *(int16_t *) ptr; } diff --git a/src/cpu-all.h b/src/cpu-all.h index 3176255..cd0e229 100644 --- a/src/cpu-all.h +++ b/src/cpu-all.h @@ -19,6 +19,8 @@ #ifndef CPU_ALL_H #define CPU_ALL_H +#include "bswap.h" +#include "qemu-common.h" #include #include @@ -30,8 +32,7 @@ #error unsupported target CPU #endif -#include "bswap.h" -#include "qemu-common.h" + #ifdef CONFIG_SYMBEX #include @@ -151,6 +152,7 @@ #define stfl_be_raw(p, v) stfl_be_p(saddr((p)), v) #define stfq_be_raw(p, v) stfq_be_p(saddr((p)), v) + #else /* CONFIG_SYMBEX */ static inline int _se_check_concrete(void *objectState, target_ulong offset, int size) { diff --git a/src/exec-all.h b/src/exec-all.h index f8dc357..c5191b8 100644 --- a/src/exec-all.h +++ b/src/exec-all.h @@ -147,9 +147,31 @@ extern int tb_invalidated_flag; #define DATA_SIZE 8 #include "softmmu_header.h" + #undef ACCESS_TYPE #undef MEMSUFFIX #undef env +#if defined(TARGET_ARM) +static inline uint32_t arm_ldl_code(uint32_t addr, bool do_swap) +{ + uint32_t insn = ldl_code(addr); + if (do_swap) { + return bswap32(insn); + } + return insn; +} + +/* Ditto, for a halfword (Thumb) instruction */ +static inline uint16_t arm_lduw_code(uint32_t addr, bool do_swap) +{ + uint16_t insn = 
lduw_code(addr);
+ if (do_swap) {
+ return bswap16(insn);
+ }
+ return insn;
+}
+#endif
+
 tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
diff --git a/src/target-arm/cpu.h b/src/target-arm/cpu.h
index a0854e1..8aba7ba 100644
--- a/src/target-arm/cpu.h
+++ b/src/target-arm/cpu.h
@@ -464,23 +464,26 @@ static inline bool cpu_has_work(CPUARMState *env) {
 static inline void cpu_pc_from_tb(CPUARMState *env, TranslationBlock *tb) {
 env->regs[15] = tb->pc;
 }
-
 /* Load an instruction and return it in the standard little-endian order */
-static inline uint32_t arm_ldl_code(uint32_t addr, bool do_swap) {
- uint32_t insn = ldl_raw(addr);
+/*
+static inline uint32_t arm_ldl_code(uint32_t addr, bool do_swap)
+{
+ uint32_t insn = ldl_code(addr);
 if (do_swap) {
 return bswap32(insn);
 }
 return insn;
 }
-
+*/
 /* Ditto, for a halfword (Thumb) instruction */
-static inline uint16_t arm_lduw_code(uint32_t addr, bool do_swap) {
- uint16_t insn = lduw_raw(addr);
+/*
+static inline uint16_t arm_lduw_code(uint32_t addr, bool do_swap)
+{
+ uint16_t insn = lduw_code(addr);
 if (do_swap) {
 return bswap16(insn);
 }
 return insn;
 }
-
+*/
 #endif
From fadd2c034aaf1783bb77887f506ab4f7c75377c4 Mon Sep 17 00:00:00 2001
From: chaojixx
Date: Thu, 17 Oct 2019 18:29:07 -0400
Subject: [PATCH 04/59] ARM interrupt: fix armv7m interrupt handling

1. Move the interrupt exit one DBT round in advance.
2. When the NVIC updates, inject the pending interrupt into the CPU via
   arm_cpu_set_irq.
3. Only take an interrupt when the priority of the current one is lower
   than that of the ready one.

Signed-off-by: chaojixx
---
 include/cpu/arm/cpu.h | 2 +-
 src/CMakeLists.txt | 3 +-
 src/cpu-exec.c | 8 ++-
 src/target-arm/helper.c | 116 ++++++++++++++++++++++++-------------
 src/target-arm/translate.c | 42 +++++++++++---
 5 files changed, 118 insertions(+), 53 deletions(-)

diff --git a/include/cpu/arm/cpu.h b/include/cpu/arm/cpu.h
index b70d327..c645bc2 100644
--- a/include/cpu/arm/cpu.h
+++ b/include/cpu/arm/cpu.h
@@ -211,6 +211,6 @@ typedef struct CPUARMState {
 CPUARMState *cpu_arm_init(const char *cpu_model);
 int cpu_arm_exec(CPUARMState *s);
-
+void arm_cpu_set_irq(CPUARMState *env, int level);
 #endif
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index c751577..e715569 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -27,7 +27,8 @@ add_library(cpu cpu-exec.c
 target_include_directories(cpu PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_SOURCE_DIR}/include
 ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET_DIR}
- ${CMAKE_BINARY_DIR}/include)
+ ${CMAKE_BINARY_DIR}/include
+ $ENV{S2EDIR}/qemu/include/nvic)
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D__STDC_FORMAT_MACROS -D_GNU_SOURCE -DNEED_CPU_H -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -DTARGET_PHYS_ADDR_BITS=64")
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -fPIC -Werror -fno-omit-frame-pointer")
diff --git a/src/cpu-exec.c b/src/cpu-exec.c
index c277f52..82da0bb 100644
--- a/src/cpu-exec.c
+++ b/src/cpu-exec.c
@@ -160,6 +160,8 @@ static inline TranslationBlock *tb_find_fast(CPUArchState *env) {
 tb_flush(env);
 }
 #endif
+
+ DPRINTF("Current pc=0x%x\n", env->regs[15]);
 /* we record a subset of the CPU state. It will always be
 the same before a given translated block
@@ -445,8 +447,12 @@ static bool process_interrupt_request(CPUArchState *env) {
 the stack if an interrupt occurred at the wrong time. We avoid this
 by disabling interrupts when pc contains a magic address.
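 On v7-M the magic range is the EXC_RETURN window at 0xfffffff0
 and above, which is why the condition below keeps the
 regs[15] < 0xfffffff0 guard.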
*/
+
+ // In case the ready interrupt has lower priority, also gate on armv7m_nvic_can_take_pending_exception.
+ // In case basepri has not been synced yet, also require a clean kvm exit code.
 if (interrupt_request & CPU_INTERRUPT_HARD &&
- ((IS_M(env) && env->regs[15] < 0xfffffff0) || !(env->uncached_cpsr & CPSR_I))) {
+ ((IS_M(env) && env->regs[15] < 0xfffffff0) || !(env->uncached_cpsr & CPSR_I)) &&
+ (armv7m_nvic_can_take_pending_exception(env->nvic)) && (env->kvm_exit_code == 0)) {
 env->exception_index = EXCP_IRQ;
 do_interrupt(env);
 has_interrupt = true;
diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c
index 13763a9..c1e72b5 100644
--- a/src/target-arm/helper.c
+++ b/src/target-arm/helper.c
@@ -424,6 +424,16 @@ CPUARMState *cpu_arm_init(const char *cpu_model) {
 return env;
 }
+void arm_cpu_set_irq(CPUARMState *env, int level) {
+
+ if (level) {
+ cpu_interrupt(env, CPU_INTERRUPT_HARD);
+ } else {
+ cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);
+ }
+}
+
+
 struct arm_cpu_t {
 uint32_t id;
 const char *name;
@@ -670,7 +680,7 @@ static void do_v7m_exception_exit(CPUARMState *env) {
 type = env->regs[15];
 if (env->v7m.exception != 0)
- armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);
+ armv7m_nvic_complete_irq(env->nvic, env->v7m.exception, false);
 /* Switch to the target stack. */
 switch_v7m_sp(env, (type & 4) != 0);
@@ -694,56 +704,68 @@ static void do_v7m_exception_exit(CPUARMState *env) {
 pointer. */
 }
-static void do_interrupt_v7m(CPUARMState *env) {
- uint32_t xpsr = xpsr_read(env);
+
+void do_interrupt_v7m(CPUARMState *env)
+{
+ uint32_t addr;
+ uint32_t xpsr = xpsr_read(env);
 uint32_t lr;
- uint32_t addr;
+
+ int exc;
+ bool targets_secure;
 lr = 0xfffffff1;
 if (env->v7m.current_sp)
- lr |= 4;
- if (env->v7m.exception == 0)
- lr |= 8;
+ lr |= 4;
+ if (env->v7m.exception == 0)
+ lr |= 8;
 /* For exceptions we just mark as pending on the NVIC, and let that
 handle it. */
- /* TODO: Need to escalate if the current priority is higher than the
- one we're raising. */
 switch (env->exception_index) {
- case EXCP_UDEF:
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
- return;
- case EXCP_SWI:
- env->regs[15] += 2;
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
- return;
- case EXCP_PREFETCH_ABORT:
- case EXCP_DATA_ABORT:
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
- return;
- case EXCP_BKPT:
- if (semihosting_enabled) {
- int nr;
- nr = arm_lduw_code(env->regs[15], env->bswap_code) & 0xff;
- if (nr == 0xab) {
- env->regs[15] += 2;
- env->regs[0] = do_arm_semihosting(env);
- return;
- }
+ case EXCP_UDEF:
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
+ return;
+ case EXCP_SWI:
+ /* The PC already points to the next instruction. */
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, false);
+ return;
+ case EXCP_PREFETCH_ABORT:
+ case EXCP_DATA_ABORT:
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, false);
+ break;
+ case EXCP_BKPT:
+ if (semihosting_enabled) {
+ int nr;
+ nr = arm_lduw_code(env->regs[15], env->bswap_code) & 0xff;
+ if (nr == 0xab) {
+ env->regs[15] += 2;
+ env->regs[0] = do_arm_semihosting(env);
+ return;
 }
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
- return;
- case EXCP_IRQ:
- env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
- break;
- case EXCP_EXCEPTION_EXIT:
- do_v7m_exception_exit(env);
- return;
- default:
- cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
- return; /* Never happens. Keep compiler happy.
*/
+ }
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
+ return;
+ case EXCP_IRQ:
+ armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
+ armv7m_nvic_acknowledge_irq(env->nvic);
+ env->v7m.exception = exc;
+ break;
+ case EXCP_EXCEPTION_EXIT:
+ do_v7m_exception_exit(env);
+ return;
+ default:
+ cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
+ return; /* Never happens. Keep compiler happy. */
 }
+ //lr = R_V7M_EXCRET_RES1_MASK | R_V7M_EXCRET_S_MASK |
+ //R_V7M_EXCRET_DCRS_MASK | R_V7M_EXCRET_FTYPE_MASK | R_V7M_EXCRET_ES_MASK;
+
+
+ //if (!arm_v7m_is_handler_mode(env)) {
+ // lr |= R_V7M_EXCRET_MODE_MASK;
+ //}
 /* Align stack pointer. */
 /* ??? Should only do this if Configuration Control Register
 STACKALIGN bit is set. */
@@ -751,6 +773,7 @@ static void do_interrupt_v7m(CPUARMState *env) {
 env->regs[13] -= 4;
 xpsr |= 0x200;
 }
+ //ignore_stackfaults = v7m_push_stack(cpu);
 /* Switch to the handler mode. */
 v7m_push(env, xpsr);
 v7m_push(env, env->regs[15]);
@@ -760,13 +783,24 @@ static void do_interrupt_v7m(CPUARMState *env) {
 v7m_push(env, env->regs[2]);
 v7m_push(env, env->regs[1]);
 v7m_push(env, env->regs[0]);
- switch_v7m_sp(env, 0);
+
+
+ //v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
+
+ /* Now we've done everything that might cause a derived exception
+ * we can go ahead and activate whichever exception we're going to
+ * take (which might now be the derived exception).
+ */
+
+ /* armv7m_nvic_acknowledge_irq(env->nvic); */
+ switch_v7m_sp(env, 0);
 /* Clear IT bits */
 env->condexec_bits = 0;
 env->regs[14] = lr;
 addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
 env->regs[15] = addr & 0xfffffffe;
 env->thumb = addr & 1;
+
 }
 /* Handle a CPU exception. */
diff --git a/src/target-arm/translate.c b/src/target-arm/translate.c
index 3bce756..3cd56cd 100644
--- a/src/target-arm/translate.c
+++ b/src/target-arm/translate.c
@@ -773,6 +773,7 @@ static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
 {
 if (reg == 15 && ENABLE_ARCH_5) {
 gen_bx(s, var);
+
 } else {
 store_reg(s, reg, var);
 }
@@ -9160,6 +9161,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
 {
 uint32_t val, insn, op, rm, rn, rd, shift, cond;
 int32_t offset;
+ uint32_t k, count; // only used to count the register list when popping pc
 int i;
 TCGv tmp;
 TCGv tmp2;
@@ -9303,9 +9305,16 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
 tmp2 = tcg_temp_new_i32();
 tcg_gen_movi_i32(tmp2, val);
 store_reg(s, 14, tmp2);
+ gen_bx(s, tmp);
+ } else {
+ gen_bx(s, tmp);
+ if (env->v7m.exception != 0 && IS_M(env) && env->regs[14] > 0xf0000000) {
+ // printf("interrupt pc=0x%x\n", env->regs[15]);
+ // printf("interrupt lr=0x%x\n", env->regs[14]);
+ gen_exception(EXCP_EXCEPTION_EXIT);
+ s->is_jmp = DISAS_UPDATE;
+ }
 }
- /* already thumb, no need to check */
- gen_bx(s, tmp);
 break;
 }
 break;
@@ -9666,6 +9675,20 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
 /* set the new PC value */
 if ((insn & 0x0900) == 0x0900) {
 store_reg_from_load(env, s, 15, tmp);
+ // Find how many other registers pop together with pc.
+ if (env->v7m.exception != 0 && IS_M(env)) {
+ count = 0;
+ for (k = 0; k < 8; k++) {
+ if ((insn & (1 << k)) != 0)
+ count++;
+ }
+ val = ldl_phys(env->regs[13] + count * 4);
+ // If the popped pc is an EXC_RETURN value, invoke the interrupt exit.
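+ // A sketch of the values this heuristic keys on: v7-M EXC_RETURN is
+ // 0xfffffff1 (handler), 0xfffffff9 (thread, MSP) or 0xfffffffd
+ // (thread, PSP), so any popped pc above 0xffff0000 is assumed to be
+ // an exception return, and the exit is generated one TB early.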
+ if (val > 0xffff0000) {
+ gen_exception(EXCP_EXCEPTION_EXIT);
+ s->is_jmp = DISAS_UPDATE;
+ }
+ }
 }
 break;
@@ -9961,13 +9984,14 @@ static inline void gen_intermediate_code_internal(CPUARMState *env,
 break;
 }
 #else
- if (dc->pc >= 0xfffffff0 && IS_M(env)) {
- /* We always get here via a jump, so know we are not in a
- conditional execution block. */
- gen_exception(EXCP_EXCEPTION_EXIT);
- dc->is_jmp = DISAS_UPDATE;
- break;
- }
+ /* We move this judgement one DBT round earlier by emitting
+ * gen_exception(EXCP_EXCEPTION_EXIT) there, the same as QEMU 3.0. */
+ /* if (dc->pc >= 0xfffffff0 && IS_M(env)) {
+ gen_exception(EXCP_EXCEPTION_EXIT);
+ dc->is_jmp = DISAS_UPDATE;
+ break;
+ }
+ */
 #endif

 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
From 749030c677f79f09f593489d667b30d6c60e9eac Mon Sep 17 00:00:00 2001
From: chaojixx
Date: Thu, 17 Oct 2019 16:49:52 -0400
Subject: [PATCH 05/59] feat: sync sregs with the kvm cpu

Since the env pointer of the kvm cpu (the same as env->nvic->cpu) is
different from the tcg env, we call cpu_exit(env) whenever the sregs
have changed.
Note: for now we only exit when the basepri register has changed.

Signed-off-by: chaojixx
---
 include/cpu/arm/cpu.h | 2 ++
 include/cpu/kvm.h | 3 +++
 src/target-arm/helper.c | 22 ++++++++--------------
 3 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/include/cpu/arm/cpu.h b/include/cpu/arm/cpu.h
index c645bc2..e069ed6 100644
--- a/include/cpu/arm/cpu.h
+++ b/include/cpu/arm/cpu.h
@@ -206,6 +206,8 @@ typedef struct CPUARMState {
 /* For KVM */
 int kvm_request_interrupt_window;
 int kvm_irq;
+ int kvm_exit_code; // for now only used for msr
+ uint8_t timer_interrupt_disabled;
 } CPUARMState;

 CPUARMState *cpu_arm_init(const char *cpu_model);
diff --git a/include/cpu/kvm.h b/include/cpu/kvm.h
index d257d89..5f9dd94 100644
--- a/include/cpu/kvm.h
+++ b/include/cpu/kvm.h
@@ -185,6 +185,9 @@ struct kvm_pit_config {
 #define KVM_EXIT_EPR 23
 #define KVM_EXIT_SYSTEM_EVENT 24

+/* ARM Cortex-M exit codes */
+#define KVM_EXIT_SYNC_ARM_V7M_SREGS 40
+
 /* Symbolic execution exit codes */
 #define KVM_EXIT_FLUSH_DISK 100
 #define KVM_EXIT_SAVE_DEV_STATE 101
diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c
index c1e72b5..f520d70 100644
--- a/src/target-arm/helper.c
+++ b/src/target-arm/helper.c
@@ -719,7 +719,7 @@ void do_interrupt_v7m(CPUARMState *env)
 lr |= 4;
 if (env->v7m.exception == 0)
 lr |= 8;
-
+ //printf("interrupt = 0x%x\n", env->exception_index);
 /* For exceptions we just mark as pending on the NVIC, and let that
 handle it. */
 switch (env->exception_index) {
@@ -759,13 +759,6 @@
 return; /* Never happens. Keep compiler happy. */
 }
-
- //lr = R_V7M_EXCRET_RES1_MASK | R_V7M_EXCRET_S_MASK |
- //R_V7M_EXCRET_DCRS_MASK | R_V7M_EXCRET_FTYPE_MASK | R_V7M_EXCRET_ES_MASK;
-
-
- //if (!arm_v7m_is_handler_mode(env)) {
- // lr |= R_V7M_EXCRET_MODE_MASK;
- //}
 /* Align stack pointer. */
 /* ??? Should only do this if Configuration Control Register
 STACKALIGN bit is set. */
@@ -773,8 +766,6 @@
 env->regs[13] -= 4;
 xpsr |= 0x200;
 }
- //ignore_stackfaults = v7m_push_stack(cpu);
- /* Switch to the handler mode.
*/ v7m_push(env, xpsr); v7m_push(env, env->regs[15]); v7m_push(env, env->regs[14]); @@ -784,15 +775,13 @@ void do_interrupt_v7m(CPUARMState *env) v7m_push(env, env->regs[1]); v7m_push(env, env->regs[0]); - - //v7m_exception_taken(cpu, lr, false, ignore_stackfaults); /* Now we've done everything that might cause a derived exception * we can go ahead and activate whichever exception we're going to * take (which might now be the derived exception). */ - /* armv7m_nvic_acknowledge_irq(env->nvic); */ + /* Switch to the msp stack. */ switch_v7m_sp(env, 0); /* Clear IT bits */ env->condexec_bits = 0; @@ -2242,11 +2231,16 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) { break; case 17: /* BASEPRI */ env->v7m.basepri = val & 0xff; + env->kvm_exit_code = 1; + cpu_exit(env); break; case 18: /* BASEPRI_MAX */ val &= 0xff; - if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0)) + if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0)){ env->v7m.basepri = val; + env->kvm_exit_code = 1; + cpu_exit(env); + } break; case 19: /* FAULTMASK */ if (val & 1) From 0b6de3b915147bd70ad592c9efdf4c5d403c1c5c Mon Sep 17 00:00:00 2001 From: chaojixx Date: Thu, 17 Oct 2019 18:16:19 -0400 Subject: [PATCH 06/59] Enable symbex mode of s2e compatible with arm cpu. Signed-off-by: chaojixx --- include/cpu/arm/cpu.h | 40 +++++++++++++++++++ include/cpu/se_libcpu.h | 37 ++++++++++++----- include/cpu/se_libcpu_config.h | 16 ++++---- src/target-arm/cpu.h | 72 +++++++++++++++++----------------- src/target-arm/helper.c | 21 +++++++++- src/target-arm/op_helper.c | 34 ++++++---------- src/target-arm/translate.c | 16 ++++++++ 7 files changed, 160 insertions(+), 76 deletions(-) diff --git a/include/cpu/arm/cpu.h b/include/cpu/arm/cpu.h index e069ed6..d843016 100644 --- a/include/cpu/arm/cpu.h +++ b/include/cpu/arm/cpu.h @@ -211,8 +211,48 @@ typedef struct CPUARMState { } CPUARMState; CPUARMState *cpu_arm_init(const char *cpu_model); +void do_cpu_arm_init(CPUARMState *env); int cpu_arm_exec(CPUARMState *s); void arm_cpu_set_irq(CPUARMState *env, int level); +int cpu_arm_handle_mmu_fault(CPUARMState *env, target_ulong addr, int is_write, int mmu_idx); + +enum arm_cpu_mode { + ARM_CPU_MODE_USR = 0x10, + ARM_CPU_MODE_FIQ = 0x11, + ARM_CPU_MODE_IRQ = 0x12, + ARM_CPU_MODE_SVC = 0x13, + ARM_CPU_MODE_ABT = 0x17, + ARM_CPU_MODE_UND = 0x1b, + ARM_CPU_MODE_SYS = 0x1f +}; +#define CPSR_M (0x1f) +#define CPSR_T (1 << 5) +#define CPSR_F (1 << 6) +#define CPSR_I (1 << 7) +#define CPSR_A (1 << 8) +#define CPSR_E (1 << 9) +#define CPSR_IT_2_7 (0xfc00) +#define CPSR_GE (0xf << 16) +#define CPSR_RESERVED (0xf << 20) +#define CPSR_J (1 << 24) +#define CPSR_IT_0_1 (3 << 25) +#define CPSR_Q (1 << 27) +#define CPSR_V (1 << 28) +#define CPSR_C (1 << 29) +#define CPSR_Z (1 << 30) +#define CPSR_N (1 << 31) +#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V) + +#define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7) +#define CACHED_CPSR_BITS (CPSR_T | CPSR_GE | CPSR_IT | CPSR_Q | CPSR_NZCV) +/* Bits writable in user mode. */ +#define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE) +/* Execution state bits. MRS read as zero, MSR writes ignored. */ +#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J) + +static inline int cpu_mmu_index(CPUARMState *env) { + return (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR ? 
1 : 0; +} #endif diff --git a/include/cpu/se_libcpu.h b/include/cpu/se_libcpu.h index 48ca101..4e37cb5 100644 --- a/include/cpu/se_libcpu.h +++ b/include/cpu/se_libcpu.h @@ -30,13 +30,23 @@ extern "C" { #endif struct TranslationBlock; + +#if defined(TARGET_I386) || defined(TARGET_X86_64) struct CPUX86State; +#define CPUArchState struct CPUX86State +#elif defined(TARGET_ARM) +struct CPUARMState; +#define CPUArchState struct CPUARMState +#else +#error Unsupported target architecture +#endif -typedef uintptr_t (*se_libcpu_tb_exec_t)(struct CPUX86State *env1, struct TranslationBlock *tb); -typedef void (*se_do_interrupt_all_t)(int intno, int is_int, int error_code, uintptr_t next_eip, int is_hw); +typedef uintptr_t (*se_libcpu_tb_exec_t)(CPUArchState *env1, struct TranslationBlock *tb); +typedef void (*se_do_interrupt_all_t)(int intno, int is_int, int error_code, uintptr_t next_eip, int is_hw); +typedef void (*se_do_interrupt_arm_t)(CPUArchState *env1); void se_do_interrupt_all(int intno, int is_int, int error_code, target_ulong next_eip, int is_hw); - +void se_do_interrupt_arm(CPUArchState *env1); #define MEM_TRACE_FLAG_IO 1 #define MEM_TRACE_FLAG_WRITE 2 #define MEM_TRACE_FLAG_PRECISE 4 @@ -75,7 +85,7 @@ struct se_libcpu_interface_t { se_libcpu_tb_exec_t tb_exec; se_do_interrupt_all_t do_interrupt_all; - + se_do_interrupt_arm_t do_interrupt_arm; unsigned *clock_scaling_factor; } exec; @@ -92,14 +102,14 @@ struct se_libcpu_interface_t { struct tlb { void (*flush_tlb_cache)(void); void (*flush_tlb_cache_page)(void *objectState, int mmu_idx, int index); - void (*update_tlb_entry)(struct CPUX86State *env, int mmu_idx, uint64_t virtAddr, uint64_t hostAddr); + void (*update_tlb_entry)(CPUArchState *env, int mmu_idx, uint64_t virtAddr, uint64_t hostAddr); } tlb; /* Register access */ struct regs { void (*read_concrete)(unsigned offset, uint8_t *buf, unsigned size); void (*write_concrete)(unsigned offset, uint8_t *buf, unsigned size); - void (*set_cc_op_eflags)(struct CPUX86State *state); + void (*set_cc_op_eflags)(CPUArchState *state); } regs; /* Memory accessors */ @@ -148,8 +158,15 @@ struct se_libcpu_interface_t { /* Internal functions in libcpu. 
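 These are the guest code-fetch callbacks the engine invokes;
 the change below merely parameterizes them on the target's
 CPU state type.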
*/ struct libcpu { +#if defined(TARGET_I386) || defined(TARGET_X86_64) uint32_t (*ldub_code)(struct CPUX86State *env, target_ulong virtual_address); uint32_t (*ldl_code)(struct CPUX86State *env, target_ulong virtual_address); +#elif defined(TARGET_ARM) + uint32_t (*ldub_code)(struct CPUARMState *env, target_ulong virtual_address); + uint32_t (*ldl_code)(struct CPUARMState *env, target_ulong virtual_address); +#else +#error Unsupported target architecture +#endif } libcpu; /* Core plugin interface */ @@ -246,15 +263,17 @@ void tcg_llvm_before_memory_access(target_ulong vaddr, uint64_t value, unsigned void tcg_llvm_after_memory_access(target_ulong vaddr, uint64_t value, unsigned size, unsigned flags, uintptr_t retaddr); -// XXX: change bits to bytes -uint64_t tcg_llvm_trace_port_access(uint64_t port, uint64_t value, unsigned bits, int isWrite); - uint64_t tcg_llvm_trace_mmio_access(uint64_t physaddr, uint64_t value, unsigned bytes, int isWrite); void tcg_llvm_write_mem_io_vaddr(uint64_t value, int reset); +#if defined(TARGET_I386) || defined(TARGET_X86_64) +// XXX: change bits to bytes +uint64_t tcg_llvm_trace_port_access(uint64_t port, uint64_t value, unsigned bits, int isWrite); void tcg_llvm_get_value(void *addr, unsigned nbytes, bool addConstraint); #endif +#endif + #ifdef __cplusplus } #endif diff --git a/include/cpu/se_libcpu_config.h b/include/cpu/se_libcpu_config.h index 5655668..751aa75 100644 --- a/include/cpu/se_libcpu_config.h +++ b/include/cpu/se_libcpu_config.h @@ -32,20 +32,20 @@ extern "C" { /** This defines the size of each MemoryObject that represents physical RAM. Larger values save some memory, smaller (exponentially) decrease solving time for constraints with symbolic addresses */ - -#ifdef SE_ENABLE_TLB +//#ifdef SE_ENABLE_TLB // XXX: Use TARGET_PAGE_BITS somehow... -#define SE_RAM_OBJECT_BITS 12 -#else +//#define SE_RAM_OBJECT_BITS 12 +//#else /* Do not touch this */ + #define SE_RAM_OBJECT_BITS TARGET_PAGE_BITS -#endif +//#endif /** Force page sizes to be the native size. A symbex engine could perform dynamic page splitting in case of symbolic addresses, so there is no need to tweak this value anymore. */ -#if SE_RAM_OBJECT_BITS != 12 || !defined(SE_ENABLE_TLB) -#error Incorrect TLB configuration -#endif +//#if SE_RAM_OBJECT_BITS != 12 || !defined(SE_ENABLE_TLB) +//#error Incorrect TLB configuration +//#endif #define SE_RAM_OBJECT_SIZE (1 << SE_RAM_OBJECT_BITS) #define SE_RAM_OBJECT_MASK (~(SE_RAM_OBJECT_SIZE - 1)) diff --git a/src/target-arm/cpu.h b/src/target-arm/cpu.h index 8aba7ba..3f3ff1b 100644 --- a/src/target-arm/cpu.h +++ b/src/target-arm/cpu.h @@ -152,30 +152,30 @@ static inline void cpu_set_tls(CPUARMState *env, target_ulong newtls) { env->cp15.c13_tls2 = newtls; } -#define CPSR_M (0x1f) -#define CPSR_T (1 << 5) -#define CPSR_F (1 << 6) -#define CPSR_I (1 << 7) -#define CPSR_A (1 << 8) -#define CPSR_E (1 << 9) -#define CPSR_IT_2_7 (0xfc00) -#define CPSR_GE (0xf << 16) -#define CPSR_RESERVED (0xf << 20) -#define CPSR_J (1 << 24) -#define CPSR_IT_0_1 (3 << 25) -#define CPSR_Q (1 << 27) -#define CPSR_V (1 << 28) -#define CPSR_C (1 << 29) -#define CPSR_Z (1 << 30) -#define CPSR_N (1 << 31) -#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V) - -#define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7) -#define CACHED_CPSR_BITS (CPSR_T | CPSR_GE | CPSR_IT | CPSR_Q | CPSR_NZCV) -/* Bits writable in user mode. */ -#define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE) -/* Execution state bits. MRS read as zero, MSR writes ignored. 
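[Editor's note] Commenting out the SE_ENABLE_TLB branch in se_libcpu_config.h pins SE_RAM_OBJECT_BITS to TARGET_PAGE_BITS. On this ARM target that plausibly shrinks the per-object granularity from the old hardcoded 4 KiB to the target page size (older QEMU ARM targets use 10-bit, 1 KiB pages; worth verifying in src/target-arm/cpu.h). The derived constants keep the usual power-of-two arithmetic, as in this sketch:

    /* Sketch: splitting a guest-physical address into (object base, offset)
     * using the constants above. */
    static inline uint64_t se_object_base(uint64_t addr) {
        return addr & SE_RAM_OBJECT_MASK;        /* clear the low OBJECT_BITS */
    }
    static inline uint64_t se_object_offset(uint64_t addr) {
        return addr & (SE_RAM_OBJECT_SIZE - 1);  /* keep only the low bits */
    }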
*/ -#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J) +/* #define CPSR_M (0x1f) */ +// #define CPSR_T (1 << 5) +// #define CPSR_F (1 << 6) +// #define CPSR_I (1 << 7) +// #define CPSR_A (1 << 8) +// #define CPSR_E (1 << 9) +// #define CPSR_IT_2_7 (0xfc00) +// #define CPSR_GE (0xf << 16) +// #define CPSR_RESERVED (0xf << 20) +// #define CPSR_J (1 << 24) +// #define CPSR_IT_0_1 (3 << 25) +// #define CPSR_Q (1 << 27) +// #define CPSR_V (1 << 28) +// #define CPSR_C (1 << 29) +// #define CPSR_Z (1 << 30) +// #define CPSR_N (1 << 31) +// #define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V) + +// #define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7) +// #define CACHED_CPSR_BITS (CPSR_T | CPSR_GE | CPSR_IT | CPSR_Q | CPSR_NZCV) +// [> Bits writable in user mode. <] +// #define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE) +// [> Execution state bits. MRS read as zero, MSR writes ignored. <] +/* #define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J) */ /* Return the current CPSR value. */ uint32_t cpsr_read(CPUARMState *env); @@ -220,15 +220,15 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) { uint32_t vfp_get_fpscr(CPUARMState *env); void vfp_set_fpscr(CPUARMState *env, uint32_t val); -enum arm_cpu_mode { - ARM_CPU_MODE_USR = 0x10, - ARM_CPU_MODE_FIQ = 0x11, - ARM_CPU_MODE_IRQ = 0x12, - ARM_CPU_MODE_SVC = 0x13, - ARM_CPU_MODE_ABT = 0x17, - ARM_CPU_MODE_UND = 0x1b, - ARM_CPU_MODE_SYS = 0x1f -}; +/* enum arm_cpu_mode { */ + // ARM_CPU_MODE_USR = 0x10, + // ARM_CPU_MODE_FIQ = 0x11, + // ARM_CPU_MODE_IRQ = 0x12, + // ARM_CPU_MODE_SVC = 0x13, + // ARM_CPU_MODE_ABT = 0x17, + // ARM_CPU_MODE_UND = 0x1b, + // ARM_CPU_MODE_SYS = 0x1f +/* }; */ /* VFP system registers. */ #define ARM_VFP_FPSID 0 @@ -403,9 +403,9 @@ void cpu_arm_set_cp_io(CPUARMState *env, int cpnum, ARMReadCPFunc *cp_read, ARMW #define MMU_MODE1_SUFFIX _user #define MMU_USER_IDX 1 -static inline int cpu_mmu_index(CPUARMState *env) { - return (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR ? 1 : 0; -} +/* static inline int cpu_mmu_index(CPUARMState *env) { */ + // return (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR ? 1 : 0; +// } #include "cpu-all.h" diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c index f520d70..7a19679 100644 --- a/src/target-arm/helper.c +++ b/src/target-arm/helper.c @@ -410,7 +410,6 @@ CPUARMState *cpu_arm_init(const char *cpu_model) { } env->cp15.c0_cpuid = id; - cpu_state_reset(env); // if (arm_feature(env, ARM_FEATURE_NEON)) { // gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg, 51, "arm-neon.xml", 0); @@ -421,9 +420,18 @@ CPUARMState *cpu_arm_init(const char *cpu_model) { // } qemu_init_vcpu(env); + //move state reset to do_cpu_arm_init + //cpu_state_reset(env); return env; } + +void do_cpu_arm_init(CPUARMState *env) { + + cpu_state_reset(env); + +} + void arm_cpu_set_irq(CPUARMState *env, int level) { if (level) { @@ -433,7 +441,6 @@ void arm_cpu_set_irq(CPUARMState *env, int level) { } } - struct arm_cpu_t { uint32_t id; const char *name; @@ -792,8 +799,18 @@ void do_interrupt_v7m(CPUARMState *env) } +#ifdef CONFIG_SYMBEX +#include +/* This will be called from S2EExecutor if running concretely; It will + in turn call the real ARM IRQ handler with current CPUARMState.*/ +void do_interrupt(CPUARMState *env){ + g_sqi.exec.do_interrupt_arm(env); +} +void se_do_interrupt_arm(CPUARMState *env) { +#else /* Handle a CPU exception. 
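[Editor's note] The CONFIG_SYMBEX detour above routes every ARM exception through g_sqi.exec.do_interrupt_arm while keeping the stock handler reachable as se_do_interrupt_arm. A plausible engine-side counterpart looks like this (illustrative sketch; only g_sqi and se_do_interrupt_arm appear in the patch):

    /* Engine side: observe/instrument the exception, then re-enter
     * libcpu's real ARM exception-entry code. */
    static void engine_do_interrupt_arm(CPUARMState *env) {
        /* ... plugin callbacks, symbolic-state synchronization ... */
        se_do_interrupt_arm(env); /* the #else body of do_interrupt() */
    }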
*/ void do_interrupt(CPUARMState *env) { +#endif uint32_t addr; uint32_t mask; int new_mode; diff --git a/src/target-arm/op_helper.c b/src/target-arm/op_helper.c index 918120f..622f3e2 100644 --- a/src/target-arm/op_helper.c +++ b/src/target-arm/op_helper.c @@ -22,15 +22,9 @@ #include "helper.h" - - - #define SIGNBIT (uint32_t) 0x80000000 #define SIGNBIT64 ((uint64_t) 1 << 63) -#ifdef SYMBEX_LLVM_LIB -#include "llvm-lib.h" -#endif struct CPUARMState *env = 0; @@ -81,30 +75,24 @@ uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, uint32_t rn, uint32_t max #define _raw _raw_symb #define SHIFT 0 -#include "softmmu_header.h" +#include "softmmu_template.h" #define SHIFT 1 -#include "softmmu_header.h" +#include "softmmu_template.h" #define SHIFT 2 -#include "softmmu_header.h" +#include "softmmu_template.h" #define SHIFT 3 -#include "softmmu_header.h" +#include "softmmu_template.h" #undef _raw -#endif -#ifdef CONFIG_SYMBEX -#include -/* This will be called from S2EExecutor if running concretely; It will - in turn call the real ARM IRQ handler with current CPUARMState.*/ -void s2e_do_interrupt(void) { - s2e_helper_do_interrupt(env); -} #endif + + /* try to fill the TLB and return an exception if error. If retaddr is NULL, it means that the function was called in C code (i.e. not from generated code or from helper.c) */ @@ -121,7 +109,9 @@ void tlb_fill(CPUArchState *env1, target_ulong addr, target_ulong page_addr, int env = env1; #ifdef CONFIG_SYMBEX - s2e_on_tlb_miss(g_s2e, g_s2e_state, addr, is_write); + if (unlikely(*g_sqi.events.on_tlb_miss_signals_count)) { + g_sqi.events.on_tlb_miss(addr, is_write, retaddr); + } ret = cpu_arm_handle_mmu_fault(env, page_addr, is_write, mmu_idx); #else ret = cpu_arm_handle_mmu_fault(env, addr, is_write, mmu_idx); @@ -147,10 +137,12 @@ void tlb_fill(CPUArchState *env1, target_ulong addr, target_ulong page_addr, int a virtual CPU fault */ cpu_restore_state(tb, env, pc); } - } + }; #ifdef CONFIG_SYMBEX - s2e_on_page_fault(g_s2e, g_s2e_state, addr, is_write); + if (unlikely(*g_sqi.events.on_page_fault_signals_count)) { + g_sqi.events.on_page_fault(addr, is_write, retaddr); + } #endif raise_exception(env->exception_index); diff --git a/src/target-arm/translate.c b/src/target-arm/translate.c index 3cd56cd..2aa1ebf 100644 --- a/src/target-arm/translate.c +++ b/src/target-arm/translate.c @@ -76,6 +76,22 @@ typedef struct DisasContext { int vfp_enabled; int vec_len; int vec_stride; +#ifdef CONFIG_SYMBEX + void *cpuState; + target_ulong insPc; /* pc of the instruction being translated */ + int useNextPc; /* indicates whether nextPc is valid */ + target_ulong nextPc; /* pc of the instruction following insPc */ + int enable_jmp_im; + int done_instr_end; /* 1 when onTranslateInstructionEnd was called */ + + // Pointer to tcg pointer for the current instruction + uint16_t *ins_opc; + TCGArg *ins_arg; + + int done_reg_access_end; /* 1 when onTranslateRegisterAccess was called */ + int instrument; /* 1 when it is ok to call plugin code */ + int invalid_instr; /* tb contains invalid instruction */ +#endif } DisasContext; static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE]; From c1529d285df3ff6857cf39e9d446601cb30bcb7a Mon Sep 17 00:00:00 2001 From: chaojixx Date: Thu, 17 Oct 2019 17:53:30 -0400 Subject: [PATCH 07/59] Applied clang format Signed-off-by: chaojixx --- include/cpu/apic.h | 2 - include/cpu/arm/cpu.h | 7 +- include/cpu/arm/kvm_arm.h | 352 +- include/cpu/kvm.h | 16 +- include/cpu/se_libcpu.h | 1 - src/bswap.h | 9 +- src/cpu-all.h | 8 +- src/cpu-exec.c | 12 +- 
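[Editor's note] The tlb_fill hunk above also swaps the old direct s2e_on_tlb_miss/s2e_on_page_fault calls for the g_sqi event interface, gated on a subscriber count so the TLB-miss fast path costs only a predicted-cold branch when no plugin listens:

    /* Gating pattern used in tlb_fill (same shape as the hunk): */
    if (unlikely(*g_sqi.events.on_page_fault_signals_count)) {
        g_sqi.events.on_page_fault(addr, is_write, retaddr);
    }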
src/disas.c | 1 - src/exec-all.h | 8 +- src/softmmu_template.h | 2 +- src/target-arm/cpu.h | 136 +- src/target-arm/helper.c | 128 +- src/target-arm/helper.h | 13 +- src/target-arm/iwmmxt_helper.c | 710 +- src/target-arm/neon_helper.c | 1388 ++-- src/target-arm/op_addsub.h | 31 +- src/target-arm/op_helper.c | 13 +- src/target-arm/translate.c | 13046 ++++++++++++++++--------------- 19 files changed, 7899 insertions(+), 7984 deletions(-) diff --git a/include/cpu/apic.h b/include/cpu/apic.h index d7cb5ef..969e852 100644 --- a/include/cpu/apic.h +++ b/include/cpu/apic.h @@ -31,12 +31,10 @@ #include #include - #ifdef __cplusplus extern "C" { #endif - struct DeviceState; typedef struct DeviceState DeviceState; diff --git a/include/cpu/arm/cpu.h b/include/cpu/arm/cpu.h index d843016..aa39808 100644 --- a/include/cpu/arm/cpu.h +++ b/include/cpu/arm/cpu.h @@ -145,7 +145,6 @@ typedef struct CPUARMState { uint32_t teecr; uint32_t teehbr; - /* VFP coprocessor state. */ struct { float64 regs[32]; @@ -191,7 +190,7 @@ typedef struct CPUARMState { CPU_COMMON /* These fields after the common ones so they are preserved on reset. */ - + /* Internal CPU feature flags. */ uint32_t features; /* Coprocessor IO used by peripherals */ @@ -206,12 +205,12 @@ typedef struct CPUARMState { /* For KVM */ int kvm_request_interrupt_window; int kvm_irq; - int kvm_exit_code; //now only used for msr + int kvm_exit_code; // now only used for msr uint8_t timer_interrupt_disabled; } CPUARMState; CPUARMState *cpu_arm_init(const char *cpu_model); -void do_cpu_arm_init(CPUARMState *env); +void do_cpu_arm_init(CPUARMState *env); int cpu_arm_exec(CPUARMState *s); void arm_cpu_set_irq(CPUARMState *env, int level); diff --git a/include/cpu/arm/kvm_arm.h b/include/cpu/arm/kvm_arm.h index e05b413..27b2c14 100644 --- a/include/cpu/arm/kvm_arm.h +++ b/include/cpu/arm/kvm_arm.h @@ -20,9 +20,9 @@ #ifndef __ARM_KVM_H__ #define __ARM_KVM_H__ -#include -#include #include +#include +#include #define __KVM_HAVE_GUEST_DEBUG #define __KVM_HAVE_IRQ_LINE @@ -31,51 +31,50 @@ #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 -#define KVM_REG_SIZE(id) \ - (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) +#define KVM_REG_SIZE(id) (1U << (((id) &KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) /* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */ -#define KVM_ARM_SVC_sp svc_regs[0] -#define KVM_ARM_SVC_lr svc_regs[1] -#define KVM_ARM_SVC_spsr svc_regs[2] -#define KVM_ARM_ABT_sp abt_regs[0] -#define KVM_ARM_ABT_lr abt_regs[1] -#define KVM_ARM_ABT_spsr abt_regs[2] -#define KVM_ARM_UND_sp und_regs[0] -#define KVM_ARM_UND_lr und_regs[1] -#define KVM_ARM_UND_spsr und_regs[2] -#define KVM_ARM_IRQ_sp irq_regs[0] -#define KVM_ARM_IRQ_lr irq_regs[1] -#define KVM_ARM_IRQ_spsr irq_regs[2] +#define KVM_ARM_SVC_sp svc_regs[0] +#define KVM_ARM_SVC_lr svc_regs[1] +#define KVM_ARM_SVC_spsr svc_regs[2] +#define KVM_ARM_ABT_sp abt_regs[0] +#define KVM_ARM_ABT_lr abt_regs[1] +#define KVM_ARM_ABT_spsr abt_regs[2] +#define KVM_ARM_UND_sp und_regs[0] +#define KVM_ARM_UND_lr und_regs[1] +#define KVM_ARM_UND_spsr und_regs[2] +#define KVM_ARM_IRQ_sp irq_regs[0] +#define KVM_ARM_IRQ_lr irq_regs[1] +#define KVM_ARM_IRQ_spsr irq_regs[2] /* Valid only for fiq_regs in struct kvm_regs */ -#define KVM_ARM_FIQ_r8 fiq_regs[0] -#define KVM_ARM_FIQ_r9 fiq_regs[1] -#define KVM_ARM_FIQ_r10 fiq_regs[2] -#define KVM_ARM_FIQ_fp fiq_regs[3] -#define KVM_ARM_FIQ_ip fiq_regs[4] -#define KVM_ARM_FIQ_sp fiq_regs[5] -#define KVM_ARM_FIQ_lr fiq_regs[6] -#define KVM_ARM_FIQ_spsr 
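[Editor's note] KVM_REG_SIZE above decodes the size exponent that every KVM register id carries. Per the Linux UAPI values (not shown in this hunk), KVM_REG_SIZE_SHIFT is 52 and KVM_REG_SIZE_MASK is 0x00f0000000000000ULL, so for example:

    /* KVM_REG_SIZE_U32 == 0x0020000000000000ULL:
     *   (id & mask) >> 52 == 2, hence KVM_REG_SIZE(id) == 1 << 2 == 4 bytes. */
    _Static_assert((0x0020000000000000ULL >> 52) == 2,
                   "U32 register ids decode to 4-byte values");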
fiq_regs[7] +#define KVM_ARM_FIQ_r8 fiq_regs[0] +#define KVM_ARM_FIQ_r9 fiq_regs[1] +#define KVM_ARM_FIQ_r10 fiq_regs[2] +#define KVM_ARM_FIQ_fp fiq_regs[3] +#define KVM_ARM_FIQ_ip fiq_regs[4] +#define KVM_ARM_FIQ_sp fiq_regs[5] +#define KVM_ARM_FIQ_lr fiq_regs[6] +#define KVM_ARM_FIQ_spsr fiq_regs[7] struct kvm_regs { - struct pt_regs usr_regs; /* R0_usr - R14_usr, PC, CPSR */ - unsigned long svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */ - unsigned long abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */ - unsigned long und_regs[3]; /* SP_und, LR_und, SPSR_und */ - unsigned long irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */ - unsigned long fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */ + struct pt_regs usr_regs; /* R0_usr - R14_usr, PC, CPSR */ + unsigned long svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */ + unsigned long abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */ + unsigned long und_regs[3]; /* SP_und, LR_und, SPSR_und */ + unsigned long irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */ + unsigned long fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */ }; struct kvm_m_regs { - __u32 regs[16]; + __u32 regs[16]; }; struct kvm_m_sregs { - __u32 other_sp; - __u32 vecbase; - __u32 basepri; - __u32 control; + __u32 other_sp; + __u32 vecbase; + __u32 basepri; + __u32 control; int current_sp; int exception; int pending_exception; @@ -83,89 +82,83 @@ struct kvm_m_sregs { void *nvic; }; - /* Supported Processor Types */ -#define KVM_ARM_TARGET_CORTEX_A15 0 -#define KVM_ARM_TARGET_CORTEX_A7 1 -#define KVM_ARM_NUM_TARGETS 2 +#define KVM_ARM_TARGET_CORTEX_A15 0 +#define KVM_ARM_TARGET_CORTEX_A7 1 +#define KVM_ARM_NUM_TARGETS 2 /* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */ -#define KVM_ARM_DEVICE_TYPE_SHIFT 0 -#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT) -#define KVM_ARM_DEVICE_ID_SHIFT 16 -#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT) +#define KVM_ARM_DEVICE_TYPE_SHIFT 0 +#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT) +#define KVM_ARM_DEVICE_ID_SHIFT 16 +#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT) /* Supported device IDs */ -#define KVM_ARM_DEVICE_VGIC_V2 0 +#define KVM_ARM_DEVICE_VGIC_V2 0 /* Supported VGIC address types */ -#define KVM_VGIC_V2_ADDR_TYPE_DIST 0 -#define KVM_VGIC_V2_ADDR_TYPE_CPU 1 +#define KVM_VGIC_V2_ADDR_TYPE_DIST 0 +#define KVM_VGIC_V2_ADDR_TYPE_CPU 1 -#define KVM_VGIC_V2_DIST_SIZE 0x1000 -#define KVM_VGIC_V2_CPU_SIZE 0x2000 +#define KVM_VGIC_V2_DIST_SIZE 0x1000 +#define KVM_VGIC_V2_CPU_SIZE 0x2000 /* Supported VGICv3 address types */ -#define KVM_VGIC_V3_ADDR_TYPE_DIST 2 -#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3 -#define KVM_VGIC_ITS_ADDR_TYPE 4 -#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5 +#define KVM_VGIC_V3_ADDR_TYPE_DIST 2 +#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3 +#define KVM_VGIC_ITS_ADDR_TYPE 4 +#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5 -#define KVM_VGIC_V3_DIST_SIZE SZ_64K -#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K) -#define KVM_VGIC_V3_ITS_SIZE (2 * SZ_64K) +#define KVM_VGIC_V3_DIST_SIZE SZ_64K +#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K) +#define KVM_VGIC_V3_ITS_SIZE (2 * SZ_64K) -#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */ -#define KVM_ARM_VCPU_PSCI_0_2 1 /* CPU uses PSCI v0.2 */ +#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */ +#define KVM_ARM_VCPU_PSCI_0_2 1 /* CPU uses PSCI v0.2 */ struct kvm_vcpu_init { - __u32 target; - __u32 features[7]; + __u32 target; + __u32 features[7]; }; -struct kvm_sregs { -}; +struct kvm_sregs {}; -struct kvm_fpu { -}; 
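[Editor's note] kvm_m_regs and kvm_m_sregs above are the M-profile counterparts of the classic banked kvm_regs, and they pair with the KVM_GET/SET_M_SREGS ioctls added to kvm.h later in this series. A client-side usage sketch (vcpu_fd is an assumed open vcpu descriptor, not from the patch):

    #include <sys/ioctl.h>

    struct kvm_m_sregs msregs;
    if (ioctl(vcpu_fd, KVM_GET_M_SREGS, &msregs) == 0) {
        /* e.g. msregs.vecbase, msregs.basepri, msregs.current_sp */
    }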
+struct kvm_fpu {}; -struct kvm_guest_debug_arch { -}; +struct kvm_guest_debug_arch {}; -struct kvm_debug_exit_arch { -}; +struct kvm_debug_exit_arch {}; struct kvm_sync_regs { - /* Used with KVM_CAP_ARM_USER_IRQ */ - __u64 device_irq_level; + /* Used with KVM_CAP_ARM_USER_IRQ */ + __u64 device_irq_level; }; -struct kvm_arch_memory_slot { -}; +struct kvm_arch_memory_slot {}; /* for KVM_GET/SET_VCPU_EVENTS */ struct kvm_vcpu_events { - struct { - __u8 serror_pending; - __u8 serror_has_esr; - /* Align it to 8 bytes */ - __u8 pad[6]; - __u64 serror_esr; - } exception; - __u32 reserved[12]; + struct { + __u8 serror_pending; + __u8 serror_has_esr; + /* Align it to 8 bytes */ + __u8 pad[6]; + __u64 serror_esr; + } exception; + __u32 reserved[12]; }; /* If you need to interpret the index values, here is the key: */ -#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000 -#define KVM_REG_ARM_COPROC_SHIFT 16 -#define KVM_REG_ARM_32_OPC2_MASK 0x0000000000000007 -#define KVM_REG_ARM_32_OPC2_SHIFT 0 -#define KVM_REG_ARM_OPC1_MASK 0x0000000000000078 -#define KVM_REG_ARM_OPC1_SHIFT 3 -#define KVM_REG_ARM_CRM_MASK 0x0000000000000780 -#define KVM_REG_ARM_CRM_SHIFT 7 -#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800 -#define KVM_REG_ARM_32_CRN_SHIFT 11 +#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000 +#define KVM_REG_ARM_COPROC_SHIFT 16 +#define KVM_REG_ARM_32_OPC2_MASK 0x0000000000000007 +#define KVM_REG_ARM_32_OPC2_SHIFT 0 +#define KVM_REG_ARM_OPC1_MASK 0x0000000000000078 +#define KVM_REG_ARM_OPC1_SHIFT 3 +#define KVM_REG_ARM_CRM_MASK 0x0000000000000780 +#define KVM_REG_ARM_CRM_SHIFT 7 +#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800 +#define KVM_REG_ARM_32_CRN_SHIFT 11 /* * For KVM currently all guest registers are nonsecure, but we reserve a bit * in the encoding to distinguish secure from nonsecure for AArch32 system @@ -173,119 +166,111 @@ struct kvm_vcpu_events { * register, and 0 for the nonsecure banked register or if the register is * not banked by security. */ -#define KVM_REG_ARM_SECURE_MASK 0x0000000010000000 -#define KVM_REG_ARM_SECURE_SHIFT 28 +#define KVM_REG_ARM_SECURE_MASK 0x0000000010000000 +#define KVM_REG_ARM_SECURE_SHIFT 28 -#define ARM_CP15_REG_SHIFT_MASK(x,n) \ - (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK) +#define ARM_CP15_REG_SHIFT_MASK(x, n) (((x) << KVM_REG_ARM_##n##_SHIFT) & KVM_REG_ARM_##n##_MASK) -#define __ARM_CP15_REG(op1,crn,crm,op2) \ - (KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT) | \ - ARM_CP15_REG_SHIFT_MASK(op1, OPC1) | \ - ARM_CP15_REG_SHIFT_MASK(crn, 32_CRN) | \ - ARM_CP15_REG_SHIFT_MASK(crm, CRM) | \ - ARM_CP15_REG_SHIFT_MASK(op2, 32_OPC2)) +#define __ARM_CP15_REG(op1, crn, crm, op2) \ + (KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT) | ARM_CP15_REG_SHIFT_MASK(op1, OPC1) | \ + ARM_CP15_REG_SHIFT_MASK(crn, 32_CRN) | ARM_CP15_REG_SHIFT_MASK(crm, CRM) | ARM_CP15_REG_SHIFT_MASK(op2, 32_OPC2)) #define ARM_CP15_REG32(...) (__ARM_CP15_REG(__VA_ARGS__) | KVM_REG_SIZE_U32) -#define __ARM_CP15_REG64(op1,crm) \ - (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64) +#define __ARM_CP15_REG64(op1, crm) (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64) #define ARM_CP15_REG64(...) 
__ARM_CP15_REG64(__VA_ARGS__) /* PL1 Physical Timer Registers */ -#define KVM_REG_ARM_PTIMER_CTL ARM_CP15_REG32(0, 14, 2, 1) -#define KVM_REG_ARM_PTIMER_CNT ARM_CP15_REG64(0, 14) -#define KVM_REG_ARM_PTIMER_CVAL ARM_CP15_REG64(2, 14) +#define KVM_REG_ARM_PTIMER_CTL ARM_CP15_REG32(0, 14, 2, 1) +#define KVM_REG_ARM_PTIMER_CNT ARM_CP15_REG64(0, 14) +#define KVM_REG_ARM_PTIMER_CVAL ARM_CP15_REG64(2, 14) /* Virtual Timer Registers */ -#define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1) -#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14) -#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14) +#define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1) +#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14) +#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14) /* Normal registers are mapped as coprocessor 16. */ -#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4) +#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4) /* Some registers need more space to represent values. */ -#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00 -#define KVM_REG_ARM_DEMUX_ID_SHIFT 8 -#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT) -#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF -#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0 +#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00 +#define KVM_REG_ARM_DEMUX_ID_SHIFT 8 +#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT) +#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF +#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0 /* VFP registers: we could overload CP10 like ARM does, but that's ugly. 
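[Editor's note] The __ARM_CP15_REG family packs the coprocessor addressing tuple (op1, CRn, CRm, op2) into the index fields defined just above. Decoding one of the timer ids as a worked example:

    /* KVM_REG_ARM_PTIMER_CTL == ARM_CP15_REG32(0, 14, 2, 1):
     *   coproc 15 -> bits [27:16]   op1 = 0  -> bits [6:3]
     *   CRn = 14  -> bits [14:11]   CRm = 2  -> bits [10:7]
     *   op2 = 1   -> bits [2:0]     plus KVM_REG_SIZE_U32 in the size field. */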
*/ -#define KVM_REG_ARM_VFP (0x0012 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_VFP_MASK 0x000000000000FFFF -#define KVM_REG_ARM_VFP_BASE_REG 0x0 -#define KVM_REG_ARM_VFP_FPSID 0x1000 -#define KVM_REG_ARM_VFP_FPSCR 0x1001 -#define KVM_REG_ARM_VFP_MVFR1 0x1006 -#define KVM_REG_ARM_VFP_MVFR0 0x1007 -#define KVM_REG_ARM_VFP_FPEXC 0x1008 -#define KVM_REG_ARM_VFP_FPINST 0x1009 -#define KVM_REG_ARM_VFP_FPINST2 0x100A +#define KVM_REG_ARM_VFP (0x0012 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_VFP_MASK 0x000000000000FFFF +#define KVM_REG_ARM_VFP_BASE_REG 0x0 +#define KVM_REG_ARM_VFP_FPSID 0x1000 +#define KVM_REG_ARM_VFP_FPSCR 0x1001 +#define KVM_REG_ARM_VFP_MVFR1 0x1006 +#define KVM_REG_ARM_VFP_MVFR0 0x1007 +#define KVM_REG_ARM_VFP_FPEXC 0x1008 +#define KVM_REG_ARM_VFP_FPINST 0x1009 +#define KVM_REG_ARM_VFP_FPINST2 0x100A /* KVM-as-firmware specific pseudo-registers */ -#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \ - KVM_REG_ARM_FW | ((r) & 0xffff)) -#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) +#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_FW | ((r) &0xffff)) +#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) /* Device Control API: ARM VGIC */ -#define KVM_DEV_ARM_VGIC_GRP_ADDR 0 -#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 -#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2 -#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32 -#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) -#define KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT 32 -#define KVM_DEV_ARM_VGIC_V3_MPIDR_MASK \ - (0xffffffffULL << KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT) -#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0 -#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) -#define KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK (0xffff) -#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3 -#define KVM_DEV_ARM_VGIC_GRP_CTRL 4 +#define KVM_DEV_ARM_VGIC_GRP_ADDR 0 +#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 +#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2 +#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32 +#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) +#define KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT 32 +#define KVM_DEV_ARM_VGIC_V3_MPIDR_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT) +#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0 +#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) +#define KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK (0xffff) +#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3 +#define KVM_DEV_ARM_VGIC_GRP_CTRL 4 #define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5 #define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6 -#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7 -#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8 -#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10 -#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \ - (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) +#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7 +#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8 +#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10 +#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff -#define VGIC_LEVEL_INFO_LINE_LEVEL 0 +#define VGIC_LEVEL_INFO_LINE_LEVEL 0 /* Device Control API on vcpu fd */ -#define KVM_ARM_VCPU_PMU_V3_CTRL 0 -#define KVM_ARM_VCPU_PMU_V3_IRQ 0 -#define KVM_ARM_VCPU_PMU_V3_INIT 1 -#define KVM_ARM_VCPU_TIMER_CTRL 1 -#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0 -#define 
KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1 - -#define KVM_DEV_ARM_VGIC_CTRL_INIT 0 -#define KVM_DEV_ARM_ITS_SAVE_TABLES 1 -#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 -#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 -#define KVM_DEV_ARM_ITS_CTRL_RESET 4 +#define KVM_ARM_VCPU_PMU_V3_CTRL 0 +#define KVM_ARM_VCPU_PMU_V3_IRQ 0 +#define KVM_ARM_VCPU_PMU_V3_INIT 1 +#define KVM_ARM_VCPU_TIMER_CTRL 1 +#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0 +#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1 + +#define KVM_DEV_ARM_VGIC_CTRL_INIT 0 +#define KVM_DEV_ARM_ITS_SAVE_TABLES 1 +#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 +#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 +#define KVM_DEV_ARM_ITS_CTRL_RESET 4 /* KVM_IRQ_LINE irq field index values */ -#define KVM_ARM_IRQ_TYPE_SHIFT 24 -#define KVM_ARM_IRQ_TYPE_MASK 0xff -#define KVM_ARM_IRQ_VCPU_SHIFT 16 -#define KVM_ARM_IRQ_VCPU_MASK 0xff -#define KVM_ARM_IRQ_NUM_SHIFT 0 -#define KVM_ARM_IRQ_NUM_MASK 0xffff +#define KVM_ARM_IRQ_TYPE_SHIFT 24 +#define KVM_ARM_IRQ_TYPE_MASK 0xff +#define KVM_ARM_IRQ_VCPU_SHIFT 16 +#define KVM_ARM_IRQ_VCPU_MASK 0xff +#define KVM_ARM_IRQ_NUM_SHIFT 0 +#define KVM_ARM_IRQ_NUM_MASK 0xffff /* irq_type field */ -#define KVM_ARM_IRQ_TYPE_CPU 0 -#define KVM_ARM_IRQ_TYPE_SPI 1 -#define KVM_ARM_IRQ_TYPE_PPI 2 +#define KVM_ARM_IRQ_TYPE_CPU 0 +#define KVM_ARM_IRQ_TYPE_SPI 1 +#define KVM_ARM_IRQ_TYPE_PPI 2 /* out-of-kernel GIC cpu interrupt injection irq_number field */ -#define KVM_ARM_IRQ_CPU_IRQ 0 -#define KVM_ARM_IRQ_CPU_FIQ 1 +#define KVM_ARM_IRQ_CPU_IRQ 0 +#define KVM_ARM_IRQ_CPU_FIQ 1 /* * This used to hold the highest supported SPI, but it is now obsolete @@ -293,25 +278,24 @@ struct kvm_vcpu_events { * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS. */ #ifndef __KERNEL__ -#define KVM_ARM_IRQ_GIC_MAX 127 +#define KVM_ARM_IRQ_GIC_MAX 127 #endif /* One single KVM irqchip, ie. 
the VGIC */ -#define KVM_NR_IRQCHIPS 1 +#define KVM_NR_IRQCHIPS 1 /* PSCI interface */ -#define KVM_PSCI_FN_BASE 0x95c1ba5e -#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) +#define KVM_PSCI_FN_BASE 0x95c1ba5e +#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) -#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0) -#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1) -#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2) -#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3) +#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0) +#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1) +#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2) +#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3) -#define KVM_PSCI_RET_SUCCESS PSCI_RET_SUCCESS -#define KVM_PSCI_RET_NI PSCI_RET_NOT_SUPPORTED -#define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS -#define KVM_PSCI_RET_DENIED PSCI_RET_DENIED +#define KVM_PSCI_RET_SUCCESS PSCI_RET_SUCCESS +#define KVM_PSCI_RET_NI PSCI_RET_NOT_SUPPORTED +#define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS +#define KVM_PSCI_RET_DENIED PSCI_RET_DENIED #endif /* __ARM_KVM_H__ */ - diff --git a/include/cpu/kvm.h b/include/cpu/kvm.h index 5f9dd94..8a8d97e 100644 --- a/include/cpu/kvm.h +++ b/include/cpu/kvm.h @@ -9,14 +9,12 @@ #include - #ifndef BIT #define BIT(n) (1 << (n)) #endif - #include -#if defined(TARGET_I386)|| defined(TARGET_X86_64) +#if defined(TARGET_I386) || defined(TARGET_X86_64) #include #elif defined(TARGET_ARM) #include @@ -24,7 +22,6 @@ #error unsupported target CPU #endif - #include #define KVM_API_VERSION 12 @@ -327,8 +324,8 @@ struct kvm_run { /* KVM_EXIT_SYSTEM_EVENT */ struct { #define KVM_SYSTEM_EVENT_SHUTDOWN 1 -#define KVM_SYSTEM_EVENT_RESET 2 -#define KVM_SYSTEM_EVENT_CRASH 3 +#define KVM_SYSTEM_EVENT_RESET 2 +#define KVM_SYSTEM_EVENT_CRASH 3 __u32 type; __u64 flags; } system_event; @@ -948,8 +945,7 @@ struct kvm_dirty_tlb { #define KVM_REG_SIZE_U512 0x0060000000000000ULL #define KVM_REG_SIZE_U1024 0x0070000000000000ULL -#define KVM_REG_SIZE(id) \ - (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) +#define KVM_REG_SIZE(id) (1U << (((id) &KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) struct kvm_reg_list { __u64 n; /* number of regs */ @@ -1107,7 +1103,6 @@ struct kvm_s390_ucas_mapping { */ #define KVM_RUN _IO(KVMIO, 0x80) - #define KVM_GET_REGS _IOR(KVMIO, 0x81, struct kvm_regs) #define KVM_SET_REGS _IOW(KVMIO, 0x82, struct kvm_regs) #define KVM_GET_SREGS _IOR(KVMIO, 0x83, struct kvm_sregs) @@ -1118,9 +1113,6 @@ struct kvm_s390_ucas_mapping { #define KVM_GET_M_SREGS _IOR(KVMIO, 0xc2, struct kvm_m_sregs) #define KVM_SET_M_SREGS _IOW(KVMIO, 0xc3, struct kvm_m_sregs) - - - #define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation) #define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt) /* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */ diff --git a/include/cpu/se_libcpu.h b/include/cpu/se_libcpu.h index 4e37cb5..e57e4b0 100644 --- a/include/cpu/se_libcpu.h +++ b/include/cpu/se_libcpu.h @@ -41,7 +41,6 @@ struct CPUARMState; #error Unsupported target architecture #endif - typedef uintptr_t (*se_libcpu_tb_exec_t)(CPUArchState *env1, struct TranslationBlock *tb); typedef void (*se_do_interrupt_all_t)(int intno, int is_int, int error_code, uintptr_t next_eip, int is_hw); typedef void (*se_do_interrupt_arm_t)(CPUArchState *env1); diff --git a/src/bswap.h b/src/bswap.h index 227d7b0..5962918 100644 --- a/src/bswap.h +++ b/src/bswap.h @@ -267,7 +267,7 @@ typedef union { } CPU_QuadU; static inline int ldub_p(const void *ptr) { - return *(uint8_t *) ptr; + return *(uint8_t *) ptr; } static inline int 
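[Editor's note] KVM_PSCI_FN just offsets from the magic base, so the four PSCI 0.1 calls land on consecutive ids:

    /* KVM_PSCI_FN_BASE == 0x95c1ba5e, hence:
     *   CPU_SUSPEND = 0x95c1ba5e    CPU_OFF = 0x95c1ba5f
     *   CPU_ON      = 0x95c1ba60    MIGRATE = 0x95c1ba61  */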
ldsb_p(const void *ptr) { @@ -278,11 +278,10 @@ static inline void stb_p(void *ptr, int v) { *(uint8_t *) ptr = v; } #if defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED) -static inline int lduw_le_p(const void *ptr) -{ +static inline int lduw_le_p(const void *ptr) { #ifdef _ARCH_PPC int val; - __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr)); + __asm__ __volatile__("lhbrx %0,0,%1" : "=r"(val) : "r"(ptr)); return val; #else const uint8_t *p = ptr; @@ -291,7 +290,7 @@ static inline int lduw_le_p(const void *ptr) } #else static inline int lduw_le_p(const void *ptr) { - return *(uint16_t *) ptr; + return *(uint16_t *) ptr; } #endif static inline int ldsw_le_p(const void *ptr) { diff --git a/src/cpu-all.h b/src/cpu-all.h index cd0e229..452b594 100644 --- a/src/cpu-all.h +++ b/src/cpu-all.h @@ -19,10 +19,10 @@ #ifndef CPU_ALL_H #define CPU_ALL_H -#include "bswap.h" -#include "qemu-common.h" #include #include +#include "bswap.h" +#include "qemu-common.h" #if defined(TARGET_I386)|| defined(TARGET_X86_64) #include @@ -32,8 +32,6 @@ #error unsupported target CPU #endif - - #ifdef CONFIG_SYMBEX #include #include @@ -152,7 +150,6 @@ #define stfl_be_raw(p, v) stfl_be_p(saddr((p)), v) #define stfq_be_raw(p, v) stfq_be_p(saddr((p)), v) - #else /* CONFIG_SYMBEX */ static inline int _se_check_concrete(void *objectState, target_ulong offset, int size) { @@ -359,5 +356,4 @@ void run_on_cpu(CPUArchState *env, void (*func)(void *data), void *data); #define CPU_LOG_LLVM_IR (1 << 10) #define CPU_LOG_LLVM_ASM (1 << 11) - #endif /* CPU_ALL_H */ diff --git a/src/cpu-exec.c b/src/cpu-exec.c index 82da0bb..4cb018e 100644 --- a/src/cpu-exec.c +++ b/src/cpu-exec.c @@ -128,7 +128,8 @@ static TranslationBlock *tb_find_slow(CPUArchState *env, target_ulong pc, target } not_found: /* if no translated code available, then translate it now */ - DPRINTF(" if no translated code available, then translate it now pc=0x%x, cs_base=0x%x, flags= 0x%lx\n", pc, cs_base, flags); + DPRINTF(" if no translated code available, then translate it now pc=0x%x, cs_base=0x%x, flags= 0x%lx\n", pc, + cs_base, flags); tb = tb_gen_code(env, pc, cs_base, flags, 0); ++g_cpu_stats.tb_regens; @@ -160,8 +161,8 @@ static inline TranslationBlock *tb_find_fast(CPUArchState *env) { tb_flush(env); } #endif - - DPRINTF("Current pc=0x%x: \n",env->regs[15]); + + DPRINTF("Current pc=0x%x: \n", env->regs[15]); /* we record a subset of the CPU state. It will always be the same before a given translated block @@ -358,7 +359,6 @@ static bool process_interrupt_request(CPUArchState *env) { #error Unsupported target architecture #endif - if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) { /* Mask out external interrupts for this step. 
*/ interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK; @@ -452,7 +452,7 @@ static bool process_interrupt_request(CPUArchState *env) { // in case basepri has not been synced so add exit code condition if (interrupt_request & CPU_INTERRUPT_HARD && ((IS_M(env) && env->regs[15] < 0xfffffff0) || !(env->uncached_cpsr & CPSR_I)) && - (armv7m_nvic_can_take_pending_exception(env->nvic)) && (env->kvm_exit_code == 0)) { + (armv7m_nvic_can_take_pending_exception(env->nvic)) && (env->kvm_exit_code == 0)) { env->exception_index = EXCP_IRQ; do_interrupt(env); has_interrupt = true; @@ -666,8 +666,6 @@ int cpu_exec(CPUArchState *env) { #error Unsupported target architecture #endif - - env->current_tb = NULL; #if defined(TARGET_I386) diff --git a/src/disas.c b/src/disas.c index 60859d3..720cf59 100644 --- a/src/disas.c +++ b/src/disas.c @@ -26,7 +26,6 @@ #error unsupported target CPU #endif - #include #include diff --git a/src/exec-all.h b/src/exec-all.h index c5191b8..e8320ca 100644 --- a/src/exec-all.h +++ b/src/exec-all.h @@ -147,13 +147,11 @@ extern int tb_invalidated_flag; #define DATA_SIZE 8 #include "softmmu_header.h" - #undef ACCESS_TYPE #undef MEMSUFFIX #undef env #if defined(TARGET_ARM) -static inline uint32_t arm_ldl_code(uint32_t addr, bool do_swap) -{ +static inline uint32_t arm_ldl_code(uint32_t addr, bool do_swap) { uint32_t insn = ldl_code(addr); if (do_swap) { return bswap32(insn); @@ -162,8 +160,7 @@ static inline uint32_t arm_ldl_code(uint32_t addr, bool do_swap) } /* Ditto, for a halfword (Thumb) instruction */ -static inline uint16_t arm_lduw_code(uint32_t addr, bool do_swap) -{ +static inline uint16_t arm_lduw_code(uint32_t addr, bool do_swap) { uint16_t insn = lduw_code(addr); if (do_swap) { return bswap16(insn); @@ -172,7 +169,6 @@ static inline uint16_t arm_lduw_code(uint32_t addr, bool do_swap) } #endif - tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr); typedef void(CPUDebugExcpHandler)(CPUArchState *env); diff --git a/src/softmmu_template.h b/src/softmmu_template.h index 57b54cc..8119308 100644 --- a/src/softmmu_template.h +++ b/src/softmmu_template.h @@ -129,7 +129,6 @@ DATA_TYPE glue(glue(io_read, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_phys_ void *retaddr) { DATA_TYPE res; const struct MemoryDescOps *ops = phys_get_ops(physaddr); - physaddr = (physaddr & TARGET_PAGE_MASK) + addr; #if defined(CONFIG_SYMBEX) && defined(CONFIG_SYMBEX_MP) @@ -160,6 +159,7 @@ DATA_TYPE glue(glue(io_read, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_phys_ env->mem_io_pc = (uintptr_t) retaddr; SE_SET_MEM_IO_VADDR(env, addr, 0); + #if SHIFT <= 2 res = ops->read(physaddr, 1 << SHIFT); #else diff --git a/src/target-arm/cpu.h b/src/target-arm/cpu.h index 3f3ff1b..8738cac 100644 --- a/src/target-arm/cpu.h +++ b/src/target-arm/cpu.h @@ -221,13 +221,13 @@ uint32_t vfp_get_fpscr(CPUARMState *env); void vfp_set_fpscr(CPUARMState *env, uint32_t val); /* enum arm_cpu_mode { */ - // ARM_CPU_MODE_USR = 0x10, - // ARM_CPU_MODE_FIQ = 0x11, - // ARM_CPU_MODE_IRQ = 0x12, - // ARM_CPU_MODE_SVC = 0x13, - // ARM_CPU_MODE_ABT = 0x17, - // ARM_CPU_MODE_UND = 0x1b, - // ARM_CPU_MODE_SYS = 0x1f +// ARM_CPU_MODE_USR = 0x10, +// ARM_CPU_MODE_FIQ = 0x11, +// ARM_CPU_MODE_IRQ = 0x12, +// ARM_CPU_MODE_SVC = 0x13, +// ARM_CPU_MODE_ABT = 0x17, +// ARM_CPU_MODE_UND = 0x1b, +// ARM_CPU_MODE_SYS = 0x1f /* }; */ /* VFP system registers. 
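[Editor's note] The process_interrupt_request condition in cpu-exec.c above is easier to audit with each clause named (sketch; IS_M, CPSR_I and the NVIC query are all from the hunk):

    bool take_hard_irq =
        (interrupt_request & CPU_INTERRUPT_HARD)
        && ((IS_M(env) && env->regs[15] < 0xfffffff0) /* M profile: PC not in the   */
                                                      /* magic EXC_RETURN range     */
            || !(env->uncached_cpsr & CPSR_I))        /* otherwise: IRQs unmasked   */
        && armv7m_nvic_can_take_pending_exception(env->nvic)
        && env->kvm_exit_code == 0;                   /* BASEPRI write synced first */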
*/ @@ -250,31 +250,31 @@ void vfp_set_fpscr(CPUARMState *env, uint32_t val); #define ARM_IWMMXT_wCGR3 11 /* enum arm_features { */ - // ARM_FEATURE_VFP, - // ARM_FEATURE_AUXCR, [> ARM1026 Auxiliary control register. <] - // ARM_FEATURE_XSCALE, [> Intel XScale extensions. <] - // ARM_FEATURE_IWMMXT, [> Intel iwMMXt extension. <] - // ARM_FEATURE_V6, - // ARM_FEATURE_V6K, - // ARM_FEATURE_V7, - // ARM_FEATURE_THUMB2, - // ARM_FEATURE_MPU, [> Only has Memory Protection Unit, not full MMU. <] - // ARM_FEATURE_VFP3, - // ARM_FEATURE_VFP_FP16, - // ARM_FEATURE_NEON, - // ARM_FEATURE_THUMB_DIV, [> divide supported in Thumb encoding <] - // ARM_FEATURE_M, [> Microcontroller profile. <] - // ARM_FEATURE_OMAPCP, [> OMAP specific CP15 ops handling. <] - // ARM_FEATURE_THUMB2EE, - // ARM_FEATURE_V7MP, [> v7 Multiprocessing Extensions <] - // ARM_FEATURE_V4T, - // ARM_FEATURE_V5, - // ARM_FEATURE_STRONGARM, - // ARM_FEATURE_VAPA, [> cp15 VA to PA lookups <] - // ARM_FEATURE_ARM_DIV, [> divide supported in ARM encoding <] - // ARM_FEATURE_VFP4, [> VFPv4 (implies that NEON is v2) <] - // ARM_FEATURE_GENERIC_TIMER, - // ARM_FEATURE_MVFR, [> Media and VFP Feature Registers 0 and 1 <] +// ARM_FEATURE_VFP, +// ARM_FEATURE_AUXCR, [> ARM1026 Auxiliary control register. <] +// ARM_FEATURE_XSCALE, [> Intel XScale extensions. <] +// ARM_FEATURE_IWMMXT, [> Intel iwMMXt extension. <] +// ARM_FEATURE_V6, +// ARM_FEATURE_V6K, +// ARM_FEATURE_V7, +// ARM_FEATURE_THUMB2, +// ARM_FEATURE_MPU, [> Only has Memory Protection Unit, not full MMU. <] +// ARM_FEATURE_VFP3, +// ARM_FEATURE_VFP_FP16, +// ARM_FEATURE_NEON, +// ARM_FEATURE_THUMB_DIV, [> divide supported in Thumb encoding <] +// ARM_FEATURE_M, [> Microcontroller profile. <] +// ARM_FEATURE_OMAPCP, [> OMAP specific CP15 ops handling. <] +// ARM_FEATURE_THUMB2EE, +// ARM_FEATURE_V7MP, [> v7 Multiprocessing Extensions <] +// ARM_FEATURE_V4T, +// ARM_FEATURE_V5, +// ARM_FEATURE_STRONGARM, +// ARM_FEATURE_VAPA, [> cp15 VA to PA lookups <] +// ARM_FEATURE_ARM_DIV, [> divide supported in ARM encoding <] +// ARM_FEATURE_VFP4, [> VFPv4 (implies that NEON is v2) <] +// ARM_FEATURE_GENERIC_TIMER, +// ARM_FEATURE_MVFR, [> Media and VFP Feature Registers 0 and 1 <] /* }; */ enum arm_features { ARM_FEATURE_VFP, @@ -290,53 +290,53 @@ enum arm_features { ARM_FEATURE_VFP_FP16, ARM_FEATURE_NEON, ARM_FEATURE_THUMB_DIV, /* divide supported in Thumb encoding */ - ARM_FEATURE_M, /* Microcontroller profile. */ - ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. */ + ARM_FEATURE_M, /* Microcontroller profile. */ + ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. 
*/ ARM_FEATURE_THUMB2EE, - ARM_FEATURE_V7MP, /* v7 Multiprocessing Extensions */ + ARM_FEATURE_V7MP, /* v7 Multiprocessing Extensions */ ARM_FEATURE_V7VE, /* v7 Virtualization Extensions (non-EL2 parts) */ ARM_FEATURE_V4T, ARM_FEATURE_V5, ARM_FEATURE_STRONGARM, - ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */ + ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */ ARM_FEATURE_ARM_DIV, /* divide supported in ARM encoding */ - ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */ + ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */ ARM_FEATURE_GENERIC_TIMER, - ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */ - ARM_FEATURE_DUMMY_C15_REGS, /* RAZ/WI all of cp15 crn=15 */ + ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */ + ARM_FEATURE_DUMMY_C15_REGS, /* RAZ/WI all of cp15 crn=15 */ ARM_FEATURE_CACHE_TEST_CLEAN, /* 926/1026 style test-and-clean ops */ - ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */ - ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */ - ARM_FEATURE_MPIDR, /* has cp15 MPIDR */ - ARM_FEATURE_PXN, /* has Privileged Execute Never bit */ - ARM_FEATURE_LPAE, /* has Large Physical Address Extension */ + ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */ + ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */ + ARM_FEATURE_MPIDR, /* has cp15 MPIDR */ + ARM_FEATURE_PXN, /* has Privileged Execute Never bit */ + ARM_FEATURE_LPAE, /* has Large Physical Address Extension */ ARM_FEATURE_V8, - ARM_FEATURE_AARCH64, /* supports 64 bit mode */ - ARM_FEATURE_V8_AES, /* implements AES part of v8 Crypto Extensions */ - ARM_FEATURE_CBAR, /* has cp15 CBAR */ - ARM_FEATURE_CRC, /* ARMv8 CRC instructions */ - ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */ - ARM_FEATURE_EL2, /* has EL2 Virtualization support */ - ARM_FEATURE_EL3, /* has EL3 Secure monitor support */ - ARM_FEATURE_V8_SHA1, /* implements SHA1 part of v8 Crypto Extensions */ - ARM_FEATURE_V8_SHA256, /* implements SHA256 part of v8 Crypto Extensions */ - ARM_FEATURE_V8_PMULL, /* implements PMULL part of v8 Crypto Extensions */ - ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */ - ARM_FEATURE_PMU, /* has PMU support */ - ARM_FEATURE_VBAR, /* has cp15 VBAR */ + ARM_FEATURE_AARCH64, /* supports 64 bit mode */ + ARM_FEATURE_V8_AES, /* implements AES part of v8 Crypto Extensions */ + ARM_FEATURE_CBAR, /* has cp15 CBAR */ + ARM_FEATURE_CRC, /* ARMv8 CRC instructions */ + ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */ + ARM_FEATURE_EL2, /* has EL2 Virtualization support */ + ARM_FEATURE_EL3, /* has EL3 Secure monitor support */ + ARM_FEATURE_V8_SHA1, /* implements SHA1 part of v8 Crypto Extensions */ + ARM_FEATURE_V8_SHA256, /* implements SHA256 part of v8 Crypto Extensions */ + ARM_FEATURE_V8_PMULL, /* implements PMULL part of v8 Crypto Extensions */ + ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */ + ARM_FEATURE_PMU, /* has PMU support */ + ARM_FEATURE_VBAR, /* has cp15 VBAR */ ARM_FEATURE_M_SECURITY, /* M profile Security Extension */ - ARM_FEATURE_JAZELLE, /* has (trivial) Jazelle implementation */ - ARM_FEATURE_SVE, /* has Scalable Vector Extension */ - ARM_FEATURE_V8_SHA512, /* implements SHA512 part of v8 Crypto Extensions */ - ARM_FEATURE_V8_SHA3, /* implements SHA3 part of v8 Crypto Extensions */ - ARM_FEATURE_V8_SM3, /* implements SM3 part of v8 Crypto Extensions */ - ARM_FEATURE_V8_SM4, /* implements SM4 part of v8 Crypto Extensions */ + 
ARM_FEATURE_JAZELLE, /* has (trivial) Jazelle implementation */ + ARM_FEATURE_SVE, /* has Scalable Vector Extension */ + ARM_FEATURE_V8_SHA512, /* implements SHA512 part of v8 Crypto Extensions */ + ARM_FEATURE_V8_SHA3, /* implements SHA3 part of v8 Crypto Extensions */ + ARM_FEATURE_V8_SM3, /* implements SM3 part of v8 Crypto Extensions */ + ARM_FEATURE_V8_SM4, /* implements SM4 part of v8 Crypto Extensions */ ARM_FEATURE_V8_ATOMICS, /* ARMv8.1-Atomics feature */ - ARM_FEATURE_V8_RDM, /* implements v8.1 simd round multiply */ + ARM_FEATURE_V8_RDM, /* implements v8.1 simd round multiply */ ARM_FEATURE_V8_DOTPROD, /* implements v8.2 simd dot product */ - ARM_FEATURE_V8_FP16, /* implements v8.2 half-precision float */ - ARM_FEATURE_V8_FCMA, /* has complex number part of v8.3 extensions. */ - ARM_FEATURE_M_MAIN, /* M profile Main Extension */ + ARM_FEATURE_V8_FP16, /* implements v8.2 half-precision float */ + ARM_FEATURE_V8_FCMA, /* has complex number part of v8.3 extensions. */ + ARM_FEATURE_M_MAIN, /* M profile Main Extension */ }; static inline int arm_feature(CPUARMState *env, int feature) { return (env->features & (1ULL << feature)) != 0; @@ -404,7 +404,7 @@ void cpu_arm_set_cp_io(CPUARMState *env, int cpnum, ARMReadCPFunc *cp_read, ARMW #define MMU_USER_IDX 1 /* static inline int cpu_mmu_index(CPUARMState *env) { */ - // return (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR ? 1 : 0; +// return (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR ? 1 : 0; // } #include "cpu-all.h" diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c index 7a19679..819e535 100644 --- a/src/target-arm/helper.c +++ b/src/target-arm/helper.c @@ -331,7 +331,7 @@ void cpu_state_reset(CPUARMState *env) { env->kvm_irq = -1; } -//static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg) { +// static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg) { // int nregs; // // /* VFP data registers are always little-endian. */ @@ -363,7 +363,7 @@ void cpu_state_reset(CPUARMState *env) { // return 0; //} // -//static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) { +// static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) { // int nregs; // // nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 
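[Editor's note] arm_feature() at the end of the enum treats env->features as a 64-bit bitmask. Its dual, used when building a CPU model in helper.c, is the obvious one-liner (sketch):

    static inline void set_feature(CPUARMState *env, int feature) {
        env->features |= 1ULL << feature; /* mirror of arm_feature()'s test */
    }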
32 : 16; @@ -411,31 +411,29 @@ CPUARMState *cpu_arm_init(const char *cpu_model) { env->cp15.c0_cpuid = id; -// if (arm_feature(env, ARM_FEATURE_NEON)) { -// gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg, 51, "arm-neon.xml", 0); -// } else if (arm_feature(env, ARM_FEATURE_VFP3)) { -// gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg, 35, "arm-vfp3.xml", 0); -// } else if (arm_feature(env, ARM_FEATURE_VFP)) { -// gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg, 19, "arm-vfp.xml", 0); -// } + // if (arm_feature(env, ARM_FEATURE_NEON)) { + // gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg, 51, "arm-neon.xml", 0); + // } else if (arm_feature(env, ARM_FEATURE_VFP3)) { + // gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg, 35, "arm-vfp3.xml", 0); + // } else if (arm_feature(env, ARM_FEATURE_VFP)) { + // gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg, 19, "arm-vfp.xml", 0); + // } qemu_init_vcpu(env); - //move state reset to do_cpu_arm_init - //cpu_state_reset(env); + // move state reset to do_cpu_arm_init + // cpu_state_reset(env); return env; } - void do_cpu_arm_init(CPUARMState *env) { cpu_state_reset(env); - } void arm_cpu_set_irq(CPUARMState *env, int level) { - - if (level) { - cpu_interrupt(env,CPU_INTERRUPT_HARD); + + if (level) { + cpu_interrupt(env, CPU_INTERRUPT_HARD); } else { cpu_reset_interrupt(env, CPU_INTERRUPT_HARD); } @@ -581,8 +579,8 @@ uint32_t HELPER(uxtb16)(uint32_t x) { uint32_t HELPER(clz)(uint32_t x) { uint32_t res; - res= (uint32_t)clz32(x); - return res; + res = (uint32_t) clz32(x); + return res; } int32_t HELPER(sdiv)(int32_t num, int32_t den) { @@ -711,11 +709,9 @@ static void do_v7m_exception_exit(CPUARMState *env) { pointer. */ } - -void do_interrupt_v7m(CPUARMState *env) -{ - uint32_t addr; - uint32_t xpsr = xpsr_read(env); +void do_interrupt_v7m(CPUARMState *env) { + uint32_t addr; + uint32_t xpsr = xpsr_read(env); uint32_t lr; int exc; @@ -723,47 +719,47 @@ void do_interrupt_v7m(CPUARMState *env) lr = 0xfffffff1; if (env->v7m.current_sp) - lr |= 4; - if (env->v7m.exception == 0) - lr |= 8; - //printf("interreput = 0x%x\n",env->exception_index); + lr |= 4; + if (env->v7m.exception == 0) + lr |= 8; + // printf("interreput = 0x%x\n",env->exception_index); /* For exceptions we just mark as pending on the NVIC, and let that handle it. */ switch (env->exception_index) { - case EXCP_UDEF: - armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false); - return; - case EXCP_SWI: - /* The PC already points to the next instruction. */ - armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, false); - return; - case EXCP_PREFETCH_ABORT: - case EXCP_DATA_ABORT: - armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, false); - break; - case EXCP_BKPT: - if (semihosting_enabled) { - int nr; - nr = arm_lduw_code(env->regs[15], env->bswap_code) & 0xff; - if (nr == 0xab) { - env->regs[15] += 2; - env->regs[0] = do_arm_semihosting(env); - return; + case EXCP_UDEF: + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false); + return; + case EXCP_SWI: + /* The PC already points to the next instruction. 
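[Editor's note] The lr value assembled near the top of do_interrupt_v7m is the ARMv7-M EXC_RETURN magic word. Decoded (hedged against the v7-M architecture manual):

    /* lr = 0xfffffff1 base, then:
     *   |= 4  if the process stack (PSP) was active -> return to PSP
     *   |= 8  if no exception was active            -> return to Thread mode
     * 0xfffffff1 alone means: return to Handler mode on the main stack. */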
*/ + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, false); + return; + case EXCP_PREFETCH_ABORT: + case EXCP_DATA_ABORT: + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, false); + break; + case EXCP_BKPT: + if (semihosting_enabled) { + int nr; + nr = arm_lduw_code(env->regs[15], env->bswap_code) & 0xff; + if (nr == 0xab) { + env->regs[15] += 2; + env->regs[0] = do_arm_semihosting(env); + return; + } } - } - armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false); - return; - case EXCP_IRQ: - armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure); - armv7m_nvic_acknowledge_irq(env->nvic); - env->v7m.exception=exc; - break; - case EXCP_EXCEPTION_EXIT: - do_v7m_exception_exit(env); - return; - default: - cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index); - return; /* Never happens. Keep compiler happy. */ + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false); + return; + case EXCP_IRQ: + armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure); + armv7m_nvic_acknowledge_irq(env->nvic); + env->v7m.exception = exc; + break; + case EXCP_EXCEPTION_EXIT: + do_v7m_exception_exit(env); + return; + default: + cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index); + return; /* Never happens. Keep compiler happy. */ } /* Align stack pointer. */ @@ -781,30 +777,28 @@ void do_interrupt_v7m(CPUARMState *env) v7m_push(env, env->regs[2]); v7m_push(env, env->regs[1]); v7m_push(env, env->regs[0]); - /* Now we've done everything that might cause a derived exception * we can go ahead and activate whichever exception we're going to * take (which might now be the derived exception). */ - + /* Switch to the msp stack. */ - switch_v7m_sp(env, 0); + switch_v7m_sp(env, 0); /* Clear IT bits */ env->condexec_bits = 0; env->regs[14] = lr; addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4); env->regs[15] = addr & 0xfffffffe; env->thumb = addr & 1; - } #ifdef CONFIG_SYMBEX #include /* This will be called from S2EExecutor if running concretely; It will in turn call the real ARM IRQ handler with current CPUARMState.*/ -void do_interrupt(CPUARMState *env){ - g_sqi.exec.do_interrupt_arm(env); +void do_interrupt(CPUARMState *env) { + g_sqi.exec.do_interrupt_arm(env); } void se_do_interrupt_arm(CPUARMState *env) { #else @@ -2253,7 +2247,7 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) { break; case 18: /* BASEPRI_MAX */ val &= 0xff; - if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0)){ + if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0)) { env->v7m.basepri = val; env->kvm_exit_code = 1; cpu_exit(env); diff --git a/src/target-arm/helper.h b/src/target-arm/helper.h index 10458c3..b005fd4 100644 --- a/src/target-arm/helper.h +++ b/src/target-arm/helper.h @@ -20,7 +20,6 @@ #include - #define _RM_EXCP (_M_CF | _M_VF | _M_NF | _M_ZF) #define _WM_EXCP (_M_CF | _M_VF | _M_NF | _M_ZF) #define _AM_EXCP 0 @@ -96,11 +95,11 @@ DEF_HELPER_0(wfi, void) DEF_HELPER_2(get_r13_banked, i32, env, i32) DEF_HELPER_3(set_r13_banked, void, env, i32, i32) -//DEF_HELPER_2(get_r14_banked, i32, env, i32) -//DEF_HELPER_3(set_r14_banked, void, env, i32, i32) +// DEF_HELPER_2(get_r14_banked, i32, env, i32) +// DEF_HELPER_3(set_r14_banked, void, env, i32, i32) -//DEF_HELPER_2(get_spsr_banked, i32, env, i32) -//DEF_HELPER_3(set_spsr_banked, void, env, i32, i32) +// DEF_HELPER_2(get_spsr_banked, i32, env, i32) +// DEF_HELPER_3(set_spsr_banked, void, env, i32, i32) DEF_HELPER_3(v7m_msr, void, env, i32, i32) 
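[Editor's note] The tail of do_interrupt_v7m fetches the handler address straight from the guest vector table; annotated for reference (same statements as the hunk above):

    uint32_t entry = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
    env->regs[15] = entry & 0xfffffffe; /* bit 0 is the Thumb bit, not part of PC */
    env->thumb    = entry & 1;          /* M-profile handlers execute as Thumb    */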
DEF_HELPER_2(v7m_mrs, i32, env, i32) @@ -294,9 +293,9 @@ DEF_HELPER_3(neon_qshlu_s16, i32, env, i32, i32); DEF_HELPER_3(neon_qshlu_s32, i32, env, i32, i32); DEF_HELPER_3(neon_qshlu_s64, i64, env, i64, i64); DEF_HELPER_3(neon_qrshl_u8, i32, env, i32, i32) -//DEF_HELPER_3(neon_qrshl_s8, i32, env, i32, i32) +// DEF_HELPER_3(neon_qrshl_s8, i32, env, i32, i32) DEF_HELPER_3(neon_qrshl_u16, i32, env, i32, i32) -//DEF_HELPER_3(neon_qrshl_s16, i32, env, i32, i32) +// DEF_HELPER_3(neon_qrshl_s16, i32, env, i32, i32) DEF_HELPER_3(neon_qrshl_u32, i32, env, i32, i32) DEF_HELPER_3(neon_qrshl_s32, i32, env, i32, i32) DEF_HELPER_3(neon_qrshl_u64, i64, env, i64, i64) diff --git a/src/target-arm/iwmmxt_helper.c b/src/target-arm/iwmmxt_helper.c index 1dd8d1a..02d55af 100644 --- a/src/target-arm/iwmmxt_helper.c +++ b/src/target-arm/iwmmxt_helper.c @@ -19,8 +19,8 @@ * License along with this library; if not, see . */ -#include #include +#include #include "cpu.h" #include "exec-all.h" @@ -29,296 +29,200 @@ /* iwMMXt macros extracted from GNU gdb. */ /* Set the SIMD wCASF flags for 8, 16, 32 or 64-bit operations. */ -#define SIMD8_SET( v, n, b) ((v != 0) << ((((b) + 1) * 4) + (n))) -#define SIMD16_SET(v, n, h) ((v != 0) << ((((h) + 1) * 8) + (n))) -#define SIMD32_SET(v, n, w) ((v != 0) << ((((w) + 1) * 16) + (n))) -#define SIMD64_SET(v, n) ((v != 0) << (32 + (n))) +#define SIMD8_SET(v, n, b) ((v != 0) << ((((b) + 1) * 4) + (n))) +#define SIMD16_SET(v, n, h) ((v != 0) << ((((h) + 1) * 8) + (n))) +#define SIMD32_SET(v, n, w) ((v != 0) << ((((w) + 1) * 16) + (n))) +#define SIMD64_SET(v, n) ((v != 0) << (32 + (n))) /* Flags to pass as "n" above. */ -#define SIMD_NBIT -1 -#define SIMD_ZBIT -2 -#define SIMD_CBIT -3 -#define SIMD_VBIT -4 +#define SIMD_NBIT -1 +#define SIMD_ZBIT -2 +#define SIMD_CBIT -3 +#define SIMD_VBIT -4 /* Various status bit macros. */ -#define NBIT8(x) ((x) & 0x80) -#define NBIT16(x) ((x) & 0x8000) -#define NBIT32(x) ((x) & 0x80000000) -#define NBIT64(x) ((x) & 0x8000000000000000ULL) -#define ZBIT8(x) (((x) & 0xff) == 0) -#define ZBIT16(x) (((x) & 0xffff) == 0) -#define ZBIT32(x) (((x) & 0xffffffff) == 0) -#define ZBIT64(x) (x == 0) +#define NBIT8(x) ((x) &0x80) +#define NBIT16(x) ((x) &0x8000) +#define NBIT32(x) ((x) &0x80000000) +#define NBIT64(x) ((x) &0x8000000000000000ULL) +#define ZBIT8(x) (((x) &0xff) == 0) +#define ZBIT16(x) (((x) &0xffff) == 0) +#define ZBIT32(x) (((x) &0xffffffff) == 0) +#define ZBIT64(x) (x == 0) /* Sign extension macros. 
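[Editor's note] The SIMD*_SET macros in iwmmxt_helper.c pack per-lane condition flags into wCASF. The bit arithmetic, worked through for the 8-bit case:

    /* bit = ((b + 1) * 4) + n with SIMD_NBIT..SIMD_VBIT == -1..-4, so byte
     * lane b owns nibble [4b .. 4b+3] of wCASF, ordered V,C,Z,N from the low
     * bit; e.g. lane 0's N flag lands in bit 3, lane 7's in bit 31. */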
*/ -#define EXTEND8H(a) ((uint16_t) (int8_t) (a)) -#define EXTEND8(a) ((uint32_t) (int8_t) (a)) -#define EXTEND16(a) ((uint32_t) (int16_t) (a)) -#define EXTEND16S(a) ((int32_t) (int16_t) (a)) -#define EXTEND32(a) ((uint64_t) (int32_t) (a)) - -uint64_t HELPER(iwmmxt_maddsq)(uint64_t a, uint64_t b) -{ - a = (( - EXTEND16S((a >> 0) & 0xffff) * EXTEND16S((b >> 0) & 0xffff) + - EXTEND16S((a >> 16) & 0xffff) * EXTEND16S((b >> 16) & 0xffff) - ) & 0xffffffff) | ((uint64_t) ( - EXTEND16S((a >> 32) & 0xffff) * EXTEND16S((b >> 32) & 0xffff) + - EXTEND16S((a >> 48) & 0xffff) * EXTEND16S((b >> 48) & 0xffff) - ) << 32); +#define EXTEND8H(a) ((uint16_t)(int8_t)(a)) +#define EXTEND8(a) ((uint32_t)(int8_t)(a)) +#define EXTEND16(a) ((uint32_t)(int16_t)(a)) +#define EXTEND16S(a) ((int32_t)(int16_t)(a)) +#define EXTEND32(a) ((uint64_t)(int32_t)(a)) + +uint64_t HELPER(iwmmxt_maddsq)(uint64_t a, uint64_t b) { + a = ((EXTEND16S((a >> 0) & 0xffff) * EXTEND16S((b >> 0) & 0xffff) + + EXTEND16S((a >> 16) & 0xffff) * EXTEND16S((b >> 16) & 0xffff)) & + 0xffffffff) | + ((uint64_t)(EXTEND16S((a >> 32) & 0xffff) * EXTEND16S((b >> 32) & 0xffff) + + EXTEND16S((a >> 48) & 0xffff) * EXTEND16S((b >> 48) & 0xffff)) + << 32); return a; } -uint64_t HELPER(iwmmxt_madduq)(uint64_t a, uint64_t b) -{ - a = (( - ((a >> 0) & 0xffff) * ((b >> 0) & 0xffff) + - ((a >> 16) & 0xffff) * ((b >> 16) & 0xffff) - ) & 0xffffffff) | (( - ((a >> 32) & 0xffff) * ((b >> 32) & 0xffff) + - ((a >> 48) & 0xffff) * ((b >> 48) & 0xffff) - ) << 32); +uint64_t HELPER(iwmmxt_madduq)(uint64_t a, uint64_t b) { + a = ((((a >> 0) & 0xffff) * ((b >> 0) & 0xffff) + ((a >> 16) & 0xffff) * ((b >> 16) & 0xffff)) & 0xffffffff) | + ((((a >> 32) & 0xffff) * ((b >> 32) & 0xffff) + ((a >> 48) & 0xffff) * ((b >> 48) & 0xffff)) << 32); return a; } -uint64_t HELPER(iwmmxt_sadb)(uint64_t a, uint64_t b) -{ +uint64_t HELPER(iwmmxt_sadb)(uint64_t a, uint64_t b) { #define abs(x) (((x) >= 0) ? 
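[Editor's note] iwmmxt_maddsq above is a packed signed multiply-accumulate over 16-bit lanes; conceptually:

    /* With a = {a3,a2,a1,a0} and b = {b3,b2,b1,b0} as 16-bit lanes:
     *   result[31:0]  = (int16)a0*(int16)b0 + (int16)a1*(int16)b1
     *   result[63:32] = (int16)a2*(int16)b2 + (int16)a3*(int16)b3
     * EXTEND16S performs the sign extension before each 32-bit multiply;
     * madduq is the same shape without the sign extension. */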
x : -x) #define SADB(SHR) abs((int) ((a >> SHR) & 0xff) - (int) ((b >> SHR) & 0xff)) - return - SADB(0) + SADB(8) + SADB(16) + SADB(24) + - SADB(32) + SADB(40) + SADB(48) + SADB(56); + return SADB(0) + SADB(8) + SADB(16) + SADB(24) + SADB(32) + SADB(40) + SADB(48) + SADB(56); #undef SADB } -uint64_t HELPER(iwmmxt_sadw)(uint64_t a, uint64_t b) -{ -#define SADW(SHR) \ - abs((int) ((a >> SHR) & 0xffff) - (int) ((b >> SHR) & 0xffff)) +uint64_t HELPER(iwmmxt_sadw)(uint64_t a, uint64_t b) { +#define SADW(SHR) abs((int) ((a >> SHR) & 0xffff) - (int) ((b >> SHR) & 0xffff)) return SADW(0) + SADW(16) + SADW(32) + SADW(48); #undef SADW } -uint64_t HELPER(iwmmxt_mulslw)(uint64_t a, uint64_t b) -{ -#define MULS(SHR) ((uint64_t) ((( \ - EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff) \ - ) >> 0) & 0xffff) << SHR) +uint64_t HELPER(iwmmxt_mulslw)(uint64_t a, uint64_t b) { +#define MULS(SHR) ((uint64_t)(((EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff)) >> 0) & 0xffff) << SHR) return MULS(0) | MULS(16) | MULS(32) | MULS(48); #undef MULS } -uint64_t HELPER(iwmmxt_mulshw)(uint64_t a, uint64_t b) -{ -#define MULS(SHR) ((uint64_t) ((( \ - EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff) \ - ) >> 16) & 0xffff) << SHR) +uint64_t HELPER(iwmmxt_mulshw)(uint64_t a, uint64_t b) { +#define MULS(SHR) \ + ((uint64_t)(((EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff)) >> 16) & 0xffff) << SHR) return MULS(0) | MULS(16) | MULS(32) | MULS(48); #undef MULS } -uint64_t HELPER(iwmmxt_mululw)(uint64_t a, uint64_t b) -{ -#define MULU(SHR) ((uint64_t) ((( \ - ((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff) \ - ) >> 0) & 0xffff) << SHR) +uint64_t HELPER(iwmmxt_mululw)(uint64_t a, uint64_t b) { +#define MULU(SHR) ((uint64_t)(((((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff)) >> 0) & 0xffff) << SHR) return MULU(0) | MULU(16) | MULU(32) | MULU(48); #undef MULU } -uint64_t HELPER(iwmmxt_muluhw)(uint64_t a, uint64_t b) -{ -#define MULU(SHR) ((uint64_t) ((( \ - ((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff) \ - ) >> 16) & 0xffff) << SHR) +uint64_t HELPER(iwmmxt_muluhw)(uint64_t a, uint64_t b) { +#define MULU(SHR) ((uint64_t)(((((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff)) >> 16) & 0xffff) << SHR) return MULU(0) | MULU(16) | MULU(32) | MULU(48); #undef MULU } -uint64_t HELPER(iwmmxt_macsw)(uint64_t a, uint64_t b) -{ -#define MACS(SHR) ( \ - EXTEND16((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff)) - return (int64_t) (MACS(0) + MACS(16) + MACS(32) + MACS(48)); +uint64_t HELPER(iwmmxt_macsw)(uint64_t a, uint64_t b) { +#define MACS(SHR) (EXTEND16((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff)) + return (int64_t)(MACS(0) + MACS(16) + MACS(32) + MACS(48)); #undef MACS } -uint64_t HELPER(iwmmxt_macuw)(uint64_t a, uint64_t b) -{ -#define MACU(SHR) ( \ - (uint32_t) ((a >> SHR) & 0xffff) * \ - (uint32_t) ((b >> SHR) & 0xffff)) +uint64_t HELPER(iwmmxt_macuw)(uint64_t a, uint64_t b) { +#define MACU(SHR) ((uint32_t)((a >> SHR) & 0xffff) * (uint32_t)((b >> SHR) & 0xffff)) return MACU(0) + MACU(16) + MACU(32) + MACU(48); #undef MACU } -#define NZBIT8(x, i) \ - SIMD8_SET(NBIT8((x) & 0xff), SIMD_NBIT, i) | \ - SIMD8_SET(ZBIT8((x) & 0xff), SIMD_ZBIT, i) -#define NZBIT16(x, i) \ - SIMD16_SET(NBIT16((x) & 0xffff), SIMD_NBIT, i) | \ - SIMD16_SET(ZBIT16((x) & 0xffff), SIMD_ZBIT, i) +#define NZBIT8(x, i) SIMD8_SET(NBIT8((x) &0xff), SIMD_NBIT, i) | SIMD8_SET(ZBIT8((x) &0xff), SIMD_ZBIT, i) +#define NZBIT16(x, i) SIMD16_SET(NBIT16((x) &0xffff), SIMD_NBIT, i) | SIMD16_SET(ZBIT16((x) 
&0xffff), SIMD_ZBIT, i) #define NZBIT32(x, i) \ - SIMD32_SET(NBIT32((x) & 0xffffffff), SIMD_NBIT, i) | \ - SIMD32_SET(ZBIT32((x) & 0xffffffff), SIMD_ZBIT, i) -#define NZBIT64(x) \ - SIMD64_SET(NBIT64(x), SIMD_NBIT) | \ - SIMD64_SET(ZBIT64(x), SIMD_ZBIT) -#define IWMMXT_OP_UNPACK(S, SH0, SH1, SH2, SH3) \ -uint64_t HELPER(glue(iwmmxt_unpack, glue(S, b)))(CPUARMState *env, \ - uint64_t a, uint64_t b) \ -{ \ - a = \ - (((a >> SH0) & 0xff) << 0) | (((b >> SH0) & 0xff) << 8) | \ - (((a >> SH1) & 0xff) << 16) | (((b >> SH1) & 0xff) << 24) | \ - (((a >> SH2) & 0xff) << 32) | (((b >> SH2) & 0xff) << 40) | \ - (((a >> SH3) & 0xff) << 48) | (((b >> SH3) & 0xff) << 56); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \ - NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \ - NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \ - NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \ - return a; \ -} \ -uint64_t HELPER(glue(iwmmxt_unpack, glue(S, w)))(CPUARMState *env, \ - uint64_t a, uint64_t b) \ -{ \ - a = \ - (((a >> SH0) & 0xffff) << 0) | \ - (((b >> SH0) & 0xffff) << 16) | \ - (((a >> SH2) & 0xffff) << 32) | \ - (((b >> SH2) & 0xffff) << 48); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT8(a >> 0, 0) | NZBIT8(a >> 16, 1) | \ - NZBIT8(a >> 32, 2) | NZBIT8(a >> 48, 3); \ - return a; \ -} \ -uint64_t HELPER(glue(iwmmxt_unpack, glue(S, l)))(CPUARMState *env, \ - uint64_t a, uint64_t b) \ -{ \ - a = \ - (((a >> SH0) & 0xffffffff) << 0) | \ - (((b >> SH0) & 0xffffffff) << 32); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \ - return a; \ -} \ -uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ub)))(CPUARMState *env, \ - uint64_t x) \ -{ \ - x = \ - (((x >> SH0) & 0xff) << 0) | \ - (((x >> SH1) & 0xff) << 16) | \ - (((x >> SH2) & 0xff) << 32) | \ - (((x >> SH3) & 0xff) << 48); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \ - NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \ - return x; \ -} \ -uint64_t HELPER(glue(iwmmxt_unpack, glue(S, uw)))(CPUARMState *env, \ - uint64_t x) \ -{ \ - x = \ - (((x >> SH0) & 0xffff) << 0) | \ - (((x >> SH2) & 0xffff) << 32); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \ - return x; \ -} \ -uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ul)))(CPUARMState *env, \ - uint64_t x) \ -{ \ - x = (((x >> SH0) & 0xffffffff) << 0); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \ - return x; \ -} \ -uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sb)))(CPUARMState *env, \ - uint64_t x) \ -{ \ - x = \ - ((uint64_t) EXTEND8H((x >> SH0) & 0xff) << 0) | \ - ((uint64_t) EXTEND8H((x >> SH1) & 0xff) << 16) | \ - ((uint64_t) EXTEND8H((x >> SH2) & 0xff) << 32) | \ - ((uint64_t) EXTEND8H((x >> SH3) & 0xff) << 48); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \ - NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \ - return x; \ -} \ -uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sw)))(CPUARMState *env, \ - uint64_t x) \ -{ \ - x = \ - ((uint64_t) EXTEND16((x >> SH0) & 0xffff) << 0) | \ - ((uint64_t) EXTEND16((x >> SH2) & 0xffff) << 32); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \ - return x; \ -} \ -uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sl)))(CPUARMState *env, \ - uint64_t x) \ -{ \ - x = EXTEND32((x >> SH0) & 0xffffffff); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \ - return x; \ -} + SIMD32_SET(NBIT32((x) &0xffffffff), SIMD_NBIT, i) | 
SIMD32_SET(ZBIT32((x) &0xffffffff), SIMD_ZBIT, i) +#define NZBIT64(x) SIMD64_SET(NBIT64(x), SIMD_NBIT) | SIMD64_SET(ZBIT64(x), SIMD_ZBIT) +#define IWMMXT_OP_UNPACK(S, SH0, SH1, SH2, SH3) \ + uint64_t HELPER(glue(iwmmxt_unpack, glue(S, b)))(CPUARMState * env, uint64_t a, uint64_t b) { \ + a = (((a >> SH0) & 0xff) << 0) | (((b >> SH0) & 0xff) << 8) | (((a >> SH1) & 0xff) << 16) | \ + (((b >> SH1) & 0xff) << 24) | (((a >> SH2) & 0xff) << 32) | (((b >> SH2) & 0xff) << 40) | \ + (((a >> SH3) & 0xff) << 48) | (((b >> SH3) & 0xff) << 56); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | NZBIT8(a >> 16, 2) | \ + NZBIT8(a >> 24, 3) | NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \ + NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \ + return a; \ + } \ + uint64_t HELPER(glue(iwmmxt_unpack, glue(S, w)))(CPUARMState * env, uint64_t a, uint64_t b) { \ + a = (((a >> SH0) & 0xffff) << 0) | (((b >> SH0) & 0xffff) << 16) | (((a >> SH2) & 0xffff) << 32) | \ + (((b >> SH2) & 0xffff) << 48); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT8(a >> 0, 0) | NZBIT8(a >> 16, 1) | NZBIT8(a >> 32, 2) | NZBIT8(a >> 48, 3); \ + return a; \ + } \ + uint64_t HELPER(glue(iwmmxt_unpack, glue(S, l)))(CPUARMState * env, uint64_t a, uint64_t b) { \ + a = (((a >> SH0) & 0xffffffff) << 0) | (((b >> SH0) & 0xffffffff) << 32); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \ + return a; \ + } \ + uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ub)))(CPUARMState * env, uint64_t x) { \ + x = (((x >> SH0) & 0xff) << 0) | (((x >> SH1) & 0xff) << 16) | (((x >> SH2) & 0xff) << 32) | \ + (((x >> SH3) & 0xff) << 48); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \ + return x; \ + } \ + uint64_t HELPER(glue(iwmmxt_unpack, glue(S, uw)))(CPUARMState * env, uint64_t x) { \ + x = (((x >> SH0) & 0xffff) << 0) | (((x >> SH2) & 0xffff) << 32); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \ + return x; \ + } \ + uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ul)))(CPUARMState * env, uint64_t x) { \ + x = (((x >> SH0) & 0xffffffff) << 0); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \ + return x; \ + } \ + uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sb)))(CPUARMState * env, uint64_t x) { \ + x = ((uint64_t) EXTEND8H((x >> SH0) & 0xff) << 0) | ((uint64_t) EXTEND8H((x >> SH1) & 0xff) << 16) | \ + ((uint64_t) EXTEND8H((x >> SH2) & 0xff) << 32) | ((uint64_t) EXTEND8H((x >> SH3) & 0xff) << 48); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \ + return x; \ + } \ + uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sw)))(CPUARMState * env, uint64_t x) { \ + x = ((uint64_t) EXTEND16((x >> SH0) & 0xffff) << 0) | ((uint64_t) EXTEND16((x >> SH2) & 0xffff) << 32); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \ + return x; \ + } \ + uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sl)))(CPUARMState * env, uint64_t x) { \ + x = EXTEND32((x >> SH0) & 0xffffffff); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \ + return x; \ + } IWMMXT_OP_UNPACK(l, 0, 8, 16, 24) IWMMXT_OP_UNPACK(h, 32, 40, 48, 56) -#define IWMMXT_OP_CMP(SUFF, Tb, Tw, Tl, O) \ -uint64_t HELPER(glue(iwmmxt_, glue(SUFF, b)))(CPUARMState *env, \ - uint64_t a, uint64_t b) \ -{ \ - a = \ - CMP(0, Tb, O, 0xff) | CMP(8, Tb, O, 0xff) | \ - CMP(16, Tb, O, 0xff) | CMP(24, Tb, O, 0xff) | \ - 
CMP(32, Tb, O, 0xff) | CMP(40, Tb, O, 0xff) | \ - CMP(48, Tb, O, 0xff) | CMP(56, Tb, O, 0xff); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \ - NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \ - NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \ - NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \ - return a; \ -} \ -uint64_t HELPER(glue(iwmmxt_, glue(SUFF, w)))(CPUARMState *env, \ - uint64_t a, uint64_t b) \ -{ \ - a = CMP(0, Tw, O, 0xffff) | CMP(16, Tw, O, 0xffff) | \ - CMP(32, Tw, O, 0xffff) | CMP(48, Tw, O, 0xffff); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | \ - NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); \ - return a; \ -} \ -uint64_t HELPER(glue(iwmmxt_, glue(SUFF, l)))(CPUARMState *env, \ - uint64_t a, uint64_t b) \ -{ \ - a = CMP(0, Tl, O, 0xffffffff) | \ - CMP(32, Tl, O, 0xffffffff); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \ - return a; \ -} -#define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \ - (TYPE) ((b >> SHR) & MASK)) ? (uint64_t) MASK : 0) << SHR) +#define IWMMXT_OP_CMP(SUFF, Tb, Tw, Tl, O) \ + uint64_t HELPER(glue(iwmmxt_, glue(SUFF, b)))(CPUARMState * env, uint64_t a, uint64_t b) { \ + a = CMP(0, Tb, O, 0xff) | CMP(8, Tb, O, 0xff) | CMP(16, Tb, O, 0xff) | CMP(24, Tb, O, 0xff) | \ + CMP(32, Tb, O, 0xff) | CMP(40, Tb, O, 0xff) | CMP(48, Tb, O, 0xff) | CMP(56, Tb, O, 0xff); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | NZBIT8(a >> 16, 2) | \ + NZBIT8(a >> 24, 3) | NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \ + NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \ + return a; \ + } \ + uint64_t HELPER(glue(iwmmxt_, glue(SUFF, w)))(CPUARMState * env, uint64_t a, uint64_t b) { \ + a = CMP(0, Tw, O, 0xffff) | CMP(16, Tw, O, 0xffff) | CMP(32, Tw, O, 0xffff) | CMP(48, Tw, O, 0xffff); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); \ + return a; \ + } \ + uint64_t HELPER(glue(iwmmxt_, glue(SUFF, l)))(CPUARMState * env, uint64_t a, uint64_t b) { \ + a = CMP(0, Tl, O, 0xffffffff) | CMP(32, Tl, O, 0xffffffff); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \ + return a; \ + } +#define CMP(SHR, TYPE, OPER, MASK) \ + ((((TYPE)((a >> SHR) & MASK) OPER(TYPE)((b >> SHR) & MASK)) ? (uint64_t) MASK : 0) << SHR) IWMMXT_OP_CMP(cmpeq, uint8_t, uint16_t, uint32_t, ==) IWMMXT_OP_CMP(cmpgts, int8_t, int16_t, int32_t, >) IWMMXT_OP_CMP(cmpgtu, uint8_t, uint16_t, uint32_t, >) #undef CMP -#define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \ - (TYPE) ((b >> SHR) & MASK)) ? a : b) & ((uint64_t) MASK << SHR)) +#define CMP(SHR, TYPE, OPER, MASK) \ + ((((TYPE)((a >> SHR) & MASK) OPER(TYPE)((b >> SHR) & MASK)) ? 
a : b) & ((uint64_t) MASK << SHR)) IWMMXT_OP_CMP(mins, int8_t, int16_t, int32_t, <) IWMMXT_OP_CMP(minu, uint8_t, uint16_t, uint32_t, <) IWMMXT_OP_CMP(maxs, int8_t, int16_t, int32_t, >) IWMMXT_OP_CMP(maxu, uint8_t, uint16_t, uint32_t, >) #undef CMP -#define CMP(SHR, TYPE, OPER, MASK) ((uint64_t) (((TYPE) ((a >> SHR) & MASK) \ - OPER (TYPE) ((b >> SHR) & MASK)) & MASK) << SHR) +#define CMP(SHR, TYPE, OPER, MASK) ((uint64_t)(((TYPE)((a >> SHR) & MASK) OPER(TYPE)((b >> SHR) & MASK)) & MASK) << SHR) IWMMXT_OP_CMP(subn, uint8_t, uint16_t, uint32_t, -) IWMMXT_OP_CMP(addn, uint8_t, uint16_t, uint32_t, +) #undef CMP /* TODO Signed- and Unsigned-Saturation */ -#define CMP(SHR, TYPE, OPER, MASK) ((uint64_t) (((TYPE) ((a >> SHR) & MASK) \ - OPER (TYPE) ((b >> SHR) & MASK)) & MASK) << SHR) +#define CMP(SHR, TYPE, OPER, MASK) ((uint64_t)(((TYPE)((a >> SHR) & MASK) OPER(TYPE)((b >> SHR) & MASK)) & MASK) << SHR) IWMMXT_OP_CMP(subu, uint8_t, uint16_t, uint32_t, -) IWMMXT_OP_CMP(addu, uint8_t, uint16_t, uint32_t, +) IWMMXT_OP_CMP(subs, int8_t, int16_t, int32_t, -) @@ -326,356 +230,254 @@ IWMMXT_OP_CMP(adds, int8_t, int16_t, int32_t, +) #undef CMP #undef IWMMXT_OP_CMP -#define AVGB(SHR) ((( \ - ((a >> SHR) & 0xff) + ((b >> SHR) & 0xff) + round) >> 1) << SHR) -#define IWMMXT_OP_AVGB(r) \ -uint64_t HELPER(iwmmxt_avgb##r)(CPUARMState *env, uint64_t a, uint64_t b) \ -{ \ - const int round = r; \ - a = AVGB(0) | AVGB(8) | AVGB(16) | AVGB(24) | \ - AVGB(32) | AVGB(40) | AVGB(48) | AVGB(56); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - SIMD8_SET(ZBIT8((a >> 0) & 0xff), SIMD_ZBIT, 0) | \ - SIMD8_SET(ZBIT8((a >> 8) & 0xff), SIMD_ZBIT, 1) | \ - SIMD8_SET(ZBIT8((a >> 16) & 0xff), SIMD_ZBIT, 2) | \ - SIMD8_SET(ZBIT8((a >> 24) & 0xff), SIMD_ZBIT, 3) | \ - SIMD8_SET(ZBIT8((a >> 32) & 0xff), SIMD_ZBIT, 4) | \ - SIMD8_SET(ZBIT8((a >> 40) & 0xff), SIMD_ZBIT, 5) | \ - SIMD8_SET(ZBIT8((a >> 48) & 0xff), SIMD_ZBIT, 6) | \ - SIMD8_SET(ZBIT8((a >> 56) & 0xff), SIMD_ZBIT, 7); \ - return a; \ -} +#define AVGB(SHR) (((((a >> SHR) & 0xff) + ((b >> SHR) & 0xff) + round) >> 1) << SHR) +#define IWMMXT_OP_AVGB(r) \ + uint64_t HELPER(iwmmxt_avgb##r)(CPUARMState * env, uint64_t a, uint64_t b) { \ + const int round = r; \ + a = AVGB(0) | AVGB(8) | AVGB(16) | AVGB(24) | AVGB(32) | AVGB(40) | AVGB(48) | AVGB(56); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + SIMD8_SET(ZBIT8((a >> 0) & 0xff), SIMD_ZBIT, 0) | SIMD8_SET(ZBIT8((a >> 8) & 0xff), SIMD_ZBIT, 1) | \ + SIMD8_SET(ZBIT8((a >> 16) & 0xff), SIMD_ZBIT, 2) | SIMD8_SET(ZBIT8((a >> 24) & 0xff), SIMD_ZBIT, 3) | \ + SIMD8_SET(ZBIT8((a >> 32) & 0xff), SIMD_ZBIT, 4) | SIMD8_SET(ZBIT8((a >> 40) & 0xff), SIMD_ZBIT, 5) | \ + SIMD8_SET(ZBIT8((a >> 48) & 0xff), SIMD_ZBIT, 6) | SIMD8_SET(ZBIT8((a >> 56) & 0xff), SIMD_ZBIT, 7); \ + return a; \ + } IWMMXT_OP_AVGB(0) IWMMXT_OP_AVGB(1) #undef IWMMXT_OP_AVGB #undef AVGB -#define AVGW(SHR) ((( \ - ((a >> SHR) & 0xffff) + ((b >> SHR) & 0xffff) + round) >> 1) << SHR) -#define IWMMXT_OP_AVGW(r) \ -uint64_t HELPER(iwmmxt_avgw##r)(CPUARMState *env, uint64_t a, uint64_t b) \ -{ \ - const int round = r; \ - a = AVGW(0) | AVGW(16) | AVGW(32) | AVGW(48); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - SIMD16_SET(ZBIT16((a >> 0) & 0xffff), SIMD_ZBIT, 0) | \ - SIMD16_SET(ZBIT16((a >> 16) & 0xffff), SIMD_ZBIT, 1) | \ - SIMD16_SET(ZBIT16((a >> 32) & 0xffff), SIMD_ZBIT, 2) | \ - SIMD16_SET(ZBIT16((a >> 48) & 0xffff), SIMD_ZBIT, 3); \ - return a; \ -} +#define AVGW(SHR) (((((a >> SHR) & 0xffff) + ((b >> SHR) & 0xffff) + round) >> 1) << SHR) +#define IWMMXT_OP_AVGW(r) \ + 
uint64_t HELPER(iwmmxt_avgw##r)(CPUARMState * env, uint64_t a, uint64_t b) { \ + const int round = r; \ + a = AVGW(0) | AVGW(16) | AVGW(32) | AVGW(48); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = SIMD16_SET(ZBIT16((a >> 0) & 0xffff), SIMD_ZBIT, 0) | \ + SIMD16_SET(ZBIT16((a >> 16) & 0xffff), SIMD_ZBIT, 1) | \ + SIMD16_SET(ZBIT16((a >> 32) & 0xffff), SIMD_ZBIT, 2) | \ + SIMD16_SET(ZBIT16((a >> 48) & 0xffff), SIMD_ZBIT, 3); \ + return a; \ + } IWMMXT_OP_AVGW(0) IWMMXT_OP_AVGW(1) #undef IWMMXT_OP_AVGW #undef AVGW -uint64_t HELPER(iwmmxt_msadb)(uint64_t a, uint64_t b) -{ - a = ((((a >> 0 ) & 0xffff) * ((b >> 0) & 0xffff) + - ((a >> 16) & 0xffff) * ((b >> 16) & 0xffff)) & 0xffffffff) | - ((((a >> 32) & 0xffff) * ((b >> 32) & 0xffff) + - ((a >> 48) & 0xffff) * ((b >> 48) & 0xffff)) << 32); +uint64_t HELPER(iwmmxt_msadb)(uint64_t a, uint64_t b) { + a = ((((a >> 0) & 0xffff) * ((b >> 0) & 0xffff) + ((a >> 16) & 0xffff) * ((b >> 16) & 0xffff)) & 0xffffffff) | + ((((a >> 32) & 0xffff) * ((b >> 32) & 0xffff) + ((a >> 48) & 0xffff) * ((b >> 48) & 0xffff)) << 32); return a; } -uint64_t HELPER(iwmmxt_align)(uint64_t a, uint64_t b, uint32_t n) -{ +uint64_t HELPER(iwmmxt_align)(uint64_t a, uint64_t b, uint32_t n) { a >>= n << 3; a |= b << (64 - (n << 3)); return a; } -uint64_t HELPER(iwmmxt_insr)(uint64_t x, uint32_t a, uint32_t b, uint32_t n) -{ +uint64_t HELPER(iwmmxt_insr)(uint64_t x, uint32_t a, uint32_t b, uint32_t n) { x &= ~((uint64_t) b << n); - x |= (uint64_t) (a & b) << n; + x |= (uint64_t)(a & b) << n; return x; } -uint32_t HELPER(iwmmxt_setpsr_nz)(uint64_t x) -{ - return SIMD64_SET((x == 0), SIMD_ZBIT) | - SIMD64_SET((x & (1ULL << 63)), SIMD_NBIT); +uint32_t HELPER(iwmmxt_setpsr_nz)(uint64_t x) { + return SIMD64_SET((x == 0), SIMD_ZBIT) | SIMD64_SET((x & (1ULL << 63)), SIMD_NBIT); } -uint64_t HELPER(iwmmxt_bcstb)(uint32_t arg) -{ +uint64_t HELPER(iwmmxt_bcstb)(uint32_t arg) { arg &= 0xff; - return - ((uint64_t) arg << 0 ) | ((uint64_t) arg << 8 ) | - ((uint64_t) arg << 16) | ((uint64_t) arg << 24) | - ((uint64_t) arg << 32) | ((uint64_t) arg << 40) | - ((uint64_t) arg << 48) | ((uint64_t) arg << 56); + return ((uint64_t) arg << 0) | ((uint64_t) arg << 8) | ((uint64_t) arg << 16) | ((uint64_t) arg << 24) | + ((uint64_t) arg << 32) | ((uint64_t) arg << 40) | ((uint64_t) arg << 48) | ((uint64_t) arg << 56); } -uint64_t HELPER(iwmmxt_bcstw)(uint32_t arg) -{ +uint64_t HELPER(iwmmxt_bcstw)(uint32_t arg) { arg &= 0xffff; - return - ((uint64_t) arg << 0 ) | ((uint64_t) arg << 16) | - ((uint64_t) arg << 32) | ((uint64_t) arg << 48); + return ((uint64_t) arg << 0) | ((uint64_t) arg << 16) | ((uint64_t) arg << 32) | ((uint64_t) arg << 48); } -uint64_t HELPER(iwmmxt_bcstl)(uint32_t arg) -{ +uint64_t HELPER(iwmmxt_bcstl)(uint32_t arg) { return arg | ((uint64_t) arg << 32); } -uint64_t HELPER(iwmmxt_addcb)(uint64_t x) -{ - return - ((x >> 0) & 0xff) + ((x >> 8) & 0xff) + - ((x >> 16) & 0xff) + ((x >> 24) & 0xff) + - ((x >> 32) & 0xff) + ((x >> 40) & 0xff) + - ((x >> 48) & 0xff) + ((x >> 56) & 0xff); +uint64_t HELPER(iwmmxt_addcb)(uint64_t x) { + return ((x >> 0) & 0xff) + ((x >> 8) & 0xff) + ((x >> 16) & 0xff) + ((x >> 24) & 0xff) + ((x >> 32) & 0xff) + + ((x >> 40) & 0xff) + ((x >> 48) & 0xff) + ((x >> 56) & 0xff); } -uint64_t HELPER(iwmmxt_addcw)(uint64_t x) -{ - return - ((x >> 0) & 0xffff) + ((x >> 16) & 0xffff) + - ((x >> 32) & 0xffff) + ((x >> 48) & 0xffff); +uint64_t HELPER(iwmmxt_addcw)(uint64_t x) { + return ((x >> 0) & 0xffff) + ((x >> 16) & 0xffff) + ((x >> 32) & 0xffff) + ((x >> 48) & 0xffff); } 
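For reference, the ADDC{B,W,L} helpers in this hunk reduce one 64-bit iwMMXt register by summing its lanes. A minimal standalone sketch of the byte-lane case follows; it assumes only C99, and the names (addcb_ref, the test value) are illustrative, not part of the patched sources.

/* Sketch of the lane summation performed by HELPER(iwmmxt_addcb):
 * extract each of the eight byte lanes and accumulate them. */
#include <stdint.h>
#include <stdio.h>

static uint64_t addcb_ref(uint64_t x) {
    uint64_t sum = 0;
    for (int i = 0; i < 64; i += 8) {
        sum += (x >> i) & 0xff; /* lane i/8 */
    }
    return sum;
}

int main(void) {
    /* 0x0102030405060708 -> 1+2+3+4+5+6+7+8 = 36 */
    printf("%llu\n", (unsigned long long) addcb_ref(0x0102030405060708ULL));
    return 0;
}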
-uint64_t HELPER(iwmmxt_addcl)(uint64_t x) -{ +uint64_t HELPER(iwmmxt_addcl)(uint64_t x) { return (x & 0xffffffff) + (x >> 32); } -uint32_t HELPER(iwmmxt_msbb)(uint64_t x) -{ - return - ((x >> 7) & 0x01) | ((x >> 14) & 0x02) | - ((x >> 21) & 0x04) | ((x >> 28) & 0x08) | - ((x >> 35) & 0x10) | ((x >> 42) & 0x20) | - ((x >> 49) & 0x40) | ((x >> 56) & 0x80); +uint32_t HELPER(iwmmxt_msbb)(uint64_t x) { + return ((x >> 7) & 0x01) | ((x >> 14) & 0x02) | ((x >> 21) & 0x04) | ((x >> 28) & 0x08) | ((x >> 35) & 0x10) | + ((x >> 42) & 0x20) | ((x >> 49) & 0x40) | ((x >> 56) & 0x80); } -uint32_t HELPER(iwmmxt_msbw)(uint64_t x) -{ - return - ((x >> 15) & 0x01) | ((x >> 30) & 0x02) | - ((x >> 45) & 0x04) | ((x >> 52) & 0x08); +uint32_t HELPER(iwmmxt_msbw)(uint64_t x) { + return ((x >> 15) & 0x01) | ((x >> 30) & 0x02) | ((x >> 45) & 0x04) | ((x >> 52) & 0x08); } -uint32_t HELPER(iwmmxt_msbl)(uint64_t x) -{ +uint32_t HELPER(iwmmxt_msbl)(uint64_t x) { return ((x >> 31) & 0x01) | ((x >> 62) & 0x02); } /* FIXME: Split wCASF setting into a separate op to avoid env use. */ -uint64_t HELPER(iwmmxt_srlw)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x = (((x & (0xffffll << 0)) >> n) & (0xffffll << 0)) | - (((x & (0xffffll << 16)) >> n) & (0xffffll << 16)) | - (((x & (0xffffll << 32)) >> n) & (0xffffll << 32)) | - (((x & (0xffffll << 48)) >> n) & (0xffffll << 48)); +uint64_t HELPER(iwmmxt_srlw)(CPUARMState *env, uint64_t x, uint32_t n) { + x = (((x & (0xffffll << 0)) >> n) & (0xffffll << 0)) | (((x & (0xffffll << 16)) >> n) & (0xffffll << 16)) | + (((x & (0xffffll << 32)) >> n) & (0xffffll << 32)) | (((x & (0xffffll << 48)) >> n) & (0xffffll << 48)); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | - NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); return x; } -uint64_t HELPER(iwmmxt_srll)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x = ((x & (0xffffffffll << 0)) >> n) | - ((x >> n) & (0xffffffffll << 32)); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); +uint64_t HELPER(iwmmxt_srll)(CPUARMState *env, uint64_t x, uint32_t n) { + x = ((x & (0xffffffffll << 0)) >> n) | ((x >> n) & (0xffffffffll << 32)); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); return x; } -uint64_t HELPER(iwmmxt_srlq)(CPUARMState *env, uint64_t x, uint32_t n) -{ +uint64_t HELPER(iwmmxt_srlq)(CPUARMState *env, uint64_t x, uint32_t n) { x >>= n; env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x); return x; } -uint64_t HELPER(iwmmxt_sllw)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x = (((x & (0xffffll << 0)) << n) & (0xffffll << 0)) | - (((x & (0xffffll << 16)) << n) & (0xffffll << 16)) | - (((x & (0xffffll << 32)) << n) & (0xffffll << 32)) | - (((x & (0xffffll << 48)) << n) & (0xffffll << 48)); +uint64_t HELPER(iwmmxt_sllw)(CPUARMState *env, uint64_t x, uint32_t n) { + x = (((x & (0xffffll << 0)) << n) & (0xffffll << 0)) | (((x & (0xffffll << 16)) << n) & (0xffffll << 16)) | + (((x & (0xffffll << 32)) << n) & (0xffffll << 32)) | (((x & (0xffffll << 48)) << n) & (0xffffll << 48)); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | - NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); return x; } -uint64_t HELPER(iwmmxt_slll)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x = ((x << n) & (0xffffffffll << 0)) | - ((x & (0xffffffffll << 32)) << n); - 
env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); +uint64_t HELPER(iwmmxt_slll)(CPUARMState *env, uint64_t x, uint32_t n) { + x = ((x << n) & (0xffffffffll << 0)) | ((x & (0xffffffffll << 32)) << n); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); return x; } -uint64_t HELPER(iwmmxt_sllq)(CPUARMState *env, uint64_t x, uint32_t n) -{ +uint64_t HELPER(iwmmxt_sllq)(CPUARMState *env, uint64_t x, uint32_t n) { x <<= n; env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x); return x; } -uint64_t HELPER(iwmmxt_sraw)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x = ((uint64_t) ((EXTEND16(x >> 0) >> n) & 0xffff) << 0) | - ((uint64_t) ((EXTEND16(x >> 16) >> n) & 0xffff) << 16) | - ((uint64_t) ((EXTEND16(x >> 32) >> n) & 0xffff) << 32) | - ((uint64_t) ((EXTEND16(x >> 48) >> n) & 0xffff) << 48); +uint64_t HELPER(iwmmxt_sraw)(CPUARMState *env, uint64_t x, uint32_t n) { + x = ((uint64_t)((EXTEND16(x >> 0) >> n) & 0xffff) << 0) | ((uint64_t)((EXTEND16(x >> 16) >> n) & 0xffff) << 16) | + ((uint64_t)((EXTEND16(x >> 32) >> n) & 0xffff) << 32) | ((uint64_t)((EXTEND16(x >> 48) >> n) & 0xffff) << 48); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | - NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); return x; } -uint64_t HELPER(iwmmxt_sral)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x = (((EXTEND32(x >> 0) >> n) & 0xffffffff) << 0) | - (((EXTEND32(x >> 32) >> n) & 0xffffffff) << 32); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); +uint64_t HELPER(iwmmxt_sral)(CPUARMState *env, uint64_t x, uint32_t n) { + x = (((EXTEND32(x >> 0) >> n) & 0xffffffff) << 0) | (((EXTEND32(x >> 32) >> n) & 0xffffffff) << 32); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); return x; } -uint64_t HELPER(iwmmxt_sraq)(CPUARMState *env, uint64_t x, uint32_t n) -{ +uint64_t HELPER(iwmmxt_sraq)(CPUARMState *env, uint64_t x, uint32_t n) { x = (int64_t) x >> n; env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x); return x; } -uint64_t HELPER(iwmmxt_rorw)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x = ((((x & (0xffffll << 0)) >> n) | - ((x & (0xffffll << 0)) << (16 - n))) & (0xffffll << 0)) | - ((((x & (0xffffll << 16)) >> n) | - ((x & (0xffffll << 16)) << (16 - n))) & (0xffffll << 16)) | - ((((x & (0xffffll << 32)) >> n) | - ((x & (0xffffll << 32)) << (16 - n))) & (0xffffll << 32)) | - ((((x & (0xffffll << 48)) >> n) | - ((x & (0xffffll << 48)) << (16 - n))) & (0xffffll << 48)); +uint64_t HELPER(iwmmxt_rorw)(CPUARMState *env, uint64_t x, uint32_t n) { + x = ((((x & (0xffffll << 0)) >> n) | ((x & (0xffffll << 0)) << (16 - n))) & (0xffffll << 0)) | + ((((x & (0xffffll << 16)) >> n) | ((x & (0xffffll << 16)) << (16 - n))) & (0xffffll << 16)) | + ((((x & (0xffffll << 32)) >> n) | ((x & (0xffffll << 32)) << (16 - n))) & (0xffffll << 32)) | + ((((x & (0xffffll << 48)) >> n) | ((x & (0xffffll << 48)) << (16 - n))) & (0xffffll << 48)); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | - NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); return x; } -uint64_t HELPER(iwmmxt_rorl)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x = ((x & (0xffffffffll << 0)) >> n) | - ((x >> n) & (0xffffffffll << 32)) | - ((x << (32 - n)) & (0xffffffffll << 0)) | +uint64_t HELPER(iwmmxt_rorl)(CPUARMState 
*env, uint64_t x, uint32_t n) { + x = ((x & (0xffffffffll << 0)) >> n) | ((x >> n) & (0xffffffffll << 32)) | ((x << (32 - n)) & (0xffffffffll << 0)) | ((x & (0xffffffffll << 32)) << (32 - n)); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); return x; } -uint64_t HELPER(iwmmxt_rorq)(CPUARMState *env, uint64_t x, uint32_t n) -{ +uint64_t HELPER(iwmmxt_rorq)(CPUARMState *env, uint64_t x, uint32_t n) { x = (x >> n) | (x << (64 - n)); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x); return x; } -uint64_t HELPER(iwmmxt_shufh)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x = (((x >> ((n << 4) & 0x30)) & 0xffff) << 0) | - (((x >> ((n << 2) & 0x30)) & 0xffff) << 16) | - (((x >> ((n << 0) & 0x30)) & 0xffff) << 32) | - (((x >> ((n >> 2) & 0x30)) & 0xffff) << 48); +uint64_t HELPER(iwmmxt_shufh)(CPUARMState *env, uint64_t x, uint32_t n) { + x = (((x >> ((n << 4) & 0x30)) & 0xffff) << 0) | (((x >> ((n << 2) & 0x30)) & 0xffff) << 16) | + (((x >> ((n << 0) & 0x30)) & 0xffff) << 32) | (((x >> ((n >> 2) & 0x30)) & 0xffff) << 48); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | - NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); return x; } /* TODO: Unsigned-Saturation */ -uint64_t HELPER(iwmmxt_packuw)(CPUARMState *env, uint64_t a, uint64_t b) -{ - a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) | - (((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) | - (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) | - (((b >> 32) & 0xff) << 48) | (((b >> 48) & 0xff) << 56); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | - NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | - NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | - NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); +uint64_t HELPER(iwmmxt_packuw)(CPUARMState *env, uint64_t a, uint64_t b) { + a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) | (((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) | + (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) | (((b >> 32) & 0xff) << 48) | + (((b >> 48) & 0xff) << 56); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | NZBIT8(a >> 16, 2) | + NZBIT8(a >> 24, 3) | NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | + NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); return a; } -uint64_t HELPER(iwmmxt_packul)(CPUARMState *env, uint64_t a, uint64_t b) -{ - a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) | - (((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48); +uint64_t HELPER(iwmmxt_packul)(CPUARMState *env, uint64_t a, uint64_t b) { + a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) | (((b >> 0) & 0xffff) << 32) | + (((b >> 32) & 0xffff) << 48); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | - NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); + NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); return a; } -uint64_t HELPER(iwmmxt_packuq)(CPUARMState *env, uint64_t a, uint64_t b) -{ +uint64_t HELPER(iwmmxt_packuq)(CPUARMState *env, uint64_t a, uint64_t b) { a = (a & 0xffffffff) | ((b & 0xffffffff) << 32); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); return a; } /* TODO: Signed-Saturation */ -uint64_t 
HELPER(iwmmxt_packsw)(CPUARMState *env, uint64_t a, uint64_t b) -{ - a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) | - (((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) | - (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) | - (((b >> 32) & 0xff) << 48) | (((b >> 48) & 0xff) << 56); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | - NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | - NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | - NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); +uint64_t HELPER(iwmmxt_packsw)(CPUARMState *env, uint64_t a, uint64_t b) { + a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) | (((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) | + (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) | (((b >> 32) & 0xff) << 48) | + (((b >> 48) & 0xff) << 56); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | NZBIT8(a >> 16, 2) | + NZBIT8(a >> 24, 3) | NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | + NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); return a; } -uint64_t HELPER(iwmmxt_packsl)(CPUARMState *env, uint64_t a, uint64_t b) -{ - a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) | - (((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48); +uint64_t HELPER(iwmmxt_packsl)(CPUARMState *env, uint64_t a, uint64_t b) { + a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) | (((b >> 0) & 0xffff) << 32) | + (((b >> 32) & 0xffff) << 48); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | - NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); + NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); return a; } -uint64_t HELPER(iwmmxt_packsq)(CPUARMState *env, uint64_t a, uint64_t b) -{ +uint64_t HELPER(iwmmxt_packsq)(CPUARMState *env, uint64_t a, uint64_t b) { a = (a & 0xffffffff) | ((b & 0xffffffff) << 32); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); return a; } -uint64_t HELPER(iwmmxt_muladdsl)(uint64_t c, uint32_t a, uint32_t b) -{ +uint64_t HELPER(iwmmxt_muladdsl)(uint64_t c, uint32_t a, uint32_t b) { return c + ((int32_t) EXTEND32(a) * (int32_t) EXTEND32(b)); } -uint64_t HELPER(iwmmxt_muladdsw)(uint64_t c, uint32_t a, uint32_t b) -{ - c += EXTEND32(EXTEND16S((a >> 0) & 0xffff) * - EXTEND16S((b >> 0) & 0xffff)); - c += EXTEND32(EXTEND16S((a >> 16) & 0xffff) * - EXTEND16S((b >> 16) & 0xffff)); +uint64_t HELPER(iwmmxt_muladdsw)(uint64_t c, uint32_t a, uint32_t b) { + c += EXTEND32(EXTEND16S((a >> 0) & 0xffff) * EXTEND16S((b >> 0) & 0xffff)); + c += EXTEND32(EXTEND16S((a >> 16) & 0xffff) * EXTEND16S((b >> 16) & 0xffff)); return c; } -uint64_t HELPER(iwmmxt_muladdswl)(uint64_t c, uint32_t a, uint32_t b) -{ - return c + (EXTEND32(EXTEND16S(a & 0xffff) * - EXTEND16S(b & 0xffff))); +uint64_t HELPER(iwmmxt_muladdswl)(uint64_t c, uint32_t a, uint32_t b) { + return c + (EXTEND32(EXTEND16S(a & 0xffff) * EXTEND16S(b & 0xffff))); } diff --git a/src/target-arm/neon_helper.c b/src/target-arm/neon_helper.c index 76d8fa6..d231ac5 100644 --- a/src/target-arm/neon_helper.c +++ b/src/target-arm/neon_helper.c @@ -6,53 +6,46 @@ * * This code is licensed under the GNU GPL v2. 
*/ -#include #include +#include #include "cpu.h" #include "exec-all.h" #include "helper.h" -#define SIGNBIT (uint32_t)0x80000000 -#define SIGNBIT64 ((uint64_t)1 << 63) +#define SIGNBIT (uint32_t) 0x80000000 +#define SIGNBIT64 ((uint64_t) 1 << 63) #define SET_QC() env->vfp.xregs[ARM_VFP_FPSCR] = CPSR_Q #define NEON_TYPE1(name, type) \ -typedef struct \ -{ \ - type v1; \ -} neon_##name; + typedef struct { type v1; } neon_##name; #ifdef HOST_WORDS_BIGENDIAN #define NEON_TYPE2(name, type) \ -typedef struct \ -{ \ - type v2; \ - type v1; \ -} neon_##name; + typedef struct { \ + type v2; \ + type v1; \ + } neon_##name; #define NEON_TYPE4(name, type) \ -typedef struct \ -{ \ - type v4; \ - type v3; \ - type v2; \ - type v1; \ -} neon_##name; + typedef struct { \ + type v4; \ + type v3; \ + type v2; \ + type v1; \ + } neon_##name; #else #define NEON_TYPE2(name, type) \ -typedef struct \ -{ \ - type v1; \ - type v2; \ -} neon_##name; + typedef struct { \ + type v1; \ + type v2; \ + } neon_##name; #define NEON_TYPE4(name, type) \ -typedef struct \ -{ \ - type v1; \ - type v2; \ - type v3; \ - type v4; \ -} neon_##name; + typedef struct { \ + type v1; \ + type v2; \ + type v3; \ + type v4; \ + } neon_##name; #endif NEON_TYPE4(s8, int8_t) @@ -66,104 +59,102 @@ NEON_TYPE1(u32, uint32_t) #undef NEON_TYPE1 /* Copy from a uint32_t to a vector structure type. */ -#define NEON_UNPACK(vtype, dest, val) do { \ - union { \ - vtype v; \ - uint32_t i; \ - } conv_u; \ - conv_u.i = (val); \ - dest = conv_u.v; \ - } while(0) +#define NEON_UNPACK(vtype, dest, val) \ + do { \ + union { \ + vtype v; \ + uint32_t i; \ + } conv_u; \ + conv_u.i = (val); \ + dest = conv_u.v; \ + } while (0) /* Copy from a vector structure type to a uint32_t. */ -#define NEON_PACK(vtype, dest, val) do { \ - union { \ - vtype v; \ - uint32_t i; \ - } conv_u; \ - conv_u.v = (val); \ - dest = conv_u.i; \ - } while(0) - -#define NEON_DO1 \ - NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); -#define NEON_DO2 \ +#define NEON_PACK(vtype, dest, val) \ + do { \ + union { \ + vtype v; \ + uint32_t i; \ + } conv_u; \ + conv_u.v = (val); \ + dest = conv_u.i; \ + } while (0) + +#define NEON_DO1 NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); +#define NEON_DO2 \ NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \ NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); -#define NEON_DO4 \ +#define NEON_DO4 \ NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \ NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \ NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \ NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4); -#define NEON_VOP_BODY(vtype, n) \ -{ \ - uint32_t res; \ - vtype vsrc1; \ - vtype vsrc2; \ - vtype vdest; \ - NEON_UNPACK(vtype, vsrc1, arg1); \ - NEON_UNPACK(vtype, vsrc2, arg2); \ - NEON_DO##n; \ - NEON_PACK(vtype, res, vdest); \ - return res; \ -} +#define NEON_VOP_BODY(vtype, n) \ + { \ + uint32_t res; \ + vtype vsrc1; \ + vtype vsrc2; \ + vtype vdest; \ + NEON_UNPACK(vtype, vsrc1, arg1); \ + NEON_UNPACK(vtype, vsrc2, arg2); \ + NEON_DO##n; \ + NEON_PACK(vtype, res, vdest); \ + return res; \ + } #define NEON_VOP(name, vtype, n) \ -uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \ -NEON_VOP_BODY(vtype, n) + uint32_t HELPER(glue(neon_, name))(uint32_t arg1, uint32_t arg2) NEON_VOP_BODY(vtype, n) #define NEON_VOP_ENV(name, vtype, n) \ -uint32_t HELPER(glue(neon_,name))(CPUARMState *env, uint32_t arg1, uint32_t arg2) \ -NEON_VOP_BODY(vtype, n) + uint32_t HELPER(glue(neon_, name))(CPUARMState * env, uint32_t arg1, uint32_t arg2) NEON_VOP_BODY(vtype, n) /* Pairwise operations. 
*/ /* For 32-bit elements each segment only contains a single element, so the elementwise and pairwise operations are the same. */ -#define NEON_PDO2 \ +#define NEON_PDO2 \ NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \ NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2); -#define NEON_PDO4 \ +#define NEON_PDO4 \ NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \ NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \ NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \ - NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4); \ - -#define NEON_POP(name, vtype, n) \ -uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \ -{ \ - uint32_t res; \ - vtype vsrc1; \ - vtype vsrc2; \ - vtype vdest; \ - NEON_UNPACK(vtype, vsrc1, arg1); \ - NEON_UNPACK(vtype, vsrc2, arg2); \ - NEON_PDO##n; \ - NEON_PACK(vtype, res, vdest); \ - return res; \ -} + NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4); + +#define NEON_POP(name, vtype, n) \ + uint32_t HELPER(glue(neon_, name))(uint32_t arg1, uint32_t arg2) { \ + uint32_t res; \ + vtype vsrc1; \ + vtype vsrc2; \ + vtype vdest; \ + NEON_UNPACK(vtype, vsrc1, arg1); \ + NEON_UNPACK(vtype, vsrc2, arg2); \ + NEON_PDO##n; \ + NEON_PACK(vtype, res, vdest); \ + return res; \ + } /* Unary operators. */ -#define NEON_VOP1(name, vtype, n) \ -uint32_t HELPER(glue(neon_,name))(uint32_t arg) \ -{ \ - vtype vsrc1; \ - vtype vdest; \ - NEON_UNPACK(vtype, vsrc1, arg); \ - NEON_DO##n; \ - NEON_PACK(vtype, arg, vdest); \ - return arg; \ -} - - -#define NEON_USAT(dest, src1, src2, type) do { \ - uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \ - if (tmp != (type)tmp) { \ - SET_QC(); \ - dest = ~0; \ - } else { \ - dest = tmp; \ - }} while(0) +#define NEON_VOP1(name, vtype, n) \ + uint32_t HELPER(glue(neon_, name))(uint32_t arg) { \ + vtype vsrc1; \ + vtype vdest; \ + NEON_UNPACK(vtype, vsrc1, arg); \ + NEON_DO##n; \ + NEON_PACK(vtype, arg, vdest); \ + return arg; \ + } + +#define NEON_USAT(dest, src1, src2, type) \ + do { \ + uint32_t tmp = (uint32_t) src1 + (uint32_t) src2; \ + if (tmp != (type) tmp) { \ + SET_QC(); \ + dest = ~0; \ + } else { \ + dest = tmp; \ + } \ + } while (0) #define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t) NEON_VOP_ENV(qadd_u8, neon_u8, 4) #undef NEON_FN @@ -172,8 +163,7 @@ NEON_VOP_ENV(qadd_u16, neon_u16, 2) #undef NEON_FN #undef NEON_USAT -uint32_t HELPER(neon_qadd_u32)(CPUARMState *env, uint32_t a, uint32_t b) -{ +uint32_t HELPER(neon_qadd_u32)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t res = a + b; if (res < a) { SET_QC(); @@ -182,30 +172,30 @@ uint32_t HELPER(neon_qadd_u32)(CPUARMState *env, uint32_t a, uint32_t b) return res; } -uint64_t HELPER(neon_qadd_u64)(CPUARMState *env, uint64_t src1, uint64_t src2) -{ +uint64_t HELPER(neon_qadd_u64)(CPUARMState *env, uint64_t src1, uint64_t src2) { uint64_t res; res = src1 + src2; if (res < src1) { SET_QC(); - res = ~(uint64_t)0; + res = ~(uint64_t) 0; } return res; } -#define NEON_SSAT(dest, src1, src2, type) do { \ - int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \ - if (tmp != (type)tmp) { \ - SET_QC(); \ - if (src2 > 0) { \ - tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \ - } else { \ - tmp = 1 << (sizeof(type) * 8 - 1); \ - } \ - } \ - dest = tmp; \ - } while(0) +#define NEON_SSAT(dest, src1, src2, type) \ + do { \ + int32_t tmp = (uint32_t) src1 + (uint32_t) src2; \ + if (tmp != (type) tmp) { \ + SET_QC(); \ + if (src2 > 0) { \ + tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \ + } else { \ + tmp = 1 << (sizeof(type) * 8 - 1); \ + } \ + } \ + dest = tmp; \ + } while (0) #define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t) 
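The NEON_SSAT macro just defined detects signed overflow by computing in a wider type and checking whether truncation back to the lane type changes the value; on overflow it clamps toward the sign of src2 and raises the sticky QC flag via SET_QC(). A self-contained sketch of the same idea for one int8_t lane; the names here (qadd_s8_ref) are illustrative and not part of the patch:

/* Saturating signed add, mirroring the NEON_SSAT overflow test:
 * widen to 32 bits, then compare against the truncated 8-bit value. */
#include <stdint.h>
#include <stdio.h>

static int8_t qadd_s8_ref(int8_t a, int8_t b) {
    int32_t tmp = (int32_t) a + (int32_t) b;
    if (tmp != (int8_t) tmp) {               /* overflow: truncation differs */
        tmp = (b > 0) ? INT8_MAX : INT8_MIN; /* clamp; the real helper also sets QC */
    }
    return (int8_t) tmp;
}

int main(void) {
    printf("%d\n", qadd_s8_ref(100, 100));   /* 127 (saturated) */
    printf("%d\n", qadd_s8_ref(-100, -100)); /* -128 (saturated) */
    return 0;
}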
NEON_VOP_ENV(qadd_s8, neon_s8, 4) #undef NEON_FN @@ -214,36 +204,36 @@ NEON_VOP_ENV(qadd_s16, neon_s16, 2) #undef NEON_FN #undef NEON_SSAT -uint32_t HELPER(neon_qadd_s32)(CPUARMState *env, uint32_t a, uint32_t b) -{ +uint32_t HELPER(neon_qadd_s32)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t res = a + b; if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) { SET_QC(); - res = ~(((int32_t)a >> 31) ^ SIGNBIT); + res = ~(((int32_t) a >> 31) ^ SIGNBIT); } return res; } -uint64_t HELPER(neon_qadd_s64)(CPUARMState *env, uint64_t src1, uint64_t src2) -{ +uint64_t HELPER(neon_qadd_s64)(CPUARMState *env, uint64_t src1, uint64_t src2) { uint64_t res; res = src1 + src2; if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) { SET_QC(); - res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64; + res = ((int64_t) src1 >> 63) ^ ~SIGNBIT64; } return res; } -#define NEON_USAT(dest, src1, src2, type) do { \ - uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \ - if (tmp != (type)tmp) { \ - SET_QC(); \ - dest = 0; \ - } else { \ - dest = tmp; \ - }} while(0) +#define NEON_USAT(dest, src1, src2, type) \ + do { \ + uint32_t tmp = (uint32_t) src1 - (uint32_t) src2; \ + if (tmp != (type) tmp) { \ + SET_QC(); \ + dest = 0; \ + } else { \ + dest = tmp; \ + } \ + } while (0) #define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t) NEON_VOP_ENV(qsub_u8, neon_u8, 4) #undef NEON_FN @@ -252,8 +242,7 @@ NEON_VOP_ENV(qsub_u16, neon_u16, 2) #undef NEON_FN #undef NEON_USAT -uint32_t HELPER(neon_qsub_u32)(CPUARMState *env, uint32_t a, uint32_t b) -{ +uint32_t HELPER(neon_qsub_u32)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t res = a - b; if (res > a) { SET_QC(); @@ -262,8 +251,7 @@ uint32_t HELPER(neon_qsub_u32)(CPUARMState *env, uint32_t a, uint32_t b) return res; } -uint64_t HELPER(neon_qsub_u64)(CPUARMState *env, uint64_t src1, uint64_t src2) -{ +uint64_t HELPER(neon_qsub_u64)(CPUARMState *env, uint64_t src1, uint64_t src2) { uint64_t res; if (src1 < src2) { @@ -275,18 +263,19 @@ uint64_t HELPER(neon_qsub_u64)(CPUARMState *env, uint64_t src1, uint64_t src2) return res; } -#define NEON_SSAT(dest, src1, src2, type) do { \ - int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \ - if (tmp != (type)tmp) { \ - SET_QC(); \ - if (src2 < 0) { \ - tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \ - } else { \ - tmp = 1 << (sizeof(type) * 8 - 1); \ - } \ - } \ - dest = tmp; \ - } while(0) +#define NEON_SSAT(dest, src1, src2, type) \ + do { \ + int32_t tmp = (uint32_t) src1 - (uint32_t) src2; \ + if (tmp != (type) tmp) { \ + SET_QC(); \ + if (src2 < 0) { \ + tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \ + } else { \ + tmp = 1 << (sizeof(type) * 8 - 1); \ + } \ + } \ + dest = tmp; \ + } while (0) #define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t) NEON_VOP_ENV(qsub_s8, neon_s8, 4) #undef NEON_FN @@ -295,24 +284,22 @@ NEON_VOP_ENV(qsub_s16, neon_s16, 2) #undef NEON_FN #undef NEON_SSAT -uint32_t HELPER(neon_qsub_s32)(CPUARMState *env, uint32_t a, uint32_t b) -{ +uint32_t HELPER(neon_qsub_s32)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t res = a - b; if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) { SET_QC(); - res = ~(((int32_t)a >> 31) ^ SIGNBIT); + res = ~(((int32_t) a >> 31) ^ SIGNBIT); } return res; } -uint64_t HELPER(neon_qsub_s64)(CPUARMState *env, uint64_t src1, uint64_t src2) -{ +uint64_t HELPER(neon_qsub_s64)(CPUARMState *env, uint64_t src1, uint64_t src2) { uint64_t res; res = src1 - src2; if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) { SET_QC(); - res = ((int64_t)src1 >> 
63) ^ ~SIGNBIT64; + res = ((int64_t) src1 >> 63) ^ ~SIGNBIT64; } return res; } @@ -324,8 +311,7 @@ NEON_VOP(hadd_s16, neon_s16, 2) NEON_VOP(hadd_u16, neon_u16, 2) #undef NEON_FN -int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2) -{ +int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2) { int32_t dest; dest = (src1 >> 1) + (src2 >> 1); @@ -334,8 +320,7 @@ int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2) return dest; } -uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t src2) -{ +uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t src2) { uint32_t dest; dest = (src1 >> 1) + (src2 >> 1); @@ -351,8 +336,7 @@ NEON_VOP(rhadd_s16, neon_s16, 2) NEON_VOP(rhadd_u16, neon_u16, 2) #undef NEON_FN -int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2) -{ +int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2) { int32_t dest; dest = (src1 >> 1) + (src2 >> 1); @@ -361,8 +345,7 @@ int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2) return dest; } -uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2) -{ +uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2) { uint32_t dest; dest = (src1 >> 1) + (src2 >> 1); @@ -378,8 +361,7 @@ NEON_VOP(hsub_s16, neon_s16, 2) NEON_VOP(hsub_u16, neon_u16, 2) #undef NEON_FN -int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2) -{ +int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2) { int32_t dest; dest = (src1 >> 1) - (src2 >> 1); @@ -388,8 +370,7 @@ int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2) return dest; } -uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2) -{ +uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2) { uint32_t dest; dest = (src1 >> 1) - (src2 >> 1); @@ -442,8 +423,7 @@ NEON_POP(pmax_s16, neon_s16, 2) NEON_POP(pmax_u16, neon_u16, 2) #undef NEON_FN -#define NEON_FN(dest, src1, src2) \ - dest = (src1 > src2) ? (src1 - src2) : (src2 - src1) +#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? 
(src1 - src2) : (src2 - src1) NEON_VOP(abd_s8, neon_s8, 4) NEON_VOP(abd_u8, neon_u8, 4) NEON_VOP(abd_s16, neon_s16, 2) @@ -452,25 +432,25 @@ NEON_VOP(abd_s32, neon_s32, 1) NEON_VOP(abd_u32, neon_u32, 1) #undef NEON_FN -#define NEON_FN(dest, src1, src2) do { \ - int8_t tmp; \ - tmp = (int8_t)src2; \ - if (tmp >= (ssize_t)sizeof(src1) * 8 || \ - tmp <= -(ssize_t)sizeof(src1) * 8) { \ - dest = 0; \ - } else if (tmp < 0) { \ - dest = src1 >> -tmp; \ - } else { \ - dest = src1 << tmp; \ - }} while (0) +#define NEON_FN(dest, src1, src2) \ + do { \ + int8_t tmp; \ + tmp = (int8_t) src2; \ + if (tmp >= (ssize_t) sizeof(src1) * 8 || tmp <= -(ssize_t) sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp < 0) { \ + dest = src1 >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + } \ + } while (0) NEON_VOP(shl_u8, neon_u8, 4) NEON_VOP(shl_u16, neon_u16, 2) NEON_VOP(shl_u32, neon_u32, 1) #undef NEON_FN -uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop) -{ - int8_t shift = (int8_t)shiftop; +uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop) { + int8_t shift = (int8_t) shiftop; if (shift >= 64 || shift <= -64) { val = 0; } else if (shift < 0) { @@ -481,26 +461,27 @@ uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop) return val; } -#define NEON_FN(dest, src1, src2) do { \ - int8_t tmp; \ - tmp = (int8_t)src2; \ - if (tmp >= (ssize_t)sizeof(src1) * 8) { \ - dest = 0; \ - } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ - dest = src1 >> (sizeof(src1) * 8 - 1); \ - } else if (tmp < 0) { \ - dest = src1 >> -tmp; \ - } else { \ - dest = src1 << tmp; \ - }} while (0) +#define NEON_FN(dest, src1, src2) \ + do { \ + int8_t tmp; \ + tmp = (int8_t) src2; \ + if (tmp >= (ssize_t) sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp <= -(ssize_t) sizeof(src1) * 8) { \ + dest = src1 >> (sizeof(src1) * 8 - 1); \ + } else if (tmp < 0) { \ + dest = src1 >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + } \ + } while (0) NEON_VOP(shl_s8, neon_s8, 4) NEON_VOP(shl_s16, neon_s16, 2) NEON_VOP(shl_s32, neon_s32, 1) #undef NEON_FN -uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop) -{ - int8_t shift = (int8_t)shiftop; +uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop) { + int8_t shift = (int8_t) shiftop; int64_t val = valop; if (shift >= 64) { val = 0; @@ -514,32 +495,32 @@ uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop) return val; } -#define NEON_FN(dest, src1, src2) do { \ - int8_t tmp; \ - tmp = (int8_t)src2; \ - if ((tmp >= (ssize_t)sizeof(src1) * 8) \ - || (tmp <= -(ssize_t)sizeof(src1) * 8)) { \ - dest = 0; \ - } else if (tmp < 0) { \ - dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ - } else { \ - dest = src1 << tmp; \ - }} while (0) +#define NEON_FN(dest, src1, src2) \ + do { \ + int8_t tmp; \ + tmp = (int8_t) src2; \ + if ((tmp >= (ssize_t) sizeof(src1) * 8) || (tmp <= -(ssize_t) sizeof(src1) * 8)) { \ + dest = 0; \ + } else if (tmp < 0) { \ + dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + } \ + } while (0) NEON_VOP(rshl_s8, neon_s8, 4) NEON_VOP(rshl_s16, neon_s16, 2) #undef NEON_FN /* The addition of the rounding constant may overflow, so we use an * intermediate 64 bits accumulator. 
*/ -uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop) -{ +uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop) { int32_t dest; - int32_t val = (int32_t)valop; - int8_t shift = (int8_t)shiftop; + int32_t val = (int32_t) valop; + int8_t shift = (int8_t) shiftop; if ((shift >= 32) || (shift <= -32)) { dest = 0; } else if (shift < 0) { - int64_t big_dest = ((int64_t)val + (1 << (-1 - shift))); + int64_t big_dest = ((int64_t) val + (1 << (-1 - shift))); dest = big_dest >> -shift; } else { dest = val << shift; @@ -549,9 +530,8 @@ uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop) /* Handling addition overflow with 64 bits inputs values is more * tricky than with 32 bits values. */ -uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop) -{ - int8_t shift = (int8_t)shiftop; +uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop) { + int8_t shift = (int8_t) shiftop; int64_t val = valop; if ((shift >= 64) || (shift <= -64)) { val = 0; @@ -572,35 +552,35 @@ uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop) return val; } -#define NEON_FN(dest, src1, src2) do { \ - int8_t tmp; \ - tmp = (int8_t)src2; \ - if (tmp >= (ssize_t)sizeof(src1) * 8 || \ - tmp < -(ssize_t)sizeof(src1) * 8) { \ - dest = 0; \ - } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \ - dest = src1 >> (-tmp - 1); \ - } else if (tmp < 0) { \ - dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ - } else { \ - dest = src1 << tmp; \ - }} while (0) +#define NEON_FN(dest, src1, src2) \ + do { \ + int8_t tmp; \ + tmp = (int8_t) src2; \ + if (tmp >= (ssize_t) sizeof(src1) * 8 || tmp < -(ssize_t) sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp == -(ssize_t) sizeof(src1) * 8) { \ + dest = src1 >> (-tmp - 1); \ + } else if (tmp < 0) { \ + dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + } \ + } while (0) NEON_VOP(rshl_u8, neon_u8, 4) NEON_VOP(rshl_u16, neon_u16, 2) #undef NEON_FN /* The addition of the rounding constant may overflow, so we use an * intermediate 64 bits accumulator. */ -uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop) -{ +uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop) { uint32_t dest; - int8_t shift = (int8_t)shiftop; + int8_t shift = (int8_t) shiftop; if (shift >= 32 || shift < -32) { dest = 0; } else if (shift == -32) { dest = val >> 31; } else if (shift < 0) { - uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift))); + uint64_t big_dest = ((uint64_t) val + (1 << (-1 - shift))); dest = big_dest >> -shift; } else { dest = val << shift; @@ -610,9 +590,8 @@ uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop) /* Handling addition overflow with 64 bits inputs values is more * tricky than with 32 bits values. 
*/ -uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop) -{ - int8_t shift = (uint8_t)shiftop; +uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop) { + int8_t shift = (uint8_t) shiftop; if (shift >= 64 || shift < -64) { val = 0; } else if (shift == -64) { @@ -635,38 +614,39 @@ uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop) return val; } -#define NEON_FN(dest, src1, src2) do { \ - int8_t tmp; \ - tmp = (int8_t)src2; \ - if (tmp >= (ssize_t)sizeof(src1) * 8) { \ - if (src1) { \ - SET_QC(); \ - dest = ~0; \ - } else { \ - dest = 0; \ - } \ - } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ - dest = 0; \ - } else if (tmp < 0) { \ - dest = src1 >> -tmp; \ - } else { \ - dest = src1 << tmp; \ - if ((dest >> tmp) != src1) { \ - SET_QC(); \ - dest = ~0; \ - } \ - }} while (0) +#define NEON_FN(dest, src1, src2) \ + do { \ + int8_t tmp; \ + tmp = (int8_t) src2; \ + if (tmp >= (ssize_t) sizeof(src1) * 8) { \ + if (src1) { \ + SET_QC(); \ + dest = ~0; \ + } else { \ + dest = 0; \ + } \ + } else if (tmp <= -(ssize_t) sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp < 0) { \ + dest = src1 >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + if ((dest >> tmp) != src1) { \ + SET_QC(); \ + dest = ~0; \ + } \ + } \ + } while (0) NEON_VOP_ENV(qshl_u8, neon_u8, 4) NEON_VOP_ENV(qshl_u16, neon_u16, 2) NEON_VOP_ENV(qshl_u32, neon_u32, 1) #undef NEON_FN -uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop) -{ - int8_t shift = (int8_t)shiftop; +uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop) { + int8_t shift = (int8_t) shiftop; if (shift >= 64) { if (val) { - val = ~(uint64_t)0; + val = ~(uint64_t) 0; SET_QC(); } } else if (shift <= -64) { @@ -678,47 +658,48 @@ uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop) val <<= shift; if ((val >> shift) != tmp) { SET_QC(); - val = ~(uint64_t)0; + val = ~(uint64_t) 0; } } return val; } -#define NEON_FN(dest, src1, src2) do { \ - int8_t tmp; \ - tmp = (int8_t)src2; \ - if (tmp >= (ssize_t)sizeof(src1) * 8) { \ - if (src1) { \ - SET_QC(); \ - dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \ - if (src1 > 0) { \ - dest--; \ - } \ - } else { \ - dest = src1; \ - } \ - } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ - dest = src1 >> 31; \ - } else if (tmp < 0) { \ - dest = src1 >> -tmp; \ - } else { \ - dest = src1 << tmp; \ - if ((dest >> tmp) != src1) { \ - SET_QC(); \ - dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \ - if (src1 > 0) { \ - dest--; \ - } \ - } \ - }} while (0) +#define NEON_FN(dest, src1, src2) \ + do { \ + int8_t tmp; \ + tmp = (int8_t) src2; \ + if (tmp >= (ssize_t) sizeof(src1) * 8) { \ + if (src1) { \ + SET_QC(); \ + dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \ + if (src1 > 0) { \ + dest--; \ + } \ + } else { \ + dest = src1; \ + } \ + } else if (tmp <= -(ssize_t) sizeof(src1) * 8) { \ + dest = src1 >> 31; \ + } else if (tmp < 0) { \ + dest = src1 >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + if ((dest >> tmp) != src1) { \ + SET_QC(); \ + dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \ + if (src1 > 0) { \ + dest--; \ + } \ + } \ + } \ + } while (0) NEON_VOP_ENV(qshl_s8, neon_s8, 4) NEON_VOP_ENV(qshl_s16, neon_s16, 2) NEON_VOP_ENV(qshl_s32, neon_s32, 1) #undef NEON_FN -uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop) -{ - int8_t shift = (uint8_t)shiftop; +uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop) { + int8_t shift = (uint8_t) shiftop; int64_t 
val = valop; if (shift >= 64) { if (val) { @@ -740,48 +721,48 @@ uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shifto return val; } -#define NEON_FN(dest, src1, src2) do { \ - if (src1 & (1 << (sizeof(src1) * 8 - 1))) { \ - SET_QC(); \ - dest = 0; \ - } else { \ - int8_t tmp; \ - tmp = (int8_t)src2; \ - if (tmp >= (ssize_t)sizeof(src1) * 8) { \ - if (src1) { \ - SET_QC(); \ - dest = ~0; \ - } else { \ - dest = 0; \ - } \ - } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ - dest = 0; \ - } else if (tmp < 0) { \ - dest = src1 >> -tmp; \ - } else { \ - dest = src1 << tmp; \ - if ((dest >> tmp) != src1) { \ - SET_QC(); \ - dest = ~0; \ - } \ - } \ - }} while (0) +#define NEON_FN(dest, src1, src2) \ + do { \ + if (src1 & (1 << (sizeof(src1) * 8 - 1))) { \ + SET_QC(); \ + dest = 0; \ + } else { \ + int8_t tmp; \ + tmp = (int8_t) src2; \ + if (tmp >= (ssize_t) sizeof(src1) * 8) { \ + if (src1) { \ + SET_QC(); \ + dest = ~0; \ + } else { \ + dest = 0; \ + } \ + } else if (tmp <= -(ssize_t) sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp < 0) { \ + dest = src1 >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + if ((dest >> tmp) != src1) { \ + SET_QC(); \ + dest = ~0; \ + } \ + } \ + } \ + } while (0) NEON_VOP_ENV(qshlu_s8, neon_u8, 4) NEON_VOP_ENV(qshlu_s16, neon_u16, 2) #undef NEON_FN -uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop) -{ - if ((int32_t)valop < 0) { +uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop) { + if ((int32_t) valop < 0) { SET_QC(); return 0; } return helper_neon_qshl_u32(env, valop, shiftop); } -uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop) -{ - if ((int64_t)valop < 0) { +uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop) { + if ((int64_t) valop < 0) { SET_QC(); return 0; } @@ -789,39 +770,40 @@ uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t valop, uint64_t shift } /* FIXME: This is wrong. */ -#define NEON_FN(dest, src1, src2) do { \ - int8_t tmp; \ - tmp = (int8_t)src2; \ - if (tmp >= (ssize_t)sizeof(src1) * 8) { \ - if (src1) { \ - SET_QC(); \ - dest = ~0; \ - } else { \ - dest = 0; \ - } \ - } else if (tmp < -(ssize_t)sizeof(src1) * 8) { \ - dest = 0; \ - } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \ - dest = src1 >> (sizeof(src1) * 8 - 1); \ - } else if (tmp < 0) { \ - dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ - } else { \ - dest = src1 << tmp; \ - if ((dest >> tmp) != src1) { \ - SET_QC(); \ - dest = ~0; \ - } \ - }} while (0) +#define NEON_FN(dest, src1, src2) \ + do { \ + int8_t tmp; \ + tmp = (int8_t) src2; \ + if (tmp >= (ssize_t) sizeof(src1) * 8) { \ + if (src1) { \ + SET_QC(); \ + dest = ~0; \ + } else { \ + dest = 0; \ + } \ + } else if (tmp < -(ssize_t) sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp == -(ssize_t) sizeof(src1) * 8) { \ + dest = src1 >> (sizeof(src1) * 8 - 1); \ + } else if (tmp < 0) { \ + dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + if ((dest >> tmp) != src1) { \ + SET_QC(); \ + dest = ~0; \ + } \ + } \ + } while (0) NEON_VOP_ENV(qrshl_u8, neon_u8, 4) NEON_VOP_ENV(qrshl_u16, neon_u16, 2) #undef NEON_FN /* The addition of the rounding constant may overflow, so we use an * intermediate 64 bits accumulator. 
*/ -uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop) -{ +uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop) { uint32_t dest; - int8_t shift = (int8_t)shiftop; + int8_t shift = (int8_t) shiftop; if (shift >= 32) { if (val) { SET_QC(); @@ -834,7 +816,7 @@ uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop } else if (shift == -32) { dest = val >> 31; } else if (shift < 0) { - uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift))); + uint64_t big_dest = ((uint64_t) val + (1 << (-1 - shift))); dest = big_dest >> -shift; } else { dest = val << shift; @@ -848,9 +830,8 @@ uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop /* Handling addition overflow with 64 bits inputs values is more * tricky than with 32 bits values. */ -uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop) -{ - int8_t shift = (int8_t)shiftop; +uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop) { + int8_t shift = (int8_t) shiftop; if (shift >= 64) { if (val) { SET_QC(); @@ -871,7 +852,7 @@ uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop val++; val >>= 1; } - } else { \ + } else { uint64_t tmp = val; val <<= shift; if ((val >> shift) != tmp) { @@ -882,47 +863,45 @@ uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop return val; } -#define NEON_FN(dest, src1, src2) do { \ - int8_t tmp; \ - tmp = (int8_t)src2; \ - if (tmp >= (ssize_t)sizeof(src1) * 8) { \ - if (src1) { \ - SET_QC(); \ - dest = (1 << (sizeof(src1) * 8 - 1)); \ - if (src1 > 0) { \ - dest--; \ - } \ - } else { \ - dest = 0; \ - } \ - } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ - dest = 0; \ - } else if (tmp < 0) { \ - dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ - } else { \ - dest = src1 << tmp; \ - if ((dest >> tmp) != src1) { \ - SET_QC(); \ - dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \ - if (src1 > 0) { \ - dest--; \ - } \ - } \ - }} while (0) -//NEON_VOP_ENV(qrshl_s8, neon_s8, 4) -//NEON_VOP_ENV(qrshl_s16, neon_s16, 2) +#define NEON_FN(dest, src1, src2) \ + do { \ + int8_t tmp; \ + tmp = (int8_t) src2; \ + if (tmp >= (ssize_t) sizeof(src1) * 8) { \ + if (src1) { \ + SET_QC(); \ + dest = (1 << (sizeof(src1) * 8 - 1)); \ + if (src1 > 0) { \ + dest--; \ + } \ + } else { \ + dest = 0; \ + } \ + } else if (tmp <= -(ssize_t) sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp < 0) { \ + dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + if ((dest >> tmp) != src1) { \ + SET_QC(); \ + dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \ + if (src1 > 0) { \ + dest--; \ + } \ + } \ + } \ + } while (0) +// NEON_VOP_ENV(qrshl_s8, neon_s8, 4) +// NEON_VOP_ENV(qrshl_s16, neon_s16, 2) #undef NEON_FN - - - /* The addition of the rounding constant may overflow, so we use an * intermediate 64 bits accumulator. 
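 * The signed variant below does the same, but accumulates in an int64_t
 * so that the sign of val survives the widening.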
*/ -uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop) -{ +uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop) { int32_t dest; - int32_t val = (int32_t)valop; - int8_t shift = (int8_t)shiftop; + int32_t val = (int32_t) valop; + int8_t shift = (int8_t) shiftop; if (shift >= 32) { if (val) { SET_QC(); @@ -933,7 +912,7 @@ uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shift } else if (shift <= -32) { dest = 0; } else if (shift < 0) { - int64_t big_dest = ((int64_t)val + (1 << (-1 - shift))); + int64_t big_dest = ((int64_t) val + (1 << (-1 - shift))); dest = big_dest >> -shift; } else { dest = val << shift; @@ -947,9 +926,8 @@ uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shift /* Handling addition overflow with 64 bits inputs values is more * tricky than with 32 bits values. */ -uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop) -{ - int8_t shift = (uint8_t)shiftop; +uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop) { + int8_t shift = (uint8_t) shiftop; int64_t val = valop; if (shift >= 64) { @@ -981,8 +959,7 @@ uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shift return val; } -uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b) -{ +uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b) { uint32_t mask; mask = (a ^ b) & 0x80808080u; a &= ~0x80808080u; @@ -990,8 +967,7 @@ uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b) return (a + b) ^ mask; } -uint32_t HELPER(neon_add_u16)(uint32_t a, uint32_t b) -{ +uint32_t HELPER(neon_add_u16)(uint32_t a, uint32_t b) { uint32_t mask; mask = (a ^ b) & 0x80008000u; a &= ~0x80008000u; @@ -1016,8 +992,7 @@ NEON_VOP(mul_u16, neon_u16, 2) /* Polynomial multiplication is like integer multiplication except the partial products are XORed, not added. */ -uint32_t HELPER(neon_mul_p8)(uint32_t op1, uint32_t op2) -{ +uint32_t HELPER(neon_mul_p8)(uint32_t op1, uint32_t op2) { uint32_t mask; uint32_t result; result = 0; @@ -1038,15 +1013,11 @@ uint32_t HELPER(neon_mul_p8)(uint32_t op1, uint32_t op2) return result; } -uint64_t HELPER(neon_mull_p8)(uint32_t op1, uint32_t op2) -{ +uint64_t HELPER(neon_mull_p8)(uint32_t op1, uint32_t op2) { uint64_t result = 0; uint64_t mask; uint64_t op2ex = op2; - op2ex = (op2ex & 0xff) | - ((op2ex & 0xff00) << 8) | - ((op2ex & 0xff0000) << 16) | - ((op2ex & 0xff000000) << 24); + op2ex = (op2ex & 0xff) | ((op2ex & 0xff00) << 8) | ((op2ex & 0xff0000) << 16) | ((op2ex & 0xff000000) << 24); while (op1) { mask = 0; if (op1 & 1) { @@ -1086,16 +1057,14 @@ NEON_VOP1(abs_s16, neon_s16, 2) #undef NEON_FN /* Count Leading Sign/Zero Bits. */ -static inline int do_clz8(uint8_t x) -{ +static inline int do_clz8(uint8_t x) { int n; for (n = 8; x; n--) x >>= 1; return n; } -static inline int do_clz16(uint16_t x) -{ +static inline int do_clz16(uint16_t x) { int n; for (n = 16; x; n--) x >>= 1; @@ -1118,10 +1087,9 @@ NEON_VOP1(cls_s8, neon_s8, 4) NEON_VOP1(cls_s16, neon_s16, 2) #undef NEON_FN -uint32_t HELPER(neon_cls_s32)(uint32_t x) -{ +uint32_t HELPER(neon_cls_s32)(uint32_t x) { int count; - if ((int32_t)x < 0) + if ((int32_t) x < 0) x = ~x; for (count = 32; x; count--) x = x >> 1; @@ -1129,32 +1097,32 @@ uint32_t HELPER(neon_cls_s32)(uint32_t x) } /* Bit count. 
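 * (a classic SWAR population count: each step adds adjacent 1-, 2- and
 * then 4-bit fields in parallel, leaving an independent bit count in
 * every byte of x)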
*/ -uint32_t HELPER(neon_cnt_u8)(uint32_t x) -{ - x = (x & 0x55555555) + ((x >> 1) & 0x55555555); - x = (x & 0x33333333) + ((x >> 2) & 0x33333333); - x = (x & 0x0f0f0f0f) + ((x >> 4) & 0x0f0f0f0f); +uint32_t HELPER(neon_cnt_u8)(uint32_t x) { + x = (x & 0x55555555) + ((x >> 1) & 0x55555555); + x = (x & 0x33333333) + ((x >> 2) & 0x33333333); + x = (x & 0x0f0f0f0f) + ((x >> 4) & 0x0f0f0f0f); return x; } -#define NEON_QDMULH16(dest, src1, src2, round) do { \ - uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \ - if ((tmp ^ (tmp << 1)) & SIGNBIT) { \ - SET_QC(); \ - tmp = (tmp >> 31) ^ ~SIGNBIT; \ - } else { \ - tmp <<= 1; \ - } \ - if (round) { \ - int32_t old = tmp; \ - tmp += 1 << 15; \ - if ((int32_t)tmp < old) { \ - SET_QC(); \ - tmp = SIGNBIT - 1; \ - } \ - } \ - dest = tmp >> 16; \ - } while(0) +#define NEON_QDMULH16(dest, src1, src2, round) \ + do { \ + uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \ + if ((tmp ^ (tmp << 1)) & SIGNBIT) { \ + SET_QC(); \ + tmp = (tmp >> 31) ^ ~SIGNBIT; \ + } else { \ + tmp <<= 1; \ + } \ + if (round) { \ + int32_t old = tmp; \ + tmp += 1 << 15; \ + if ((int32_t) tmp < old) { \ + SET_QC(); \ + tmp = SIGNBIT - 1; \ + } \ + } \ + dest = tmp >> 16; \ + } while (0) #define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0) NEON_VOP_ENV(qdmulh_s16, neon_s16, 2) #undef NEON_FN @@ -1163,24 +1131,25 @@ NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2) #undef NEON_FN #undef NEON_QDMULH16 -#define NEON_QDMULH32(dest, src1, src2, round) do { \ - uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \ - if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \ - SET_QC(); \ - tmp = (tmp >> 63) ^ ~SIGNBIT64; \ - } else { \ - tmp <<= 1; \ - } \ - if (round) { \ - int64_t old = tmp; \ - tmp += (int64_t)1 << 31; \ - if ((int64_t)tmp < old) { \ - SET_QC(); \ - tmp = SIGNBIT64 - 1; \ - } \ - } \ - dest = tmp >> 32; \ - } while(0) +#define NEON_QDMULH32(dest, src1, src2, round) \ + do { \ + uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \ + if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \ + SET_QC(); \ + tmp = (tmp >> 63) ^ ~SIGNBIT64; \ + } else { \ + tmp <<= 1; \ + } \ + if (round) { \ + int64_t old = tmp; \ + tmp += (int64_t) 1 << 31; \ + if ((int64_t) tmp < old) { \ + SET_QC(); \ + tmp = SIGNBIT64 - 1; \ + } \ + } \ + dest = tmp >> 32; \ + } while (0) #define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0) NEON_VOP_ENV(qdmulh_s32, neon_s32, 1) #undef NEON_FN @@ -1189,60 +1158,50 @@ NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1) #undef NEON_FN #undef NEON_QDMULH32 -uint32_t HELPER(neon_narrow_u8)(uint64_t x) -{ - return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u) - | ((x >> 24) & 0xff000000u); +uint32_t HELPER(neon_narrow_u8)(uint64_t x) { + return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u) | ((x >> 24) & 0xff000000u); } -uint32_t HELPER(neon_narrow_u16)(uint64_t x) -{ +uint32_t HELPER(neon_narrow_u16)(uint64_t x) { return (x & 0xffffu) | ((x >> 16) & 0xffff0000u); } -uint32_t HELPER(neon_narrow_high_u8)(uint64_t x) -{ - return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00) - | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000); +uint32_t HELPER(neon_narrow_high_u8)(uint64_t x) { + return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00) | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000); } -uint32_t HELPER(neon_narrow_high_u16)(uint64_t x) -{ +uint32_t HELPER(neon_narrow_high_u16)(uint64_t x) { return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000); } -uint32_t HELPER(neon_narrow_round_high_u8)(uint64_t x) -{ +uint32_t 
HELPER(neon_narrow_round_high_u8)(uint64_t x) { x &= 0xff80ff80ff80ff80ull; x += 0x0080008000800080ull; - return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00) - | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000); + return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00) | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000); } -uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x) -{ +uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x) { x &= 0xffff8000ffff8000ull; x += 0x0000800000008000ull; return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000); } -uint32_t HELPER(neon_unarrow_sat8)(CPUARMState *env, uint64_t x) -{ +uint32_t HELPER(neon_unarrow_sat8)(CPUARMState *env, uint64_t x) { uint16_t s; uint8_t d; uint32_t res = 0; -#define SAT8(n) \ - s = x >> n; \ - if (s & 0x8000) { \ - SET_QC(); \ - } else { \ - if (s > 0xff) { \ - d = 0xff; \ - SET_QC(); \ - } else { \ - d = s; \ - } \ - res |= (uint32_t)d << (n / 2); \ +#define SAT8(n) \ + s = x >> n; \ + if (s & 0x8000) { \ + SET_QC(); \ + } else { \ + if (s > 0xff) { \ + d = 0xff; \ + SET_QC(); \ + } else { \ + d = s; \ + } \ + res |= (uint32_t) d << (n / 2); \ } SAT8(0); @@ -1253,20 +1212,19 @@ uint32_t HELPER(neon_unarrow_sat8)(CPUARMState *env, uint64_t x) return res; } -uint32_t HELPER(neon_narrow_sat_u8)(CPUARMState *env, uint64_t x) -{ +uint32_t HELPER(neon_narrow_sat_u8)(CPUARMState *env, uint64_t x) { uint16_t s; uint8_t d; uint32_t res = 0; -#define SAT8(n) \ - s = x >> n; \ +#define SAT8(n) \ + s = x >> n; \ if (s > 0xff) { \ - d = 0xff; \ - SET_QC(); \ - } else { \ - d = s; \ - } \ - res |= (uint32_t)d << (n / 2); + d = 0xff; \ + SET_QC(); \ + } else { \ + d = s; \ + } \ + res |= (uint32_t) d << (n / 2); SAT8(0); SAT8(16); @@ -1276,20 +1234,19 @@ uint32_t HELPER(neon_narrow_sat_u8)(CPUARMState *env, uint64_t x) return res; } -uint32_t HELPER(neon_narrow_sat_s8)(CPUARMState *env, uint64_t x) -{ +uint32_t HELPER(neon_narrow_sat_s8)(CPUARMState *env, uint64_t x) { int16_t s; uint8_t d; uint32_t res = 0; -#define SAT8(n) \ - s = x >> n; \ - if (s != (int8_t)s) { \ +#define SAT8(n) \ + s = x >> n; \ + if (s != (int8_t) s) { \ d = (s >> 15) ^ 0x7f; \ - SET_QC(); \ - } else { \ - d = s; \ - } \ - res |= (uint32_t)d << (n / 2); + SET_QC(); \ + } else { \ + d = s; \ + } \ + res |= (uint32_t) d << (n / 2); SAT8(0); SAT8(16); @@ -1299,8 +1256,7 @@ uint32_t HELPER(neon_narrow_sat_s8)(CPUARMState *env, uint64_t x) return res; } -uint32_t HELPER(neon_unarrow_sat16)(CPUARMState *env, uint64_t x) -{ +uint32_t HELPER(neon_unarrow_sat16)(CPUARMState *env, uint64_t x) { uint32_t high; uint32_t low; low = x; @@ -1322,8 +1278,7 @@ uint32_t HELPER(neon_unarrow_sat16)(CPUARMState *env, uint64_t x) return low | (high << 16); } -uint32_t HELPER(neon_narrow_sat_u16)(CPUARMState *env, uint64_t x) -{ +uint32_t HELPER(neon_narrow_sat_u16)(CPUARMState *env, uint64_t x) { uint32_t high; uint32_t low; low = x; @@ -1339,25 +1294,23 @@ uint32_t HELPER(neon_narrow_sat_u16)(CPUARMState *env, uint64_t x) return low | (high << 16); } -uint32_t HELPER(neon_narrow_sat_s16)(CPUARMState *env, uint64_t x) -{ +uint32_t HELPER(neon_narrow_sat_s16)(CPUARMState *env, uint64_t x) { int32_t low; int32_t high; low = x; - if (low != (int16_t)low) { + if (low != (int16_t) low) { low = (low >> 31) ^ 0x7fff; SET_QC(); } high = x >> 32; - if (high != (int16_t)high) { + if (high != (int16_t) high) { high = (high >> 31) ^ 0x7fff; SET_QC(); } - return (uint16_t)low | (high << 16); + return (uint16_t) low | (high << 16); } -uint32_t HELPER(neon_unarrow_sat32)(CPUARMState *env, uint64_t x) -{ 
+uint32_t HELPER(neon_unarrow_sat32)(CPUARMState *env, uint64_t x) { if (x & 0x8000000000000000ull) { SET_QC(); return 0; @@ -1369,8 +1322,7 @@ uint32_t HELPER(neon_unarrow_sat32)(CPUARMState *env, uint64_t x) return x; } -uint32_t HELPER(neon_narrow_sat_u32)(CPUARMState *env, uint64_t x) -{ +uint32_t HELPER(neon_narrow_sat_u32)(CPUARMState *env, uint64_t x) { if (x > 0xffffffffu) { SET_QC(); return 0xffffffffu; @@ -1378,20 +1330,18 @@ uint32_t HELPER(neon_narrow_sat_u32)(CPUARMState *env, uint64_t x) return x; } -uint32_t HELPER(neon_narrow_sat_s32)(CPUARMState *env, uint64_t x) -{ - if ((int64_t)x != (int32_t)x) { +uint32_t HELPER(neon_narrow_sat_s32)(CPUARMState *env, uint64_t x) { + if ((int64_t) x != (int32_t) x) { SET_QC(); - return ((int64_t)x >> 63) ^ 0x7fffffff; + return ((int64_t) x >> 63) ^ 0x7fffffff; } return x; } -uint64_t HELPER(neon_widen_u8)(uint32_t x) -{ +uint64_t HELPER(neon_widen_u8)(uint32_t x) { uint64_t tmp; uint64_t ret; - ret = (uint8_t)x; + ret = (uint8_t) x; tmp = (uint8_t)(x >> 8); ret |= tmp << 16; tmp = (uint8_t)(x >> 16); @@ -1401,11 +1351,10 @@ uint64_t HELPER(neon_widen_u8)(uint32_t x) return ret; } -uint64_t HELPER(neon_widen_s8)(uint32_t x) -{ +uint64_t HELPER(neon_widen_s8)(uint32_t x) { uint64_t tmp; uint64_t ret; - ret = (uint16_t)(int8_t)x; + ret = (uint16_t)(int8_t) x; tmp = (uint16_t)(int8_t)(x >> 8); ret |= tmp << 16; tmp = (uint16_t)(int8_t)(x >> 16); @@ -1415,20 +1364,17 @@ uint64_t HELPER(neon_widen_s8)(uint32_t x) return ret; } -uint64_t HELPER(neon_widen_u16)(uint32_t x) -{ +uint64_t HELPER(neon_widen_u16)(uint32_t x) { uint64_t high = (uint16_t)(x >> 16); - return ((uint16_t)x) | (high << 32); + return ((uint16_t) x) | (high << 32); } -uint64_t HELPER(neon_widen_s16)(uint32_t x) -{ +uint64_t HELPER(neon_widen_s16)(uint32_t x) { uint64_t high = (int16_t)(x >> 16); - return ((uint32_t)(int16_t)x) | (high << 32); + return ((uint32_t)(int16_t) x) | (high << 32); } -uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b) -{ +uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b) { uint64_t mask; mask = (a ^ b) & 0x8000800080008000ull; a &= ~0x8000800080008000ull; @@ -1436,8 +1382,7 @@ uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b) return (a + b) ^ mask; } -uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b) -{ +uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b) { uint64_t mask; mask = (a ^ b) & 0x8000000080000000ull; a &= ~0x8000000080000000ull; @@ -1445,8 +1390,7 @@ uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b) return (a + b) ^ mask; } -uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b) -{ +uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b) { uint64_t tmp; uint64_t tmp2; @@ -1454,21 +1398,17 @@ uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b) tmp += (a >> 16) & 0x0000ffff0000ffffull; tmp2 = b & 0xffff0000ffff0000ull; tmp2 += (b << 16) & 0xffff0000ffff0000ull; - return ( tmp & 0xffff) - | ((tmp >> 16) & 0xffff0000ull) - | ((tmp2 << 16) & 0xffff00000000ull) - | ( tmp2 & 0xffff000000000000ull); + return (tmp & 0xffff) | ((tmp >> 16) & 0xffff0000ull) | ((tmp2 << 16) & 0xffff00000000ull) | + (tmp2 & 0xffff000000000000ull); } -uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b) -{ +uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b) { uint32_t low = a + (a >> 32); uint32_t high = b + (b >> 32); - return low + ((uint64_t)high << 32); + return low + ((uint64_t) high << 32); } -uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b) -{ +uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b) { uint64_t mask; 
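    /* SWAR subtraction: force the top bit of every 16-bit lane set in a and
       clear in b so a per-lane borrow can never cross a lane boundary, then
       XOR with mask to restore the true sign bit of each lane. */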
mask = (a ^ ~b) & 0x8000800080008000ull; a |= 0x8000800080008000ull; @@ -1476,8 +1416,7 @@ uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b) return (a - b) ^ mask; } -uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b) -{ +uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b) { uint64_t mask; mask = (a ^ ~b) & 0x8000000080000000ull; a |= 0x8000000080000000ull; @@ -1485,8 +1424,7 @@ uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b) return (a - b) ^ mask; } -uint64_t HELPER(neon_addl_saturate_s32)(CPUARMState *env, uint64_t a, uint64_t b) -{ +uint64_t HELPER(neon_addl_saturate_s32)(CPUARMState *env, uint64_t a, uint64_t b) { uint32_t x, y; uint32_t low, high; @@ -1495,26 +1433,25 @@ uint64_t HELPER(neon_addl_saturate_s32)(CPUARMState *env, uint64_t a, uint64_t b low = x + y; if (((low ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) { SET_QC(); - low = ((int32_t)x >> 31) ^ ~SIGNBIT; + low = ((int32_t) x >> 31) ^ ~SIGNBIT; } x = a >> 32; y = b >> 32; high = x + y; if (((high ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) { SET_QC(); - high = ((int32_t)x >> 31) ^ ~SIGNBIT; + high = ((int32_t) x >> 31) ^ ~SIGNBIT; } - return low | ((uint64_t)high << 32); + return low | ((uint64_t) high << 32); } -uint64_t HELPER(neon_addl_saturate_s64)(CPUARMState *env, uint64_t a, uint64_t b) -{ +uint64_t HELPER(neon_addl_saturate_s64)(CPUARMState *env, uint64_t a, uint64_t b) { uint64_t result; result = a + b; if (((result ^ a) & SIGNBIT64) && !((a ^ b) & SIGNBIT64)) { SET_QC(); - result = ((int64_t)a >> 63) ^ ~SIGNBIT64; + result = ((int64_t) a >> 63) ^ ~SIGNBIT64; } return result; } @@ -1523,14 +1460,14 @@ uint64_t HELPER(neon_addl_saturate_s64)(CPUARMState *env, uint64_t a, uint64_t b * the input type, because for example with a signed 32 bit * op the absolute difference can overflow a signed 32 bit value. */ -#define DO_ABD(dest, x, y, intype, arithtype) do { \ - arithtype tmp_x = (intype)(x); \ - arithtype tmp_y = (intype)(y); \ - dest = ((tmp_x > tmp_y) ? tmp_x - tmp_y : tmp_y - tmp_x); \ - } while(0) - -uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b) -{ +#define DO_ABD(dest, x, y, intype, arithtype) \ + do { \ + arithtype tmp_x = (intype)(x); \ + arithtype tmp_y = (intype)(y); \ + dest = ((tmp_x > tmp_y) ? 
tmp_x - tmp_y : tmp_y - tmp_x); \ + } while (0) + +uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b) { uint64_t tmp; uint64_t result; DO_ABD(result, a, b, uint8_t, uint32_t); @@ -1543,8 +1480,7 @@ uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b) return result; } -uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b) -{ +uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b) { uint64_t tmp; uint64_t result; DO_ABD(result, a, b, int8_t, int32_t); @@ -1557,8 +1493,7 @@ uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b) return result; } -uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b) -{ +uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b) { uint64_t tmp; uint64_t result; DO_ABD(result, a, b, uint16_t, uint32_t); @@ -1566,8 +1501,7 @@ uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b) return result | (tmp << 32); } -uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b) -{ +uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b) { uint64_t tmp; uint64_t result; DO_ABD(result, a, b, int16_t, int32_t); @@ -1575,15 +1509,13 @@ uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b) return result | (tmp << 32); } -uint64_t HELPER(neon_abdl_u64)(uint32_t a, uint32_t b) -{ +uint64_t HELPER(neon_abdl_u64)(uint32_t a, uint32_t b) { uint64_t result; DO_ABD(result, a, b, uint32_t, uint64_t); return result; } -uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b) -{ +uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b) { uint64_t result; DO_ABD(result, a, b, int32_t, int64_t); return result; @@ -1591,14 +1523,14 @@ uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b) #undef DO_ABD /* Widening multiply. Named type is the source type. */ -#define DO_MULL(dest, x, y, type1, type2) do { \ - type1 tmp_x = x; \ - type1 tmp_y = y; \ - dest = (type2)((type2)tmp_x * (type2)tmp_y); \ - } while(0) - -uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b) -{ +#define DO_MULL(dest, x, y, type1, type2) \ + do { \ + type1 tmp_x = x; \ + type1 tmp_y = y; \ + dest = (type2)((type2) tmp_x * (type2) tmp_y); \ + } while (0) + +uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b) { uint64_t tmp; uint64_t result; @@ -1612,8 +1544,7 @@ uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b) return result; } -uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b) -{ +uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b) { uint64_t tmp; uint64_t result; @@ -1627,8 +1558,7 @@ uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b) return result; } -uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b) -{ +uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b) { uint64_t tmp; uint64_t result; @@ -1637,8 +1567,7 @@ uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b) return result | (tmp << 32); } -uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b) -{ +uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b) { uint64_t tmp; uint64_t result; @@ -1647,44 +1576,42 @@ uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b) return result | (tmp << 32); } -uint64_t HELPER(neon_negl_u16)(uint64_t x) -{ +uint64_t HELPER(neon_negl_u16)(uint64_t x) { uint16_t tmp; uint64_t result; - result = (uint16_t)-x; + result = (uint16_t) -x; tmp = -(x >> 16); - result |= (uint64_t)tmp << 16; + result |= (uint64_t) tmp << 16; tmp = -(x >> 32); - result |= (uint64_t)tmp << 32; + result |= (uint64_t) tmp << 32; tmp = -(x >> 48); - result |= (uint64_t)tmp << 48; + result |= (uint64_t) tmp << 48; return result; } -uint64_t HELPER(neon_negl_u32)(uint64_t x) -{ +uint64_t HELPER(neon_negl_u32)(uint64_t x) { uint32_t low = -x; 
uint32_t high = -(x >> 32); - return low | ((uint64_t)high << 32); + return low | ((uint64_t) high << 32); } /* FIXME: There should be a native op for this. */ -uint64_t HELPER(neon_negl_u64)(uint64_t x) -{ +uint64_t HELPER(neon_negl_u64)(uint64_t x) { return -x; } /* Saturating sign manipulation. */ /* ??? Make these use NEON_VOP1 */ -#define DO_QABS8(x) do { \ - if (x == (int8_t)0x80) { \ - x = 0x7f; \ - SET_QC(); \ - } else if (x < 0) { \ - x = -x; \ - }} while (0) -uint32_t HELPER(neon_qabs_s8)(CPUARMState *env, uint32_t x) -{ +#define DO_QABS8(x) \ + do { \ + if (x == (int8_t) 0x80) { \ + x = 0x7f; \ + SET_QC(); \ + } else if (x < 0) { \ + x = -x; \ + } \ + } while (0) +uint32_t HELPER(neon_qabs_s8)(CPUARMState *env, uint32_t x) { neon_s8 vec; NEON_UNPACK(neon_s8, vec, x); DO_QABS8(vec.v1); @@ -1696,15 +1623,16 @@ uint32_t HELPER(neon_qabs_s8)(CPUARMState *env, uint32_t x) } #undef DO_QABS8 -#define DO_QNEG8(x) do { \ - if (x == (int8_t)0x80) { \ - x = 0x7f; \ - SET_QC(); \ - } else { \ - x = -x; \ - }} while (0) -uint32_t HELPER(neon_qneg_s8)(CPUARMState *env, uint32_t x) -{ +#define DO_QNEG8(x) \ + do { \ + if (x == (int8_t) 0x80) { \ + x = 0x7f; \ + SET_QC(); \ + } else { \ + x = -x; \ + } \ + } while (0) +uint32_t HELPER(neon_qneg_s8)(CPUARMState *env, uint32_t x) { neon_s8 vec; NEON_UNPACK(neon_s8, vec, x); DO_QNEG8(vec.v1); @@ -1716,15 +1644,16 @@ uint32_t HELPER(neon_qneg_s8)(CPUARMState *env, uint32_t x) } #undef DO_QNEG8 -#define DO_QABS16(x) do { \ - if (x == (int16_t)0x8000) { \ - x = 0x7fff; \ - SET_QC(); \ - } else if (x < 0) { \ - x = -x; \ - }} while (0) -uint32_t HELPER(neon_qabs_s16)(CPUARMState *env, uint32_t x) -{ +#define DO_QABS16(x) \ + do { \ + if (x == (int16_t) 0x8000) { \ + x = 0x7fff; \ + SET_QC(); \ + } else if (x < 0) { \ + x = -x; \ + } \ + } while (0) +uint32_t HELPER(neon_qabs_s16)(CPUARMState *env, uint32_t x) { neon_s16 vec; NEON_UNPACK(neon_s16, vec, x); DO_QABS16(vec.v1); @@ -1734,15 +1663,16 @@ uint32_t HELPER(neon_qabs_s16)(CPUARMState *env, uint32_t x) } #undef DO_QABS16 -#define DO_QNEG16(x) do { \ - if (x == (int16_t)0x8000) { \ - x = 0x7fff; \ - SET_QC(); \ - } else { \ - x = -x; \ - }} while (0) -uint32_t HELPER(neon_qneg_s16)(CPUARMState *env, uint32_t x) -{ +#define DO_QNEG16(x) \ + do { \ + if (x == (int16_t) 0x8000) { \ + x = 0x7fff; \ + SET_QC(); \ + } else { \ + x = -x; \ + } \ + } while (0) +uint32_t HELPER(neon_qneg_s16)(CPUARMState *env, uint32_t x) { neon_s16 vec; NEON_UNPACK(neon_s16, vec, x); DO_QNEG16(vec.v1); @@ -1752,19 +1682,17 @@ uint32_t HELPER(neon_qneg_s16)(CPUARMState *env, uint32_t x) } #undef DO_QNEG16 -uint32_t HELPER(neon_qabs_s32)(CPUARMState *env, uint32_t x) -{ +uint32_t HELPER(neon_qabs_s32)(CPUARMState *env, uint32_t x) { if (x == SIGNBIT) { SET_QC(); x = ~SIGNBIT; - } else if ((int32_t)x < 0) { + } else if ((int32_t) x < 0) { x = -x; } return x; } -uint32_t HELPER(neon_qneg_s32)(CPUARMState *env, uint32_t x) -{ +uint32_t HELPER(neon_qneg_s32)(CPUARMState *env, uint32_t x) { if (x == SIGNBIT) { SET_QC(); x = ~SIGNBIT; @@ -1775,20 +1703,17 @@ uint32_t HELPER(neon_qneg_s32)(CPUARMState *env, uint32_t x) } /* NEON Float helpers.
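 * (each helper takes an explicit softfloat float_status pointer, so the
 * rounding mode and cumulative exception flags come from whatever FP
 * context the translator passes in rather than from host FP state)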
*/ -uint32_t HELPER(neon_min_f32)(uint32_t a, uint32_t b, void *fpstp) -{ +uint32_t HELPER(neon_min_f32)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; return float32_val(float32_min(make_float32(a), make_float32(b), fpst)); } -uint32_t HELPER(neon_max_f32)(uint32_t a, uint32_t b, void *fpstp) -{ +uint32_t HELPER(neon_max_f32)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; return float32_val(float32_max(make_float32(a), make_float32(b), fpst)); } -uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b, void *fpstp) -{ +uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; float32 f0 = make_float32(a); float32 f1 = make_float32(b); @@ -1799,34 +1724,29 @@ uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b, void *fpstp) * Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do. * Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires. */ -uint32_t HELPER(neon_ceq_f32)(uint32_t a, uint32_t b, void *fpstp) -{ +uint32_t HELPER(neon_ceq_f32)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; return -float32_eq_quiet(make_float32(a), make_float32(b), fpst); } -uint32_t HELPER(neon_cge_f32)(uint32_t a, uint32_t b, void *fpstp) -{ +uint32_t HELPER(neon_cge_f32)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; return -float32_le(make_float32(b), make_float32(a), fpst); } -uint32_t HELPER(neon_cgt_f32)(uint32_t a, uint32_t b, void *fpstp) -{ +uint32_t HELPER(neon_cgt_f32)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; return -float32_lt(make_float32(b), make_float32(a), fpst); } -uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b, void *fpstp) -{ +uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; float32 f0 = float32_abs(make_float32(a)); float32 f1 = float32_abs(make_float32(b)); return -float32_le(f1, f0, fpst); } -uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b, void *fpstp) -{ +uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; float32 f0 = float32_abs(make_float32(a)); float32 f1 = float32_abs(make_float32(b)); @@ -1835,56 +1755,41 @@ uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b, void *fpstp) #define ELEM(V, N, SIZE) (((V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1)) -void HELPER(neon_qunzip8)(CPUARMState *env, uint32_t rd, uint32_t rm) -{ +void HELPER(neon_qunzip8)(CPUARMState *env, uint32_t rd, uint32_t rm) { uint64_t zm0 = float64_val(env->vfp.regs[rm]); uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); uint64_t zd0 = float64_val(env->vfp.regs[rd]); uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); - uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zd0, 2, 8) << 8) - | (ELEM(zd0, 4, 8) << 16) | (ELEM(zd0, 6, 8) << 24) - | (ELEM(zd1, 0, 8) << 32) | (ELEM(zd1, 2, 8) << 40) - | (ELEM(zd1, 4, 8) << 48) | (ELEM(zd1, 6, 8) << 56); - uint64_t d1 = ELEM(zm0, 0, 8) | (ELEM(zm0, 2, 8) << 8) - | (ELEM(zm0, 4, 8) << 16) | (ELEM(zm0, 6, 8) << 24) - | (ELEM(zm1, 0, 8) << 32) | (ELEM(zm1, 2, 8) << 40) - | (ELEM(zm1, 4, 8) << 48) | (ELEM(zm1, 6, 8) << 56); - uint64_t m0 = ELEM(zd0, 1, 8) | (ELEM(zd0, 3, 8) << 8) - | (ELEM(zd0, 5, 8) << 16) | (ELEM(zd0, 7, 8) << 24) - | (ELEM(zd1, 1, 8) << 32) | (ELEM(zd1, 3, 8) << 40) - | (ELEM(zd1, 5, 8) << 48) | (ELEM(zd1, 7, 8) << 56); - uint64_t m1 = ELEM(zm0, 1, 8) | (ELEM(zm0, 3, 8) << 8) - | (ELEM(zm0, 5, 8) << 16) | (ELEM(zm0, 7, 8) << 24) - | (ELEM(zm1, 1, 8) << 32) | (ELEM(zm1, 3, 8) << 40) - | 
(ELEM(zm1, 5, 8) << 48) | (ELEM(zm1, 7, 8) << 56); + uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zd0, 2, 8) << 8) | (ELEM(zd0, 4, 8) << 16) | (ELEM(zd0, 6, 8) << 24) | + (ELEM(zd1, 0, 8) << 32) | (ELEM(zd1, 2, 8) << 40) | (ELEM(zd1, 4, 8) << 48) | (ELEM(zd1, 6, 8) << 56); + uint64_t d1 = ELEM(zm0, 0, 8) | (ELEM(zm0, 2, 8) << 8) | (ELEM(zm0, 4, 8) << 16) | (ELEM(zm0, 6, 8) << 24) | + (ELEM(zm1, 0, 8) << 32) | (ELEM(zm1, 2, 8) << 40) | (ELEM(zm1, 4, 8) << 48) | (ELEM(zm1, 6, 8) << 56); + uint64_t m0 = ELEM(zd0, 1, 8) | (ELEM(zd0, 3, 8) << 8) | (ELEM(zd0, 5, 8) << 16) | (ELEM(zd0, 7, 8) << 24) | + (ELEM(zd1, 1, 8) << 32) | (ELEM(zd1, 3, 8) << 40) | (ELEM(zd1, 5, 8) << 48) | (ELEM(zd1, 7, 8) << 56); + uint64_t m1 = ELEM(zm0, 1, 8) | (ELEM(zm0, 3, 8) << 8) | (ELEM(zm0, 5, 8) << 16) | (ELEM(zm0, 7, 8) << 24) | + (ELEM(zm1, 1, 8) << 32) | (ELEM(zm1, 3, 8) << 40) | (ELEM(zm1, 5, 8) << 48) | (ELEM(zm1, 7, 8) << 56); env->vfp.regs[rm] = make_float64(m0); env->vfp.regs[rm + 1] = make_float64(m1); env->vfp.regs[rd] = make_float64(d0); env->vfp.regs[rd + 1] = make_float64(d1); } -void HELPER(neon_qunzip16)(CPUARMState *env, uint32_t rd, uint32_t rm) -{ +void HELPER(neon_qunzip16)(CPUARMState *env, uint32_t rd, uint32_t rm) { uint64_t zm0 = float64_val(env->vfp.regs[rm]); uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); uint64_t zd0 = float64_val(env->vfp.regs[rd]); uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); - uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zd0, 2, 16) << 16) - | (ELEM(zd1, 0, 16) << 32) | (ELEM(zd1, 2, 16) << 48); - uint64_t d1 = ELEM(zm0, 0, 16) | (ELEM(zm0, 2, 16) << 16) - | (ELEM(zm1, 0, 16) << 32) | (ELEM(zm1, 2, 16) << 48); - uint64_t m0 = ELEM(zd0, 1, 16) | (ELEM(zd0, 3, 16) << 16) - | (ELEM(zd1, 1, 16) << 32) | (ELEM(zd1, 3, 16) << 48); - uint64_t m1 = ELEM(zm0, 1, 16) | (ELEM(zm0, 3, 16) << 16) - | (ELEM(zm1, 1, 16) << 32) | (ELEM(zm1, 3, 16) << 48); + uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zd0, 2, 16) << 16) | (ELEM(zd1, 0, 16) << 32) | (ELEM(zd1, 2, 16) << 48); + uint64_t d1 = ELEM(zm0, 0, 16) | (ELEM(zm0, 2, 16) << 16) | (ELEM(zm1, 0, 16) << 32) | (ELEM(zm1, 2, 16) << 48); + uint64_t m0 = ELEM(zd0, 1, 16) | (ELEM(zd0, 3, 16) << 16) | (ELEM(zd1, 1, 16) << 32) | (ELEM(zd1, 3, 16) << 48); + uint64_t m1 = ELEM(zm0, 1, 16) | (ELEM(zm0, 3, 16) << 16) | (ELEM(zm1, 1, 16) << 32) | (ELEM(zm1, 3, 16) << 48); env->vfp.regs[rm] = make_float64(m0); env->vfp.regs[rm + 1] = make_float64(m1); env->vfp.regs[rd] = make_float64(d0); env->vfp.regs[rd + 1] = make_float64(d1); } -void HELPER(neon_qunzip32)(CPUARMState *env, uint32_t rd, uint32_t rm) -{ +void HELPER(neon_qunzip32)(CPUARMState *env, uint32_t rd, uint32_t rm) { uint64_t zm0 = float64_val(env->vfp.regs[rm]); uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); uint64_t zd0 = float64_val(env->vfp.regs[rd]); @@ -1899,84 +1804,61 @@ void HELPER(neon_qunzip32)(CPUARMState *env, uint32_t rd, uint32_t rm) env->vfp.regs[rd + 1] = make_float64(d1); } -void HELPER(neon_unzip8)(CPUARMState *env, uint32_t rd, uint32_t rm) -{ +void HELPER(neon_unzip8)(CPUARMState *env, uint32_t rd, uint32_t rm) { uint64_t zm = float64_val(env->vfp.regs[rm]); uint64_t zd = float64_val(env->vfp.regs[rd]); - uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zd, 2, 8) << 8) - | (ELEM(zd, 4, 8) << 16) | (ELEM(zd, 6, 8) << 24) - | (ELEM(zm, 0, 8) << 32) | (ELEM(zm, 2, 8) << 40) - | (ELEM(zm, 4, 8) << 48) | (ELEM(zm, 6, 8) << 56); - uint64_t m0 = ELEM(zd, 1, 8) | (ELEM(zd, 3, 8) << 8) - | (ELEM(zd, 5, 8) << 16) | (ELEM(zd, 7, 8) << 24) - | (ELEM(zm, 1, 8) << 32) | (ELEM(zm, 3, 8) << 40) - 
| (ELEM(zm, 5, 8) << 48) | (ELEM(zm, 7, 8) << 56); + uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zd, 2, 8) << 8) | (ELEM(zd, 4, 8) << 16) | (ELEM(zd, 6, 8) << 24) | + (ELEM(zm, 0, 8) << 32) | (ELEM(zm, 2, 8) << 40) | (ELEM(zm, 4, 8) << 48) | (ELEM(zm, 6, 8) << 56); + uint64_t m0 = ELEM(zd, 1, 8) | (ELEM(zd, 3, 8) << 8) | (ELEM(zd, 5, 8) << 16) | (ELEM(zd, 7, 8) << 24) | + (ELEM(zm, 1, 8) << 32) | (ELEM(zm, 3, 8) << 40) | (ELEM(zm, 5, 8) << 48) | (ELEM(zm, 7, 8) << 56); env->vfp.regs[rm] = make_float64(m0); env->vfp.regs[rd] = make_float64(d0); } -void HELPER(neon_unzip16)(CPUARMState *env, uint32_t rd, uint32_t rm) -{ +void HELPER(neon_unzip16)(CPUARMState *env, uint32_t rd, uint32_t rm) { uint64_t zm = float64_val(env->vfp.regs[rm]); uint64_t zd = float64_val(env->vfp.regs[rd]); - uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zd, 2, 16) << 16) - | (ELEM(zm, 0, 16) << 32) | (ELEM(zm, 2, 16) << 48); - uint64_t m0 = ELEM(zd, 1, 16) | (ELEM(zd, 3, 16) << 16) - | (ELEM(zm, 1, 16) << 32) | (ELEM(zm, 3, 16) << 48); + uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zd, 2, 16) << 16) | (ELEM(zm, 0, 16) << 32) | (ELEM(zm, 2, 16) << 48); + uint64_t m0 = ELEM(zd, 1, 16) | (ELEM(zd, 3, 16) << 16) | (ELEM(zm, 1, 16) << 32) | (ELEM(zm, 3, 16) << 48); env->vfp.regs[rm] = make_float64(m0); env->vfp.regs[rd] = make_float64(d0); } -void HELPER(neon_qzip8)(CPUARMState *env, uint32_t rd, uint32_t rm) -{ +void HELPER(neon_qzip8)(CPUARMState *env, uint32_t rd, uint32_t rm) { uint64_t zm0 = float64_val(env->vfp.regs[rm]); uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); uint64_t zd0 = float64_val(env->vfp.regs[rd]); uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); - uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zm0, 0, 8) << 8) - | (ELEM(zd0, 1, 8) << 16) | (ELEM(zm0, 1, 8) << 24) - | (ELEM(zd0, 2, 8) << 32) | (ELEM(zm0, 2, 8) << 40) - | (ELEM(zd0, 3, 8) << 48) | (ELEM(zm0, 3, 8) << 56); - uint64_t d1 = ELEM(zd0, 4, 8) | (ELEM(zm0, 4, 8) << 8) - | (ELEM(zd0, 5, 8) << 16) | (ELEM(zm0, 5, 8) << 24) - | (ELEM(zd0, 6, 8) << 32) | (ELEM(zm0, 6, 8) << 40) - | (ELEM(zd0, 7, 8) << 48) | (ELEM(zm0, 7, 8) << 56); - uint64_t m0 = ELEM(zd1, 0, 8) | (ELEM(zm1, 0, 8) << 8) - | (ELEM(zd1, 1, 8) << 16) | (ELEM(zm1, 1, 8) << 24) - | (ELEM(zd1, 2, 8) << 32) | (ELEM(zm1, 2, 8) << 40) - | (ELEM(zd1, 3, 8) << 48) | (ELEM(zm1, 3, 8) << 56); - uint64_t m1 = ELEM(zd1, 4, 8) | (ELEM(zm1, 4, 8) << 8) - | (ELEM(zd1, 5, 8) << 16) | (ELEM(zm1, 5, 8) << 24) - | (ELEM(zd1, 6, 8) << 32) | (ELEM(zm1, 6, 8) << 40) - | (ELEM(zd1, 7, 8) << 48) | (ELEM(zm1, 7, 8) << 56); + uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zm0, 0, 8) << 8) | (ELEM(zd0, 1, 8) << 16) | (ELEM(zm0, 1, 8) << 24) | + (ELEM(zd0, 2, 8) << 32) | (ELEM(zm0, 2, 8) << 40) | (ELEM(zd0, 3, 8) << 48) | (ELEM(zm0, 3, 8) << 56); + uint64_t d1 = ELEM(zd0, 4, 8) | (ELEM(zm0, 4, 8) << 8) | (ELEM(zd0, 5, 8) << 16) | (ELEM(zm0, 5, 8) << 24) | + (ELEM(zd0, 6, 8) << 32) | (ELEM(zm0, 6, 8) << 40) | (ELEM(zd0, 7, 8) << 48) | (ELEM(zm0, 7, 8) << 56); + uint64_t m0 = ELEM(zd1, 0, 8) | (ELEM(zm1, 0, 8) << 8) | (ELEM(zd1, 1, 8) << 16) | (ELEM(zm1, 1, 8) << 24) | + (ELEM(zd1, 2, 8) << 32) | (ELEM(zm1, 2, 8) << 40) | (ELEM(zd1, 3, 8) << 48) | (ELEM(zm1, 3, 8) << 56); + uint64_t m1 = ELEM(zd1, 4, 8) | (ELEM(zm1, 4, 8) << 8) | (ELEM(zd1, 5, 8) << 16) | (ELEM(zm1, 5, 8) << 24) | + (ELEM(zd1, 6, 8) << 32) | (ELEM(zm1, 6, 8) << 40) | (ELEM(zd1, 7, 8) << 48) | (ELEM(zm1, 7, 8) << 56); env->vfp.regs[rm] = make_float64(m0); env->vfp.regs[rm + 1] = make_float64(m1); env->vfp.regs[rd] = make_float64(d0); env->vfp.regs[rd + 1] = make_float64(d1); } 
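/* A minimal reference sketch of the byte interleave that the zip/qzip
 * helpers implement with ELEM(). This is an illustration only (the helper
 * name zip8_lo is ours, not part of the imported QEMU source), kept out of
 * the build with #if 0: */
#if 0
static uint64_t zip8_lo(uint64_t zd, uint64_t zm) {
    uint64_t r = 0;
    int i;
    for (i = 0; i < 4; i++) {
        r |= ((zd >> (8 * i)) & 0xffull) << (16 * i);     /* even result bytes come from zd */
        r |= ((zm >> (8 * i)) & 0xffull) << (16 * i + 8); /* odd result bytes come from zm */
    }
    /* e.g. zd = 0x0706050403020100, zm = 0x0f0e0d0c0b0a0908
       gives r = 0x0b030a0209010800, matching d0 in the qzip8 helper above. */
    return r;
}
#endif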
-void HELPER(neon_qzip16)(CPUARMState *env, uint32_t rd, uint32_t rm) -{ +void HELPER(neon_qzip16)(CPUARMState *env, uint32_t rd, uint32_t rm) { uint64_t zm0 = float64_val(env->vfp.regs[rm]); uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); uint64_t zd0 = float64_val(env->vfp.regs[rd]); uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); - uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zm0, 0, 16) << 16) - | (ELEM(zd0, 1, 16) << 32) | (ELEM(zm0, 1, 16) << 48); - uint64_t d1 = ELEM(zd0, 2, 16) | (ELEM(zm0, 2, 16) << 16) - | (ELEM(zd0, 3, 16) << 32) | (ELEM(zm0, 3, 16) << 48); - uint64_t m0 = ELEM(zd1, 0, 16) | (ELEM(zm1, 0, 16) << 16) - | (ELEM(zd1, 1, 16) << 32) | (ELEM(zm1, 1, 16) << 48); - uint64_t m1 = ELEM(zd1, 2, 16) | (ELEM(zm1, 2, 16) << 16) - | (ELEM(zd1, 3, 16) << 32) | (ELEM(zm1, 3, 16) << 48); + uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zm0, 0, 16) << 16) | (ELEM(zd0, 1, 16) << 32) | (ELEM(zm0, 1, 16) << 48); + uint64_t d1 = ELEM(zd0, 2, 16) | (ELEM(zm0, 2, 16) << 16) | (ELEM(zd0, 3, 16) << 32) | (ELEM(zm0, 3, 16) << 48); + uint64_t m0 = ELEM(zd1, 0, 16) | (ELEM(zm1, 0, 16) << 16) | (ELEM(zd1, 1, 16) << 32) | (ELEM(zm1, 1, 16) << 48); + uint64_t m1 = ELEM(zd1, 2, 16) | (ELEM(zm1, 2, 16) << 16) | (ELEM(zd1, 3, 16) << 32) | (ELEM(zm1, 3, 16) << 48); env->vfp.regs[rm] = make_float64(m0); env->vfp.regs[rm + 1] = make_float64(m1); env->vfp.regs[rd] = make_float64(d0); env->vfp.regs[rd + 1] = make_float64(d1); } -void HELPER(neon_qzip32)(CPUARMState *env, uint32_t rd, uint32_t rm) -{ +void HELPER(neon_qzip32)(CPUARMState *env, uint32_t rd, uint32_t rm) { uint64_t zm0 = float64_val(env->vfp.regs[rm]); uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); uint64_t zd0 = float64_val(env->vfp.regs[rd]); @@ -1991,30 +1873,22 @@ void HELPER(neon_qzip32)(CPUARMState *env, uint32_t rd, uint32_t rm) env->vfp.regs[rd + 1] = make_float64(d1); } -void HELPER(neon_zip8)(CPUARMState *env, uint32_t rd, uint32_t rm) -{ +void HELPER(neon_zip8)(CPUARMState *env, uint32_t rd, uint32_t rm) { uint64_t zm = float64_val(env->vfp.regs[rm]); uint64_t zd = float64_val(env->vfp.regs[rd]); - uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zm, 0, 8) << 8) - | (ELEM(zd, 1, 8) << 16) | (ELEM(zm, 1, 8) << 24) - | (ELEM(zd, 2, 8) << 32) | (ELEM(zm, 2, 8) << 40) - | (ELEM(zd, 3, 8) << 48) | (ELEM(zm, 3, 8) << 56); - uint64_t m0 = ELEM(zd, 4, 8) | (ELEM(zm, 4, 8) << 8) - | (ELEM(zd, 5, 8) << 16) | (ELEM(zm, 5, 8) << 24) - | (ELEM(zd, 6, 8) << 32) | (ELEM(zm, 6, 8) << 40) - | (ELEM(zd, 7, 8) << 48) | (ELEM(zm, 7, 8) << 56); + uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zm, 0, 8) << 8) | (ELEM(zd, 1, 8) << 16) | (ELEM(zm, 1, 8) << 24) | + (ELEM(zd, 2, 8) << 32) | (ELEM(zm, 2, 8) << 40) | (ELEM(zd, 3, 8) << 48) | (ELEM(zm, 3, 8) << 56); + uint64_t m0 = ELEM(zd, 4, 8) | (ELEM(zm, 4, 8) << 8) | (ELEM(zd, 5, 8) << 16) | (ELEM(zm, 5, 8) << 24) | + (ELEM(zd, 6, 8) << 32) | (ELEM(zm, 6, 8) << 40) | (ELEM(zd, 7, 8) << 48) | (ELEM(zm, 7, 8) << 56); env->vfp.regs[rm] = make_float64(m0); env->vfp.regs[rd] = make_float64(d0); } -void HELPER(neon_zip16)(CPUARMState *env, uint32_t rd, uint32_t rm) -{ +void HELPER(neon_zip16)(CPUARMState *env, uint32_t rd, uint32_t rm) { uint64_t zm = float64_val(env->vfp.regs[rm]); uint64_t zd = float64_val(env->vfp.regs[rd]); - uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zm, 0, 16) << 16) - | (ELEM(zd, 1, 16) << 32) | (ELEM(zm, 1, 16) << 48); - uint64_t m0 = ELEM(zd, 2, 16) | (ELEM(zm, 2, 16) << 16) - | (ELEM(zd, 3, 16) << 32) | (ELEM(zm, 3, 16) << 48); + uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zm, 0, 16) << 16) | (ELEM(zd, 1, 16) << 32) 
| (ELEM(zm, 1, 16) << 48); + uint64_t m0 = ELEM(zd, 2, 16) | (ELEM(zm, 2, 16) << 16) | (ELEM(zd, 3, 16) << 32) | (ELEM(zm, 3, 16) << 48); env->vfp.regs[rm] = make_float64(m0); env->vfp.regs[rd] = make_float64(d0); } diff --git a/src/target-arm/op_addsub.h b/src/target-arm/op_addsub.h index ca4a189..0e9098c 100644 --- a/src/target-arm/op_addsub.h +++ b/src/target-arm/op_addsub.h @@ -10,18 +10,20 @@ #ifdef ARITH_GE #define GE_ARG , void *gep #define DECLARE_GE uint32_t ge = 0 -#define SET_GE *(uint32_t *)gep = ge +#define SET_GE *(uint32_t *) gep = ge #else #define GE_ARG -#define DECLARE_GE do{}while(0) -#define SET_GE do{}while(0) +#define DECLARE_GE \ + do { \ + } while (0) +#define SET_GE \ + do { \ + } while (0) #endif -#define RESULT(val, n, width) \ - res |= ((uint32_t)(glue(glue(uint,width),_t))(val)) << (n * width) +#define RESULT(val, n, width) res |= ((uint32_t)(glue(glue(uint, width), _t))(val)) << (n * width) -uint32_t HELPER(glue(PFX,add16))(uint32_t a, uint32_t b GE_ARG) -{ +uint32_t HELPER(glue(PFX, add16))(uint32_t a, uint32_t b GE_ARG) { uint32_t res = 0; DECLARE_GE; @@ -31,8 +33,7 @@ uint32_t HELPER(glue(PFX,add16))(uint32_t a, uint32_t b GE_ARG) return res; } -uint32_t HELPER(glue(PFX,add8))(uint32_t a, uint32_t b GE_ARG) -{ +uint32_t HELPER(glue(PFX, add8))(uint32_t a, uint32_t b GE_ARG) { uint32_t res = 0; DECLARE_GE; @@ -44,8 +45,7 @@ uint32_t HELPER(glue(PFX,add8))(uint32_t a, uint32_t b GE_ARG) return res; } -uint32_t HELPER(glue(PFX,sub16))(uint32_t a, uint32_t b GE_ARG) -{ +uint32_t HELPER(glue(PFX, sub16))(uint32_t a, uint32_t b GE_ARG) { uint32_t res = 0; DECLARE_GE; @@ -55,8 +55,7 @@ uint32_t HELPER(glue(PFX,sub16))(uint32_t a, uint32_t b GE_ARG) return res; } -uint32_t HELPER(glue(PFX,sub8))(uint32_t a, uint32_t b GE_ARG) -{ +uint32_t HELPER(glue(PFX, sub8))(uint32_t a, uint32_t b GE_ARG) { uint32_t res = 0; DECLARE_GE; @@ -68,8 +67,7 @@ uint32_t HELPER(glue(PFX,sub8))(uint32_t a, uint32_t b GE_ARG) return res; } -uint32_t HELPER(glue(PFX,subaddx))(uint32_t a, uint32_t b GE_ARG) -{ +uint32_t HELPER(glue(PFX, subaddx))(uint32_t a, uint32_t b GE_ARG) { uint32_t res = 0; DECLARE_GE; @@ -79,8 +77,7 @@ uint32_t HELPER(glue(PFX,subaddx))(uint32_t a, uint32_t b GE_ARG) return res; } -uint32_t HELPER(glue(PFX,addsubx))(uint32_t a, uint32_t b GE_ARG) -{ +uint32_t HELPER(glue(PFX, addsubx))(uint32_t a, uint32_t b GE_ARG) { uint32_t res = 0; DECLARE_GE; diff --git a/src/target-arm/op_helper.c b/src/target-arm/op_helper.c index 622f3e2..436d7c6 100644 --- a/src/target-arm/op_helper.c +++ b/src/target-arm/op_helper.c @@ -21,11 +21,9 @@ #include "dyngen-exec.h" #include "helper.h" - #define SIGNBIT (uint32_t) 0x80000000 #define SIGNBIT64 ((uint64_t) 1 << 63) - struct CPUARMState *env = 0; static void raise_exception(int tt) { @@ -88,11 +86,8 @@ uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, uint32_t rn, uint32_t max #undef _raw - #endif - - /* try to fill the TLB and return an exception if error. If retaddr is NULL, it means that the function was called in C code (i.e. 
not from generated code or from helper.c) */ @@ -109,9 +104,9 @@ void tlb_fill(CPUArchState *env1, target_ulong addr, target_ulong page_addr, int env = env1; #ifdef CONFIG_SYMBEX - if (unlikely(*g_sqi.events.on_tlb_miss_signals_count)) { + if (unlikely(*g_sqi.events.on_tlb_miss_signals_count)) { g_sqi.events.on_tlb_miss(addr, is_write, retaddr); - } + } ret = cpu_arm_handle_mmu_fault(env, page_addr, is_write, mmu_idx); #else ret = cpu_arm_handle_mmu_fault(env, addr, is_write, mmu_idx); @@ -140,9 +135,9 @@ void tlb_fill(CPUArchState *env1, target_ulong addr, target_ulong page_addr, int }; #ifdef CONFIG_SYMBEX - if (unlikely(*g_sqi.events.on_page_fault_signals_count)) { + if (unlikely(*g_sqi.events.on_page_fault_signals_count)) { g_sqi.events.on_page_fault(addr, is_write, retaddr); - } + } #endif raise_exception(env->exception_index); diff --git a/src/target-arm/translate.c b/src/target-arm/translate.c index 2aa1ebf..6b534ad 100644 --- a/src/target-arm/translate.c +++ b/src/target-arm/translate.c @@ -18,11 +18,11 @@ * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . */ +#include #include -#include #include +#include #include -#include // clang-format off #include "cpu.h" @@ -33,27 +33,27 @@ #define GEN_HELPER 1 #include "helper.h" - - #include - - #ifdef CONFIG_SYMBEX #include #endif -#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T) -#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5) +#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T) +#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5) /* currently all emulated v5 cores are also v5TE, so don't bother */ -#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5) -#define ENABLE_ARCH_5J 0 -#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6) -#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K) -#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2) -#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7) - -#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0) +#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5) +#define ENABLE_ARCH_5J 0 +#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6) +#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K) +#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2) +#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7) + +#define ARCH(x) \ + do { \ + if (!ENABLE_ARCH_##x) \ + goto illegal_op; \ + } while (0) /* internal defines */ typedef struct DisasContext { @@ -125,41 +125,31 @@ static TCGv_i64 cpu_F0d, cpu_F1d; //#include "gen-icount.h" -static const char *regnames[] = - { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", - "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" }; +static const char *regnames[] = {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc"}; /* initialize TCG globals. 
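 * (run once at startup: cpu_env is pinned to TCG_AREG0, while the sixteen
 * cpu_R registers and the exclusive-access fields become memory-backed TCG
 * globals living in CPUARMState, shared by every translation block)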
*/ -void arm_translate_init(void) -{ +void arm_translate_init(void) { int i; cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); for (i = 0; i < 16; i++) { - cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0, - offsetof(CPUARMState, regs[i]), - regnames[i]); + cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, regs[i]), regnames[i]); } - cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0, - offsetof(CPUARMState, exclusive_addr), "exclusive_addr"); - cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0, - offsetof(CPUARMState, exclusive_val), "exclusive_val"); - cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0, - offsetof(CPUARMState, exclusive_high), "exclusive_high"); + cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, exclusive_addr), "exclusive_addr"); + cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, exclusive_val), "exclusive_val"); + cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, exclusive_high), "exclusive_high"); #ifdef CONFIG_USER_ONLY - cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0, - offsetof(CPUARMState, exclusive_test), "exclusive_test"); - cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0, - offsetof(CPUARMState, exclusive_info), "exclusive_info"); + cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, exclusive_test), "exclusive_test"); + cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, exclusive_info), "exclusive_info"); #endif #define GEN_HELPER 2 #include "helper.h" } -static inline TCGv load_cpu_offset(int offset) -{ +static inline TCGv load_cpu_offset(int offset) { TCGv tmp = tcg_temp_new_i32(); tcg_gen_ld_i32(tmp, cpu_env, offset); return tmp; @@ -167,25 +157,22 @@ static inline TCGv load_cpu_offset(int offset) #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name)) -static inline void store_cpu_offset(TCGv var, int offset) -{ +static inline void store_cpu_offset(TCGv var, int offset) { tcg_gen_st_i32(var, cpu_env, offset); tcg_temp_free_i32(var); } -#define store_cpu_field(var, name) \ - store_cpu_offset(var, offsetof(CPUARMState, name)) +#define store_cpu_field(var, name) store_cpu_offset(var, offsetof(CPUARMState, name)) /* Set a variable to the value of a CPU register. */ -static void load_reg_var(DisasContext *s, TCGv var, int reg) -{ +static void load_reg_var(DisasContext *s, TCGv var, int reg) { if (reg == 15) { uint32_t addr; /* normally, since we updated PC, we need only to add one insn */ if (s->thumb) - addr = (long)s->pc + 2; + addr = (long) s->pc + 2; else - addr = (long)s->pc + 4; + addr = (long) s->pc + 4; tcg_gen_movi_i32(var, addr); } else { tcg_gen_mov_i32(var, cpu_R[reg]); @@ -193,8 +180,7 @@ static void load_reg_var(DisasContext *s, TCGv var, int reg) } /* Create a new temporary and set it to the value of a CPU register. */ -static inline TCGv load_reg(DisasContext *s, int reg) -{ +static inline TCGv load_reg(DisasContext *s, int reg) { TCGv tmp = tcg_temp_new_i32(); load_reg_var(s, tmp, reg); return tmp; @@ -202,8 +188,7 @@ static inline TCGv load_reg(DisasContext *s, int reg) /* Set a CPU register. The source must be a temporary and will be marked as dead.
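 * Writes to r15 clear bit 0 and end the translation block with DISAS_JUMP,
 * since the translator cannot statically follow a runtime PC change.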
*/ -static void store_reg(DisasContext *s, int reg, TCGv var) -{ +static void store_reg(DisasContext *s, int reg, TCGv var) { if (reg == 15) { tcg_gen_andi_i32(var, var, ~1); s->is_jmp = DISAS_JUMP; @@ -221,9 +206,7 @@ static void store_reg(DisasContext *s, int reg, TCGv var) #define gen_sxtb16(var) gen_helper_sxtb16(var, var) #define gen_uxtb16(var) gen_helper_uxtb16(var, var) - -static inline void gen_set_cpsr(TCGv var, uint32_t mask) -{ +static inline void gen_set_cpsr(TCGv var, uint32_t mask) { TCGv tmp_mask = tcg_const_i32(mask); gen_helper_cpsr_write(var, tmp_mask); tcg_temp_free_i32(tmp_mask); @@ -231,16 +214,14 @@ static inline void gen_set_cpsr(TCGv var, uint32_t mask) /* Set NZCV flags from the high 4 bits of var. */ #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV) -static void gen_exception(int excp) -{ +static void gen_exception(int excp) { TCGv tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, excp); gen_helper_exception(tmp); tcg_temp_free_i32(tmp); } -static void gen_smul_dual(TCGv a, TCGv b) -{ +static void gen_smul_dual(TCGv a, TCGv b) { TCGv tmp1 = tcg_temp_new_i32(); TCGv tmp2 = tcg_temp_new_i32(); tcg_gen_ext16s_i32(tmp1, a); @@ -255,8 +236,7 @@ static void gen_smul_dual(TCGv a, TCGv b) } /* Byteswap each halfword. */ -static void gen_rev16(TCGv var) -{ +static void gen_rev16(TCGv var) { TCGv tmp = tcg_temp_new_i32(); tcg_gen_shri_i32(tmp, var, 8); tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff); @@ -267,24 +247,21 @@ static void gen_rev16(TCGv var) } /* Byteswap low halfword and sign extend. */ -static void gen_revsh(TCGv var) -{ +static void gen_revsh(TCGv var) { tcg_gen_ext16u_i32(var, var); tcg_gen_bswap16_i32(var, var); tcg_gen_ext16s_i32(var, var); } /* Unsigned bitfield extract. */ -static void gen_ubfx(TCGv var, int shift, uint32_t mask) -{ +static void gen_ubfx(TCGv var, int shift, uint32_t mask) { if (shift) tcg_gen_shri_i32(var, var, shift); tcg_gen_andi_i32(var, var, mask); } /* Signed bitfield extract. */ -static void gen_sbfx(TCGv var, int shift, int width) -{ +static void gen_sbfx(TCGv var, int shift, int width) { uint32_t signbit; if (shift) @@ -298,8 +275,7 @@ static void gen_sbfx(TCGv var, int shift, int width) } /* Bitfield insertion. Insert val into base. Clobbers base and val. */ -static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask) -{ +static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask) { tcg_gen_andi_i32(val, val, mask); tcg_gen_shli_i32(val, val, shift); tcg_gen_andi_i32(base, base, ~(mask << shift)); @@ -307,8 +283,7 @@ static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask) } /* Return (b << 32) + a. Mark inputs as dead */ -static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b) -{ +static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b) { TCGv_i64 tmp64 = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(tmp64, b); @@ -321,8 +296,7 @@ static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b) } /* Return (b << 32) - a. Mark inputs as dead. */ -static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b) -{ +static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b) { TCGv_i64 tmp64 = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(tmp64, b); @@ -337,8 +311,7 @@ static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b) /* FIXME: Most targets have native widening multiplication. It would be good to use that instead of a full wide multiply. */ /* 32x32->64 multiply. Marks inputs as dead. 
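 * (both 32-bit operands are extended to 64 bits and multiplied with
 * tcg_gen_mul_i64; as the FIXME above says, a native widening multiply
 * would avoid the two extensions)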
*/ -static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b) -{ +static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b) { TCGv_i64 tmp1 = tcg_temp_new_i64(); TCGv_i64 tmp2 = tcg_temp_new_i64(); @@ -351,8 +324,7 @@ static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b) return tmp1; } -static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b) -{ +static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b) { TCGv_i64 tmp1 = tcg_temp_new_i64(); TCGv_i64 tmp2 = tcg_temp_new_i64(); @@ -366,8 +338,7 @@ static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b) } /* Swap low and high halfwords. */ -static void gen_swap_half(TCGv var) -{ +static void gen_swap_half(TCGv var) { TCGv tmp = tcg_temp_new_i32(); tcg_gen_shri_i32(tmp, var, 16); tcg_gen_shli_i32(var, var, 16); @@ -382,8 +353,7 @@ static void gen_swap_half(TCGv var) t0 = (t0 + t1) ^ tmp; */ -static void gen_add16(TCGv t0, TCGv t1) -{ +static void gen_add16(TCGv t0, TCGv t1) { TCGv tmp = tcg_temp_new_i32(); tcg_gen_xor_i32(tmp, t0, t1); tcg_gen_andi_i32(tmp, tmp, 0x8000); @@ -398,8 +368,7 @@ static void gen_add16(TCGv t0, TCGv t1) #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, CF)) /* Set CF to the top bit of var. */ -static void gen_set_CF_bit31(TCGv var) -{ +static void gen_set_CF_bit31(TCGv var) { TCGv tmp = tcg_temp_new_i32(); tcg_gen_shri_i32(tmp, var, 31); gen_set_CF(tmp); @@ -407,15 +376,13 @@ static void gen_set_CF_bit31(TCGv var) } /* Set N and Z flags from var. */ -static inline void gen_logic_CC(TCGv var) -{ +static inline void gen_logic_CC(TCGv var) { tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, NF)); tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, ZF)); } /* T0 += T1 + CF. */ -static void gen_adc(TCGv t0, TCGv t1) -{ +static void gen_adc(TCGv t0, TCGv t1) { TCGv tmp; tcg_gen_add_i32(t0, t0, t1); tmp = load_cpu_field(CF); @@ -424,8 +391,7 @@ static void gen_adc(TCGv t0, TCGv t1) } /* dest = T0 + T1 + CF. */ -static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1) -{ +static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1) { TCGv tmp; tcg_gen_add_i32(dest, t0, t1); tmp = load_cpu_field(CF); @@ -434,8 +400,7 @@ static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1) } /* dest = T0 - T1 + CF - 1. */ -static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1) -{ +static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1) { TCGv tmp; tcg_gen_sub_i32(dest, t0, t1); tmp = load_cpu_field(CF); @@ -447,8 +412,7 @@ static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1) /* FIXME: Implement this natively. */ #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1) -static void shifter_out_im(TCGv var, int shift) -{ +static void shifter_out_im(TCGv var, int shift) { TCGv tmp = tcg_temp_new_i32(); if (shift == 0) { tcg_gen_andi_i32(tmp, var, 1); @@ -462,264 +426,299 @@ static void shifter_out_im(TCGv var, int shift) } /* Shift by immediate. Includes special handling for shift == 0. 
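 * In the ARM encodings an immediate shift amount of 0 means LSR #32,
 * ASR #32 and RRX for the LSR, ASR and ROR shift types respectively,
 * which is why the cases below treat shift == 0 specially.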
*/ -static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags) -{ +static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags) { switch (shiftop) { - case 0: /* LSL */ - if (shift != 0) { - if (flags) - shifter_out_im(var, 32 - shift); - tcg_gen_shli_i32(var, var, shift); - } - break; - case 1: /* LSR */ - if (shift == 0) { - if (flags) { - tcg_gen_shri_i32(var, var, 31); - gen_set_CF(var); + case 0: /* LSL */ + if (shift != 0) { + if (flags) + shifter_out_im(var, 32 - shift); + tcg_gen_shli_i32(var, var, shift); } - tcg_gen_movi_i32(var, 0); - } else { - if (flags) - shifter_out_im(var, shift - 1); - tcg_gen_shri_i32(var, var, shift); - } - break; - case 2: /* ASR */ - if (shift == 0) - shift = 32; - if (flags) - shifter_out_im(var, shift - 1); - if (shift == 32) - shift = 31; - tcg_gen_sari_i32(var, var, shift); - break; - case 3: /* ROR/RRX */ - if (shift != 0) { + break; + case 1: /* LSR */ + if (shift == 0) { + if (flags) { + tcg_gen_shri_i32(var, var, 31); + gen_set_CF(var); + } + tcg_gen_movi_i32(var, 0); + } else { + if (flags) + shifter_out_im(var, shift - 1); + tcg_gen_shri_i32(var, var, shift); + } + break; + case 2: /* ASR */ + if (shift == 0) + shift = 32; if (flags) shifter_out_im(var, shift - 1); - tcg_gen_rotri_i32(var, var, shift); break; - } else { - TCGv tmp = load_cpu_field(CF); - if (flags) - shifter_out_im(var, 0); - tcg_gen_shri_i32(var, var, 1); - tcg_gen_shli_i32(tmp, tmp, 31); - tcg_gen_or_i32(var, var, tmp); - tcg_temp_free_i32(tmp); - } + if (shift == 32) + shift = 31; + tcg_gen_sari_i32(var, var, shift); + break; + case 3: /* ROR/RRX */ + if (shift != 0) { + if (flags) + shifter_out_im(var, shift - 1); + tcg_gen_rotri_i32(var, var, shift); + break; + } else { + TCGv tmp = load_cpu_field(CF); + if (flags) + shifter_out_im(var, 0); + tcg_gen_shri_i32(var, var, 1); + tcg_gen_shli_i32(tmp, tmp, 31); + tcg_gen_or_i32(var, var, tmp); + tcg_temp_free_i32(tmp); + } } }; -static inline void gen_arm_shift_reg(TCGv var, int shiftop, - TCGv shift, int flags) -{ +static inline void gen_arm_shift_reg(TCGv var, int shiftop, TCGv shift, int flags) { if (flags) { switch (shiftop) { - case 0: gen_helper_shl_cc(var, var, shift); break; - case 1: gen_helper_shr_cc(var, var, shift); break; - case 2: gen_helper_sar_cc(var, var, shift); break; - case 3: gen_helper_ror_cc(var, var, shift); break; + case 0: + gen_helper_shl_cc(var, var, shift); + break; + case 1: + gen_helper_shr_cc(var, var, shift); + break; + case 2: + gen_helper_sar_cc(var, var, shift); + break; + case 3: + gen_helper_ror_cc(var, var, shift); + break; } } else { switch (shiftop) { - case 0: gen_helper_shl(var, var, shift); break; - case 1: gen_helper_shr(var, var, shift); break; - case 2: gen_helper_sar(var, var, shift); break; - case 3: tcg_gen_andi_i32(shift, shift, 0x1f); - tcg_gen_rotr_i32(var, var, shift); break; + case 0: + gen_helper_shl(var, var, shift); + break; + case 1: + gen_helper_shr(var, var, shift); + break; + case 2: + gen_helper_sar(var, var, shift); + break; + case 3: + tcg_gen_andi_i32(shift, shift, 0x1f); + tcg_gen_rotr_i32(var, var, shift); + break; } } tcg_temp_free_i32(shift); } -#define PAS_OP(pfx) \ - switch (op2) { \ - case 0: gen_pas_helper(glue(pfx,add16)); break; \ - case 1: gen_pas_helper(glue(pfx,addsubx)); break; \ - case 2: gen_pas_helper(glue(pfx,subaddx)); break; \ - case 3: gen_pas_helper(glue(pfx,sub16)); break; \ - case 4: gen_pas_helper(glue(pfx,add8)); break; \ - case 7: gen_pas_helper(glue(pfx,sub8)); break; \ +#define PAS_OP(pfx) \ 
-#define PAS_OP(pfx) \
-    switch (op2) {  \
-    case 0: gen_pas_helper(glue(pfx,add16)); break; \
-    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
-    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
-    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
-    case 4: gen_pas_helper(glue(pfx,add8)); break; \
-    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
+#define PAS_OP(pfx)                                 \
+    switch (op2) {                                  \
+        case 0:                                     \
+            gen_pas_helper(glue(pfx, add16));       \
+            break;                                  \
+        case 1:                                     \
+            gen_pas_helper(glue(pfx, addsubx));     \
+            break;                                  \
+        case 2:                                     \
+            gen_pas_helper(glue(pfx, subaddx));     \
+            break;                                  \
+        case 3:                                     \
+            gen_pas_helper(glue(pfx, sub16));       \
+            break;                                  \
+        case 4:                                     \
+            gen_pas_helper(glue(pfx, add8));        \
+            break;                                  \
+        case 7:                                     \
+            gen_pas_helper(glue(pfx, sub8));        \
+            break;                                  \
     }
 
-static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
-{
+static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b) {
     TCGv_ptr tmp;
 
     switch (op1) {
-#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
-    case 1:
-        tmp = tcg_temp_new_ptr();
-        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
-        PAS_OP(s)
-        tcg_temp_free_ptr(tmp);
-        break;
-    case 5:
-        tmp = tcg_temp_new_ptr();
-        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
-        PAS_OP(u)
-        tcg_temp_free_ptr(tmp);
-        break;
+#define gen_pas_helper(name) glue(gen_helper_, name)(a, a, b, tmp)
+        case 1:
+            tmp = tcg_temp_new_ptr();
+            tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
+            PAS_OP(s)
+            tcg_temp_free_ptr(tmp);
+            break;
+        case 5:
+            tmp = tcg_temp_new_ptr();
+            tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
+            PAS_OP(u)
+            tcg_temp_free_ptr(tmp);
+            break;
 #undef gen_pas_helper
-#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
-    case 2:
-        PAS_OP(q);
-        break;
-    case 3:
-        PAS_OP(sh);
-        break;
-    case 6:
-        PAS_OP(uq);
-        break;
-    case 7:
-        PAS_OP(uh);
-        break;
+#define gen_pas_helper(name) glue(gen_helper_, name)(a, a, b)
+        case 2:
+            PAS_OP(q);
+            break;
+        case 3:
+            PAS_OP(sh);
+            break;
+        case 6:
+            PAS_OP(uq);
+            break;
+        case 7:
+            PAS_OP(uh);
+            break;
 #undef gen_pas_helper
     }
 }
 #undef PAS_OP
 
 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.
 */
-#define PAS_OP(pfx) \
-    switch (op1) {  \
-    case 0: gen_pas_helper(glue(pfx,add8)); break; \
-    case 1: gen_pas_helper(glue(pfx,add16)); break; \
-    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
-    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
-    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
-    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
+#define PAS_OP(pfx)                                 \
+    switch (op1) {                                  \
+        case 0:                                     \
+            gen_pas_helper(glue(pfx, add8));        \
+            break;                                  \
+        case 1:                                     \
+            gen_pas_helper(glue(pfx, add16));       \
+            break;                                  \
+        case 2:                                     \
+            gen_pas_helper(glue(pfx, addsubx));     \
+            break;                                  \
+        case 4:                                     \
+            gen_pas_helper(glue(pfx, sub8));        \
+            break;                                  \
+        case 5:                                     \
+            gen_pas_helper(glue(pfx, sub16));       \
+            break;                                  \
+        case 6:                                     \
+            gen_pas_helper(glue(pfx, subaddx));     \
+            break;                                  \
     }
 
-static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
-{
+static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b) {
     TCGv_ptr tmp;
 
     switch (op2) {
-#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
-    case 0:
-        tmp = tcg_temp_new_ptr();
-        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
-        PAS_OP(s)
-        tcg_temp_free_ptr(tmp);
-        break;
-    case 4:
-        tmp = tcg_temp_new_ptr();
-        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
-        PAS_OP(u)
-        tcg_temp_free_ptr(tmp);
-        break;
+#define gen_pas_helper(name) glue(gen_helper_, name)(a, a, b, tmp)
+        case 0:
+            tmp = tcg_temp_new_ptr();
+            tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
+            PAS_OP(s)
+            tcg_temp_free_ptr(tmp);
+            break;
+        case 4:
+            tmp = tcg_temp_new_ptr();
+            tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
+            PAS_OP(u)
+            tcg_temp_free_ptr(tmp);
+            break;
 #undef gen_pas_helper
-#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
-    case 1:
-        PAS_OP(q);
-        break;
-    case 2:
-        PAS_OP(sh);
-        break;
-    case 5:
-        PAS_OP(uq);
-        break;
-    case 6:
-        PAS_OP(uh);
-        break;
+#define gen_pas_helper(name) glue(gen_helper_, name)(a, a, b)
+        case 1:
+            PAS_OP(q);
+            break;
+        case 2:
+            PAS_OP(sh);
+            break;
+        case 5:
+            PAS_OP(uq);
+            break;
+        case 6:
+            PAS_OP(uh);
+            break;
 #undef gen_pas_helper
     }
 }
 #undef PAS_OP
 
-static void gen_test_cc(int cc, int label)
-{
+static void gen_test_cc(int cc, int label) {
     TCGv tmp;
     TCGv tmp2;
     int inv;
 
     switch (cc) {
-    case 0: /* eq: Z */
-        tmp = load_cpu_field(ZF);
-        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
-        break;
-    case 1: /* ne: !Z */
-        tmp = load_cpu_field(ZF);
-        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
-        break;
-    case 2: /* cs: C */
-        tmp = load_cpu_field(CF);
-        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
-        break;
-    case 3: /* cc: !C */
-        tmp = load_cpu_field(CF);
-        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
-        break;
-    case 4: /* mi: N */
-        tmp = load_cpu_field(NF);
-        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
-        break;
-    case 5: /* pl: !N */
-        tmp = load_cpu_field(NF);
-        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
-        break;
-    case 6: /* vs: V */
-        tmp = load_cpu_field(VF);
-        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
-        break;
-    case 7: /* vc: !V */
-        tmp = load_cpu_field(VF);
-        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
-        break;
-    case 8: /* hi: C && !Z */
-        inv = gen_new_label();
-        tmp = load_cpu_field(CF);
-        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
-        tcg_temp_free_i32(tmp);
-        tmp = load_cpu_field(ZF);
-        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
-        gen_set_label(inv);
-        break;
-    case 9: /* ls: !C || Z */
-        tmp = load_cpu_field(CF);
-        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
-        tcg_temp_free_i32(tmp);
-        tmp = load_cpu_field(ZF);
-        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
-        break;
-    case 10: /* ge: N == V -> N ^ V == 0 */
-        tmp = load_cpu_field(VF);
-        tmp2 = load_cpu_field(NF);
-        tcg_gen_xor_i32(tmp, tmp, tmp2);
-        tcg_temp_free_i32(tmp2);
-        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
-        break;
-    case 11: /* lt: N != V -> N ^ V != 0 */
-        tmp = load_cpu_field(VF);
-        tmp2 = load_cpu_field(NF);
-        tcg_gen_xor_i32(tmp, tmp, tmp2);
-        tcg_temp_free_i32(tmp2);
-        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
-        break;
-    case 12: /* gt: !Z && N == V */
-        inv = gen_new_label();
-        tmp = load_cpu_field(ZF);
-        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
-        tcg_temp_free_i32(tmp);
-        tmp = load_cpu_field(VF);
-        tmp2 = load_cpu_field(NF);
-        tcg_gen_xor_i32(tmp, tmp, tmp2);
-        tcg_temp_free_i32(tmp2);
-        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
-        gen_set_label(inv);
-        break;
-    case 13: /* le: Z || N != V */
-        tmp = load_cpu_field(ZF);
-        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
-        tcg_temp_free_i32(tmp);
-        tmp = load_cpu_field(VF);
-        tmp2 = load_cpu_field(NF);
-        tcg_gen_xor_i32(tmp, tmp, tmp2);
-        tcg_temp_free_i32(tmp2);
-        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
-        break;
-    default:
-        fprintf(stderr, "Bad condition code 0x%x\n", cc);
-        abort();
+        case 0: /* eq: Z */
+            tmp = load_cpu_field(ZF);
+            tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+            break;
+        case 1: /* ne: !Z */
+            tmp = load_cpu_field(ZF);
+            tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
+            break;
+        case 2: /* cs: C */
+            tmp = load_cpu_field(CF);
+            tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
+            break;
+        case 3: /* cc: !C */
+            tmp = load_cpu_field(CF);
+            tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+            break;
+        case 4: /* mi: N */
+            tmp = load_cpu_field(NF);
+            tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
+            break;
+        case 5: /* pl: !N */
+            tmp = load_cpu_field(NF);
+            tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
+            break;
+        case 6: /* vs: V */
+            tmp = load_cpu_field(VF);
+            tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
+            break;
+        case 7: /* vc: !V */
+            tmp = load_cpu_field(VF);
+            tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
+            break;
+        case 8: /* hi: C && !Z */
+            inv = gen_new_label();
+            tmp = load_cpu_field(CF);
+            tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
+            tcg_temp_free_i32(tmp);
+            tmp = load_cpu_field(ZF);
+            tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
+            gen_set_label(inv);
+            break;
+        case 9: /* ls: !C || Z */
+            tmp = load_cpu_field(CF);
+            tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+            tcg_temp_free_i32(tmp);
+            tmp = load_cpu_field(ZF);
+            tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+            break;
+        case 10: /* ge: N == V -> N ^ V == 0 */
+            tmp = load_cpu_field(VF);
+            tmp2 = load_cpu_field(NF);
+            tcg_gen_xor_i32(tmp, tmp, tmp2);
+            tcg_temp_free_i32(tmp2);
+            tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
+            break;
+        case 11: /* lt: N != V -> N ^ V != 0 */
+            tmp = load_cpu_field(VF);
+            tmp2 = load_cpu_field(NF);
+            tcg_gen_xor_i32(tmp, tmp, tmp2);
+            tcg_temp_free_i32(tmp2);
+            tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
+            break;
+        case 12: /* gt: !Z && N == V */
+            inv = gen_new_label();
+            tmp = load_cpu_field(ZF);
+            tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
+            tcg_temp_free_i32(tmp);
+            tmp = load_cpu_field(VF);
+            tmp2 = load_cpu_field(NF);
+            tcg_gen_xor_i32(tmp, tmp, tmp2);
+            tcg_temp_free_i32(tmp2);
+            tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
+            gen_set_label(inv);
+            break;
+        case 13: /* le: Z || N != V */
+            tmp = load_cpu_field(ZF);
+            tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+            tcg_temp_free_i32(tmp);
+            tmp = load_cpu_field(VF);
+            tmp2 = load_cpu_field(NF);
+            tcg_gen_xor_i32(tmp, tmp, tmp2);
+            tcg_temp_free_i32(tmp2);
+            tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
+            break;
+        default:
+            fprintf(stderr, "Bad condition code 0x%x\n", cc);
+            abort();
     }
     tcg_temp_free_i32(tmp);
 }
@@ -744,8 +743,7 @@ static const uint8_t table_logic_cc[16] = {
 };
 
 /* Set PC and Thumb state from an immediate address. */
-static inline void gen_bx_im(DisasContext *s, uint32_t addr)
-{
+static inline void gen_bx_im(DisasContext *s, uint32_t addr) {
     TCGv tmp;
 
     s->is_jmp = DISAS_UPDATE;
@@ -759,8 +757,7 @@ static inline void gen_bx_im(DisasContext *s, uint32_t addr)
 }
 
 /* Set PC and Thumb state from var. var is marked as dead. */
-static inline void gen_bx(DisasContext *s, TCGv var)
-{
+static inline void gen_bx(DisasContext *s, TCGv var) {
     s->is_jmp = DISAS_UPDATE;
     tcg_gen_andi_i32(cpu_R[15], var, ~1);
     tcg_gen_andi_i32(var, var, 1);
@@ -770,9 +767,7 @@ static inline void gen_bx(DisasContext *s, TCGv var)
 /* Variant of store_reg which uses branch&exchange logic when storing
    to r15 in ARM architecture v7 and above. The source must be a temporary
    and will be marked as dead. */
-static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
-                                int reg, TCGv var)
-{
+static inline void store_reg_bx(CPUARMState *env, DisasContext *s, int reg, TCGv var) {
     if (reg == 15 && ENABLE_ARCH_7) {
         gen_bx(s, var);
     } else {
@@ -784,9 +779,7 @@ static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
  * to r15 in ARM architecture v5T and above. This is used for storing
  * the results of a LDR/LDM/POP into r15, and corresponds to the cases
  * in the ARM ARM which use the LoadWritePC() pseudocode function. */
-static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
-                                       int reg, TCGv var)
-{
+static inline void store_reg_from_load(CPUARMState *env, DisasContext *s, int reg, TCGv var) {
     if (reg == 15 && ENABLE_ARCH_5) {
         gen_bx(s, var);
@@ -795,78 +788,64 @@ static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
     }
 }
 
-static inline TCGv gen_ld8s(TCGv addr, int index)
-{
+static inline TCGv gen_ld8s(TCGv addr, int index) {
     TCGv tmp = tcg_temp_new_i32();
     tcg_gen_qemu_ld8s(tmp, addr, index);
     return tmp;
 }
-static inline TCGv gen_ld8u(TCGv addr, int index)
-{
+static inline TCGv gen_ld8u(TCGv addr, int index) {
     TCGv tmp = tcg_temp_new_i32();
     tcg_gen_qemu_ld8u(tmp, addr, index);
     return tmp;
 }
-static inline TCGv gen_ld16s(TCGv addr, int index)
-{
+static inline TCGv gen_ld16s(TCGv addr, int index) {
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
 }
-static inline TCGv gen_ld16u(TCGv addr, int index)
-{
+static inline TCGv gen_ld16u(TCGv addr, int index) {
     TCGv tmp = tcg_temp_new_i32();
     tcg_gen_qemu_ld16u(tmp, addr, index);
     return tmp;
 }
-static inline TCGv gen_ld32(TCGv addr, int index)
-{
+static inline TCGv gen_ld32(TCGv addr, int index) {
     TCGv tmp = tcg_temp_new_i32();
     tcg_gen_qemu_ld32u(tmp, addr, index);
     return tmp;
 }
-static inline TCGv_i64 gen_ld64(TCGv addr, int index)
-{
+static inline TCGv_i64 gen_ld64(TCGv addr, int index) {
     TCGv_i64 tmp = tcg_temp_new_i64();
     tcg_gen_qemu_ld64(tmp, addr, index);
     return tmp;
 }
-static inline void gen_st8(TCGv val, TCGv addr, int index)
-{
+static inline void gen_st8(TCGv val, TCGv addr, int index) {
     tcg_gen_qemu_st8(val, addr, index);
     tcg_temp_free_i32(val);
 }
-static inline void gen_st16(TCGv val, TCGv addr, int index)
-{
+static inline void gen_st16(TCGv val, TCGv addr, int index) {
     tcg_gen_qemu_st16(val, addr, index);
     tcg_temp_free_i32(val);
 }
-static inline void gen_st32(TCGv val, TCGv addr, int index)
-{
+static inline void gen_st32(TCGv val, TCGv addr, int index) {
     tcg_gen_qemu_st32(val, addr, index);
     tcg_temp_free_i32(val);
 }
-static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
-{
+static inline void gen_st64(TCGv_i64 val, TCGv addr, int index) {
     tcg_gen_qemu_st64(val, addr, index);
     tcg_temp_free_i64(val);
 }
 
-static inline void gen_set_pc_im(uint32_t val)
-{
+static inline void gen_set_pc_im(uint32_t val) {
     tcg_gen_movi_i32(cpu_R[15], val);
 }
 
 /* Force a TB lookup after an instruction that changes the CPU state. */
-static inline void gen_lookup_tb(DisasContext *s)
-{
+static inline void gen_lookup_tb(DisasContext *s) {
     tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
     s->is_jmp = DISAS_UPDATE;
 }
 
-static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
-                                       TCGv var)
-{
+static inline void gen_add_data_offset(DisasContext *s, unsigned int insn, TCGv var) {
     int val, rm, shift, shiftop;
     TCGv offset;
 
@@ -879,7 +858,7 @@ static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
         tcg_gen_addi_i32(var, var, val);
     } else {
         /* shift/register */
-        rm = (insn) & 0xf;
+        rm = (insn) &0xf;
         shift = (insn >> 7) & 0x1f;
         shiftop = (insn >> 5) & 3;
         offset = load_reg(s, rm);
@@ -892,9 +871,7 @@ static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
     }
 }
 
-static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
-                                        int extra, TCGv var)
-{
+static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn, int extra, TCGv var) {
     int val, rm;
     TCGv offset;
 
@@ -910,7 +887,7 @@ static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
         /* register */
         if (extra)
             tcg_gen_addi_i32(var, var, extra);
-        rm = (insn) & 0xf;
+        rm = (insn) &0xf;
         offset = load_reg(s, rm);
         if (!(insn & (1 << 23)))
             tcg_gen_sub_i32(var, var, offset);
@@ -920,8 +897,7 @@ static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
     }
 }
 
-static TCGv_ptr get_fpstatus_ptr(int neon)
-{
+static TCGv_ptr get_fpstatus_ptr(int neon) {
     TCGv_ptr statusptr = tcg_temp_new_ptr();
     int offset;
     if (neon) {
@@ -933,17 +909,16 @@ static TCGv_ptr get_fpstatus_ptr(int neon)
     return statusptr;
 }
 
-#define VFP_OP2(name) \
-static inline void gen_vfp_##name(int dp) \
-{ \
-    TCGv_ptr fpst = get_fpstatus_ptr(0); \
-    if (dp) { \
-        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
-    } else { \
-        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
-    } \
-    tcg_temp_free_ptr(fpst); \
-}
+#define VFP_OP2(name)                                                  \
+    static inline void gen_vfp_##name(int dp) {                        \
+        TCGv_ptr fpst = get_fpstatus_ptr(0);                           \
+        if (dp) {                                                      \
+            gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
+        } else {                                                       \
+            gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
+        }                                                              \
+        tcg_temp_free_ptr(fpst);                                       \
+    }
 
 VFP_OP2(add)
 VFP_OP2(sub)
@@ -952,8 +927,7 @@ VFP_OP2(div)
 
 #undef VFP_OP2
 
-static inline void gen_vfp_F1_mul(int dp)
-{
+static inline void gen_vfp_F1_mul(int dp) {
     /* Like gen_vfp_mul() but put result in F1 */
     TCGv_ptr fpst = get_fpstatus_ptr(0);
     if (dp) {
@@ -964,8 +938,7 @@ static inline void gen_vfp_F1_mul(int dp)
     tcg_temp_free_ptr(fpst);
 }
 
-static inline void gen_vfp_F1_neg(int dp)
-{
+static inline void gen_vfp_F1_neg(int dp) {
     /* Like gen_vfp_neg() but put result in F1 */
     if (dp) {
         gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
@@ -974,81 +947,73 @@ static inline void gen_vfp_F1_neg(int dp)
     }
 }
 
-static inline void gen_vfp_abs(int dp)
-{
+static inline void gen_vfp_abs(int dp) {
     if (dp)
         gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
     else
         gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
 }
 
-static inline void gen_vfp_neg(int dp)
-{
+static inline void gen_vfp_neg(int dp) {
     if (dp)
         gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
     else
         gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
 }
 
-static inline void gen_vfp_sqrt(int dp)
-{
+static inline void gen_vfp_sqrt(int dp) {
     if (dp)
         gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
     else
         gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
 }
 
-static inline void gen_vfp_cmp(int dp)
-{
+static inline void gen_vfp_cmp(int dp) {
     if (dp)
         gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
     else
         gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
 }
 
-static inline void gen_vfp_cmpe(int dp)
-{
+static inline void gen_vfp_cmpe(int dp) {
     if (dp)
         gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
     else
         gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
 }
 
-static inline void gen_vfp_F1_ld0(int dp)
-{
+static inline void gen_vfp_F1_ld0(int dp) {
     if (dp)
         tcg_gen_movi_i64(cpu_F1d, 0);
     else
         tcg_gen_movi_i32(cpu_F1s, 0);
 }
 
-#define VFP_GEN_ITOF(name) \
-static inline void gen_vfp_##name(int dp, int neon) \
-{ \
-    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
-    if (dp) { \
-        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
-    } else { \
-        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
-    } \
-    tcg_temp_free_ptr(statusptr); \
-}
+#define VFP_GEN_ITOF(name)                                         \
+    static inline void gen_vfp_##name(int dp, int neon) {          \
+        TCGv_ptr statusptr = get_fpstatus_ptr(neon);               \
+        if (dp) {                                                  \
+            gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
+        } else {                                                   \
+            gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
+        }                                                          \
+        tcg_temp_free_ptr(statusptr);                              \
+    }
 
 VFP_GEN_ITOF(uito)
 VFP_GEN_ITOF(sito)
 #undef VFP_GEN_ITOF
 
-#define VFP_GEN_FTOI(name) \
-static inline void gen_vfp_##name(int dp, int neon) \
-{ \
-    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
-    if (dp) { \
-        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
-    } else { \
-        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
-    } \
-    tcg_temp_free_ptr(statusptr); \
-}
+#define VFP_GEN_FTOI(name)                                         \
+    static inline void gen_vfp_##name(int dp, int neon) {          \
+        TCGv_ptr statusptr = get_fpstatus_ptr(neon);               \
+        if (dp) {                                                  \
+            gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
+        } else {                                                   \
+            gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
+        }                                                          \
+        tcg_temp_free_ptr(statusptr);                              \
+    }
 
 VFP_GEN_FTOI(toui)
 VFP_GEN_FTOI(touiz)
@@ -1056,19 +1021,18 @@ VFP_GEN_FTOI(tosi)
 VFP_GEN_FTOI(tosiz)
 #undef VFP_GEN_FTOI
 
-#define VFP_GEN_FIX(name) \
-static inline void gen_vfp_##name(int dp, int shift, int neon) \
-{ \
-    TCGv tmp_shift = tcg_const_i32(shift); \
-    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
-    if (dp) { \
-        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
-    } else { \
-        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
-    } \
-    tcg_temp_free_i32(tmp_shift); \
-    tcg_temp_free_ptr(statusptr); \
-}
+#define VFP_GEN_FIX(name)                                                     \
+    static inline void gen_vfp_##name(int dp, int shift, int neon) {          \
+        TCGv tmp_shift = tcg_const_i32(shift);                                \
+        TCGv_ptr statusptr = get_fpstatus_ptr(neon);                          \
+        if (dp) {                                                             \
+            gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
+        } else {                                                              \
+            gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
+        }                                                                     \
+        tcg_temp_free_i32(tmp_shift);                                         \
+        tcg_temp_free_ptr(statusptr);                                         \
+    }
 VFP_GEN_FIX(tosh)
 VFP_GEN_FIX(tosl)
 VFP_GEN_FIX(touh)
@@ -1079,66 +1043,54 @@ VFP_GEN_FIX(uhto)
 VFP_GEN_FIX(ulto)
 #undef VFP_GEN_FIX
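For reference, this is what the single instantiation VFP_GEN_FIX(tosh) above expands to after preprocessing (whitespace adjusted for readability; the other seven instantiations follow the same pattern):

    static inline void gen_vfp_tosh(int dp, int shift, int neon) {
        TCGv tmp_shift = tcg_const_i32(shift);              /* immediate shift as a TCG constant */
        TCGv_ptr statusptr = get_fpstatus_ptr(neon);        /* VFP or Neon float_status block */
        if (dp) {
            gen_helper_vfp_toshd(cpu_F0d, cpu_F0d, tmp_shift, statusptr);
        } else {
            gen_helper_vfp_toshs(cpu_F0s, cpu_F0s, tmp_shift, statusptr);
        }
        tcg_temp_free_i32(tmp_shift);
        tcg_temp_free_ptr(statusptr);
    }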
 
-static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
-{
+static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr) {
     if (dp)
         tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
     else
         tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
 }
 
-static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
-{
+static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr) {
     if (dp)
         tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
     else
         tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
 }
 
-static inline long
-vfp_reg_offset (int dp, int reg)
-{
+static inline long vfp_reg_offset(int dp, int reg) {
     if (dp)
         return offsetof(CPUARMState, vfp.regs[reg]);
     else if (reg & 1) {
-        return offsetof(CPUARMState, vfp.regs[reg >> 1])
-          + offsetof(CPU_DoubleU, l.upper);
+        return offsetof(CPUARMState, vfp.regs[reg >> 1]) + offsetof(CPU_DoubleU, l.upper);
     } else {
-        return offsetof(CPUARMState, vfp.regs[reg >> 1])
-          + offsetof(CPU_DoubleU, l.lower);
+        return offsetof(CPUARMState, vfp.regs[reg >> 1]) + offsetof(CPU_DoubleU, l.lower);
     }
 }
 
 /* Return the offset of a 32-bit piece of a NEON register.
    zero is the least significant end of the register. */
-static inline long
-neon_reg_offset (int reg, int n)
-{
+static inline long neon_reg_offset(int reg, int n) {
     int sreg;
     sreg = reg * 2 + n;
     return vfp_reg_offset(0, sreg);
 }
 
-static TCGv neon_load_reg(int reg, int pass)
-{
+static TCGv neon_load_reg(int reg, int pass) {
     TCGv tmp = tcg_temp_new_i32();
     tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
     return tmp;
 }
 
-static void neon_store_reg(int reg, int pass, TCGv var)
-{
+static void neon_store_reg(int reg, int pass, TCGv var) {
     tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
     tcg_temp_free_i32(var);
 }
 
-static inline void neon_load_reg64(TCGv_i64 var, int reg)
-{
+static inline void neon_load_reg64(TCGv_i64 var, int reg) {
     tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
 }
 
-static inline void neon_store_reg64(TCGv_i64 var, int reg)
-{
+static inline void neon_store_reg64(TCGv_i64 var, int reg) {
     tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
 }
 
@@ -1147,107 +1099,92 @@ static inline void neon_store_reg64(TCGv_i64 var, int reg)
 #define tcg_gen_st_f32 tcg_gen_st_i32
 #define tcg_gen_st_f64 tcg_gen_st_i64
 
-static inline void gen_mov_F0_vreg(int dp, int reg)
-{
+static inline void gen_mov_F0_vreg(int dp, int reg) {
     if (dp)
         tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
     else
         tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
 }
 
-static inline void gen_mov_F1_vreg(int dp, int reg)
-{
+static inline void gen_mov_F1_vreg(int dp, int reg) {
     if (dp)
         tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
     else
         tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
 }
 
-static inline void gen_mov_vreg_F0(int dp, int reg)
-{
+static inline void gen_mov_vreg_F0(int dp, int reg) {
     if (dp)
         tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
     else
         tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
 }
 
-#define ARM_CP_RW_BIT (1 << 20)
+#define ARM_CP_RW_BIT (1 << 20)
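As a side note, the s<->d register aliasing that vfp_reg_offset() encodes (s<2n> in the low half of d<n>, s<2n+1> in the high half, per the CPU_DoubleU union on a little-endian host) can be sketched and checked like this; mock_vfp_reg_offset is a hypothetical stand-in that returns offsets relative to vfp.regs[]:

    #include <assert.h>

    static long mock_vfp_reg_offset(int dp, int reg) {
        if (dp)
            return reg * 8;                     /* d<reg> occupies a full 64-bit slot */
        /* s<reg> is one 32-bit half of d<reg/2>: even -> lower, odd -> upper */
        return (reg >> 1) * 8 + ((reg & 1) ? 4 : 0);
    }

    int main(void) {
        assert(mock_vfp_reg_offset(1, 2) == 16); /* d2             */
        assert(mock_vfp_reg_offset(0, 4) == 16); /* s4 = low(d2)   */
        assert(mock_vfp_reg_offset(0, 5) == 20); /* s5 = high(d2)  */
        return 0;
    }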
 
-static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
-{
+static inline void iwmmxt_load_reg(TCGv_i64 var, int reg) {
     tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
 }
 
-static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
-{
+static inline void iwmmxt_store_reg(TCGv_i64 var, int reg) {
     tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
 }
 
-static inline TCGv iwmmxt_load_creg(int reg)
-{
+static inline TCGv iwmmxt_load_creg(int reg) {
     TCGv var = tcg_temp_new_i32();
     tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
     return var;
 }
 
-static inline void iwmmxt_store_creg(int reg, TCGv var)
-{
+static inline void iwmmxt_store_creg(int reg, TCGv var) {
     tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
     tcg_temp_free_i32(var);
 }
 
-static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
-{
+static inline void gen_op_iwmmxt_movq_wRn_M0(int rn) {
     iwmmxt_store_reg(cpu_M0, rn);
 }
 
-static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
-{
+static inline void gen_op_iwmmxt_movq_M0_wRn(int rn) {
     iwmmxt_load_reg(cpu_M0, rn);
 }
 
-static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
-{
+static inline void gen_op_iwmmxt_orq_M0_wRn(int rn) {
     iwmmxt_load_reg(cpu_V1, rn);
     tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
 }
 
-static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
-{
+static inline void gen_op_iwmmxt_andq_M0_wRn(int rn) {
     iwmmxt_load_reg(cpu_V1, rn);
     tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
 }
 
-static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
-{
+static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn) {
     iwmmxt_load_reg(cpu_V1, rn);
     tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
 }
 
-#define IWMMXT_OP(name) \
-static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
-{ \
-    iwmmxt_load_reg(cpu_V1, rn); \
-    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
-}
+#define IWMMXT_OP(name)                                        \
+    static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) { \
+        iwmmxt_load_reg(cpu_V1, rn);                           \
+        gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1);      \
+    }
 
-#define IWMMXT_OP_ENV(name) \
-static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
-{ \
-    iwmmxt_load_reg(cpu_V1, rn); \
-    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
-}
+#define IWMMXT_OP_ENV(name)                                         \
+    static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) {      \
+        iwmmxt_load_reg(cpu_V1, rn);                                \
+        gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1);  \
+    }
 
 #define IWMMXT_OP_ENV_SIZE(name) \
-IWMMXT_OP_ENV(name##b) \
-IWMMXT_OP_ENV(name##w) \
-IWMMXT_OP_ENV(name##l)
+    IWMMXT_OP_ENV(name##b)       \
+    IWMMXT_OP_ENV(name##w)       \
+    IWMMXT_OP_ENV(name##l)
 
-#define IWMMXT_OP_ENV1(name) \
-static inline void gen_op_iwmmxt_##name##_M0(void) \
-{ \
-    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
-}
+#define IWMMXT_OP_ENV1(name)                             \
+    static inline void gen_op_iwmmxt_##name##_M0(void) { \
+        gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
+    }
 
 IWMMXT_OP(maddsq)
 IWMMXT_OP(madduq)
@@ -1297,7 +1234,7 @@ IWMMXT_OP_ENV(avgb1)
 IWMMXT_OP_ENV(avgw0)
 IWMMXT_OP_ENV(avgw1)
 
-//IWMMXT_OP(msadb)
+// IWMMXT_OP(msadb)
 
 IWMMXT_OP_ENV(packuw)
 IWMMXT_OP_ENV(packul)
@@ -1306,38 +1243,33 @@ IWMMXT_OP_ENV(packsw)
 IWMMXT_OP_ENV(packsl)
 IWMMXT_OP_ENV(packsq)
 
-static void gen_op_iwmmxt_set_mup(void)
-{
+static void gen_op_iwmmxt_set_mup(void) {
     TCGv tmp;
     tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
     tcg_gen_ori_i32(tmp, tmp, 2);
     store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
 }
 
-static void gen_op_iwmmxt_set_cup(void)
-{
+static void gen_op_iwmmxt_set_cup(void) {
     TCGv tmp;
     tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
     tcg_gen_ori_i32(tmp, tmp, 1);
     store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
 }
 
-static void gen_op_iwmmxt_setpsr_nz(void)
-{
+static void gen_op_iwmmxt_setpsr_nz(void) {
     TCGv tmp = tcg_temp_new_i32();
     gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
     store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
 }
 
-static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
-{
+static inline void gen_op_iwmmxt_addl_M0_wRn(int rn) {
     iwmmxt_load_reg(cpu_V1, rn);
     tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
     tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
 }
 
-static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
-{
+static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest) {
     int rd;
     uint32_t offset;
     TCGv tmp;
@@ -1370,8 +1302,7 @@ static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
     return 0;
 }
 
-static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
-{
+static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest) {
     int rd = (insn >> 0) & 0xf;
     TCGv tmp;
 
@@ -1394,8 +1325,7 @@ static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
 
 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
    (ie. an undefined instruction). */
-static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
-{
+static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn) {
     int rd, wrd;
     int rdhi, rdlo, rd0, rd1, i;
     TCGv addr;
@@ -1406,12 +1336,12 @@ static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
         wrd = insn & 0xf;
         rdlo = (insn >> 12) & 0xf;
         rdhi = (insn >> 16) & 0xf;
-        if (insn & ARM_CP_RW_BIT) { /* TMRRC */
+        if (insn & ARM_CP_RW_BIT) { /* TMRRC */
             iwmmxt_load_reg(cpu_V0, wrd);
             tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
             tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
             tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
-        } else { /* TMCRR */
+        } else { /* TMCRR */
             tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
             iwmmxt_store_reg(cpu_V0, wrd);
             gen_op_iwmmxt_set_mup();
@@ -1426,23 +1356,23 @@ static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
             return 1;
         }
         if (insn & ARM_CP_RW_BIT) {
-            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
+            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
                 tmp = tcg_temp_new_i32();
                 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                 iwmmxt_store_creg(wrd, tmp);
             } else {
                 i = 1;
                 if (insn & (1 << 8)) {
-                    if (insn & (1 << 22)) { /* WLDRD */
+                    if (insn & (1 << 22)) { /* WLDRD */
                         tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                         i = 0;
-                    } else { /* WLDRW wRd */
+                    } else { /* WLDRW wRd */
                         tmp = gen_ld32(addr, IS_USER(s));
                     }
                 } else {
-                    if (insn & (1 << 22)) { /* WLDRH */
+                    if (insn & (1 << 22)) { /* WLDRH */
                         tmp = gen_ld16u(addr, IS_USER(s));
-                    } else { /* WLDRB */
+                    } else { /* WLDRB */
                         tmp = gen_ld8u(addr, IS_USER(s));
                     }
                 }
@@ -1453,25 +1383,25 @@ static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
                 gen_op_iwmmxt_movq_wRn_M0(wrd);
             }
         } else {
-            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
+            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
                 tmp = iwmmxt_load_creg(wrd);
                 gen_st32(tmp, addr, IS_USER(s));
             } else {
                 gen_op_iwmmxt_movq_M0_wRn(wrd);
                 tmp = tcg_temp_new_i32();
                 if (insn & (1 << 8)) {
-                    if (insn & (1 << 22)) { /* WSTRD */
+                    if (insn & (1 << 22)) { /* WSTRD */
                         tcg_temp_free_i32(tmp);
                         tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
-                    } else { /* WSTRW wRd */
+                    } else { /* WSTRW wRd */
                         tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                         gen_st32(tmp, addr, IS_USER(s));
                     }
                 } else {
-                    if (insn & (1 << 22)) { /* WSTRH */
+                    if (insn & (1 << 22)) { /* WSTRH */
                         tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                         gen_st16(tmp, addr, IS_USER(s));
-                    } else { /* WSTRB */
+                    } else { /* WSTRB */
                         tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                         gen_st8(tmp, addr, IS_USER(s));
                     }
@@ -1486,911 +1416,1083 @@ static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
         return 1;
 
     switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
-    case 0x000: /* WOR */
-        wrd = (insn >> 12) & 0xf;
-        rd0 = (insn >> 0) & 0xf;
-        rd1 = (insn >> 16) & 0xf;
-        gen_op_iwmmxt_movq_M0_wRn(rd0);
-        gen_op_iwmmxt_orq_M0_wRn(rd1);
-        gen_op_iwmmxt_setpsr_nz();
-        gen_op_iwmmxt_movq_wRn_M0(wrd);
-        gen_op_iwmmxt_set_mup();
-        gen_op_iwmmxt_set_cup();
-        break;
-    case 0x011: /* TMCR */
-        if (insn & 0xf)
-            return 1;
-        rd = (insn >> 12) & 0xf;
-        wrd = (insn >> 16) & 0xf;
-        switch (wrd) {
-        case ARM_IWMMXT_wCID:
-        case ARM_IWMMXT_wCASF:
-            break;
-        case ARM_IWMMXT_wCon:
-            gen_op_iwmmxt_set_cup();
-            /* Fall through. */
-        case ARM_IWMMXT_wCSSF:
-            tmp = iwmmxt_load_creg(wrd);
-            tmp2 = load_reg(s, rd);
-            tcg_gen_andc_i32(tmp, tmp, tmp2);
-            tcg_temp_free_i32(tmp2);
-            iwmmxt_store_creg(wrd, tmp);
-            break;
-        case ARM_IWMMXT_wCGR0:
-        case ARM_IWMMXT_wCGR1:
-        case ARM_IWMMXT_wCGR2:
-        case ARM_IWMMXT_wCGR3:
+        case 0x000: /* WOR */
+            wrd = (insn >> 12) & 0xf;
+            rd0 = (insn >> 0) & 0xf;
+            rd1 = (insn >> 16) & 0xf;
+            gen_op_iwmmxt_movq_M0_wRn(rd0);
+            gen_op_iwmmxt_orq_M0_wRn(rd1);
+            gen_op_iwmmxt_setpsr_nz();
+            gen_op_iwmmxt_movq_wRn_M0(wrd);
+            gen_op_iwmmxt_set_mup();
             gen_op_iwmmxt_set_cup();
+        case 0x011: /* TMCR */
+            if (insn & 0xf)
+                return 1;
+            rd = (insn >> 12) & 0xf;
+            wrd = (insn >> 16) & 0xf;
+            switch (wrd) {
+                case ARM_IWMMXT_wCID:
+                case ARM_IWMMXT_wCASF:
+                    break;
+                case ARM_IWMMXT_wCon:
+                    gen_op_iwmmxt_set_cup();
+                    /* Fall through. */
+                case ARM_IWMMXT_wCSSF:
+                    tmp = iwmmxt_load_creg(wrd);
+                    tmp2 = load_reg(s, rd);
+                    tcg_gen_andc_i32(tmp, tmp, tmp2);
+                    tcg_temp_free_i32(tmp2);
+                    iwmmxt_store_creg(wrd, tmp);
+                    break;
+                case ARM_IWMMXT_wCGR0:
+                case ARM_IWMMXT_wCGR1:
+                case ARM_IWMMXT_wCGR2:
+                case ARM_IWMMXT_wCGR3:
+                    gen_op_iwmmxt_set_cup();
+                    tmp = load_reg(s, rd);
+                    iwmmxt_store_creg(wrd, tmp);
+                    break;
+                default:
+                    return 1;
+            }
             break;
-    case 0x100: /* WXOR */
-        wrd = (insn >> 12) & 0xf;
-        rd0 = (insn >> 0) & 0xf;
-        rd1 = (insn >> 16) & 0xf;
-        gen_op_iwmmxt_movq_M0_wRn(rd0);
-        gen_op_iwmmxt_xorq_M0_wRn(rd1);
-        gen_op_iwmmxt_setpsr_nz();
-        gen_op_iwmmxt_movq_wRn_M0(wrd);
-        gen_op_iwmmxt_set_mup();
-        gen_op_iwmmxt_set_cup();
-        break;
-    case 0x111: /* TMRC */
-        if (insn & 0xf)
-            return 1;
-        rd = (insn >> 12) & 0xf;
-        wrd = (insn >> 16) & 0xf;
-        tmp = iwmmxt_load_creg(wrd);
-        store_reg(s, rd, tmp);
-        break;
-    case 0x300: /* WANDN */
-        wrd = (insn >> 12) & 0xf;
-        rd0 = (insn >> 0) & 0xf;
-        rd1 = (insn >> 16) & 0xf;
-        gen_op_iwmmxt_movq_M0_wRn(rd0);
-        tcg_gen_neg_i64(cpu_M0, cpu_M0);
-        gen_op_iwmmxt_andq_M0_wRn(rd1);
-        gen_op_iwmmxt_setpsr_nz();
-        gen_op_iwmmxt_movq_wRn_M0(wrd);
-        gen_op_iwmmxt_set_mup();
-        gen_op_iwmmxt_set_cup();
-        break;
-    case 0x200: /* WAND */
-        wrd = (insn >> 12) & 0xf;
-        rd0 = (insn >> 0) & 0xf;
-        rd1 = (insn >> 16) & 0xf;
-        gen_op_iwmmxt_movq_M0_wRn(rd0);
-        gen_op_iwmmxt_andq_M0_wRn(rd1);
-        gen_op_iwmmxt_setpsr_nz();
-        gen_op_iwmmxt_movq_wRn_M0(wrd);
-        gen_op_iwmmxt_set_mup();
-        gen_op_iwmmxt_set_cup();
-        break;
-    case 0x810: case 0xa10: /* WMADD */
-        wrd = (insn >> 12) & 0xf;
-        rd0 = (insn >> 0) & 0xf;
-        rd1 = (insn >> 16) & 0xf;
-        gen_op_iwmmxt_movq_M0_wRn(rd0);
-        if (insn & (1 << 21))
-            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
-        else
-            gen_op_iwmmxt_madduq_M0_wRn(rd1);
-        gen_op_iwmmxt_movq_wRn_M0(wrd);
-        gen_op_iwmmxt_set_mup();
-        break;
-    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
-        wrd = (insn >> 12) & 0xf;
-        rd0 = (insn >> 16) & 0xf;
-        rd1 = (insn >> 0) & 0xf;
-        gen_op_iwmmxt_movq_M0_wRn(rd0);
-        switch ((insn >> 22) & 3) {
-        case 0:
-            gen_op_iwmmxt_unpacklb_M0_wRn(rd1); break;
-        case 1:
-            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
+        case 0x100: /* WXOR */
+            wrd = (insn >> 12) & 0xf;
+            rd0 = (insn >> 0) & 0xf;
+            rd1 = (insn >> 16) & 0xf;
+            gen_op_iwmmxt_movq_M0_wRn(rd0);
+            gen_op_iwmmxt_xorq_M0_wRn(rd1);
+            gen_op_iwmmxt_setpsr_nz();
+            gen_op_iwmmxt_movq_wRn_M0(wrd);
+            gen_op_iwmmxt_set_mup();
+            gen_op_iwmmxt_set_cup();
             break;
-        case 2:
-            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
+        case 0x111: /* TMRC */
+            if (insn & 0xf)
+                return 1;
+            rd = (insn >> 12) & 0xf;
+            wrd = (insn >> 16) & 0xf;
+            tmp = iwmmxt_load_creg(wrd);
+            store_reg(s, rd, tmp);
             break;
-        case 3:
-            return 1;
-        }
-        gen_op_iwmmxt_movq_wRn_M0(wrd);
-        gen_op_iwmmxt_set_mup();
-        gen_op_iwmmxt_set_cup();
-        break;
-    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
-        wrd = (insn >> 12) & 0xf;
-        rd0 = (insn >> 16) & 0xf;
-        rd1 = (insn >> 0) & 0xf;
-        gen_op_iwmmxt_movq_M0_wRn(rd0);
-        switch ((insn >> 22) & 3) {
-        case 0:
-            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
+        case 0x300: /* WANDN */
+            wrd = (insn >> 12) & 0xf;
+            rd0 = (insn >> 0) & 0xf;
+            rd1 = (insn >> 16) & 0xf;
+            gen_op_iwmmxt_movq_M0_wRn(rd0);
+            tcg_gen_neg_i64(cpu_M0, cpu_M0);
+            gen_op_iwmmxt_andq_M0_wRn(rd1);
+            gen_op_iwmmxt_setpsr_nz();
+            gen_op_iwmmxt_movq_wRn_M0(wrd);
+            gen_op_iwmmxt_set_mup();
+            gen_op_iwmmxt_set_cup();
             break;
-        case 1:
-            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
+        case 0x200: /* WAND */
+            wrd = (insn >> 12) & 0xf;
+            rd0 = (insn >> 0) & 0xf;
+            rd1 = (insn >> 16) & 0xf;
+            gen_op_iwmmxt_movq_M0_wRn(rd0);
+            gen_op_iwmmxt_andq_M0_wRn(rd1);
+            gen_op_iwmmxt_setpsr_nz();
+            gen_op_iwmmxt_movq_wRn_M0(wrd);
+            gen_op_iwmmxt_set_mup();
+            gen_op_iwmmxt_set_cup();
             break;
-        case 2:
-            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
+        case 0x810:
+        case 0xa10: /* WMADD */
+            wrd = (insn >> 12) & 0xf;
+            rd0 = (insn >> 0) & 0xf;
+            rd1 = (insn >> 16) & 0xf;
+            gen_op_iwmmxt_movq_M0_wRn(rd0);
+            if (insn & (1 << 21))
+                gen_op_iwmmxt_maddsq_M0_wRn(rd1);
            else
-            gen_op_iwmmxt_macuw_M0_wRn(rd1);
-        if (!(insn & (1 << 20))) {
-            iwmmxt_load_reg(cpu_V1, wrd);
-            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
-        }
-        gen_op_iwmmxt_movq_wRn_M0(wrd);
-        gen_op_iwmmxt_set_mup();
-        break;
-    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
-        wrd = (insn >> 12) & 0xf;
-        rd0 = (insn >> 16) & 0xf;
-        rd1 = (insn >> 0) & 0xf;
-        gen_op_iwmmxt_movq_M0_wRn(rd0);
-        switch ((insn >> 22) & 3) {
-        case 0:
-            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
-            break;
-        case 1:
-            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
+                gen_op_iwmmxt_madduq_M0_wRn(rd1);
+            gen_op_iwmmxt_movq_wRn_M0(wrd);
+            gen_op_iwmmxt_set_mup();
+            break;
+        case 0x10e:
+        case 0x50e:
+        case 0x90e:
+        case 0xd0e: /* WUNPCKIL */
+            wrd = (insn >> 12) & 0xf;
+            rd0 = (insn >> 16) & 0xf;
+            rd1 = (insn >> 0) & 0xf;
+            gen_op_iwmmxt_movq_M0_wRn(rd0);
+            switch ((insn >> 22) & 3) {
+                case 0:
+                    gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
+                    break;
+                case 1:
+                    gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
+                    break;
+                case 2:
+                    gen_op_iwmmxt_unpackll_M0_wRn(rd1);
+                    break;
+                case 3:
+                    return 1;
+            }
+            gen_op_iwmmxt_movq_wRn_M0(wrd);
+            gen_op_iwmmxt_set_mup();
+            gen_op_iwmmxt_set_cup();
            break;
-        case 2:
-            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
+        case 0x10c:
+        case 0x50c:
+        case 0x90c:
+        case 0xd0c: /* WUNPCKIH */
+            wrd = (insn >> 12) & 0xf;
+            rd0 = (insn >> 16) & 0xf;
+            rd1 = (insn >> 0) & 0xf;
+            gen_op_iwmmxt_movq_M0_wRn(rd0);
+            switch ((insn >> 22) & 3) {
+                case 0:
+                    gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
+                    break;
+                case 1:
+                    gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
+                    break;
+                case 2:
+                    gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
+                    break;
+                case 3:
+                    return 1;
+            }
+            gen_op_iwmmxt_movq_wRn_M0(wrd);
+            gen_op_iwmmxt_set_mup();
+            gen_op_iwmmxt_set_cup();
             break;
-        case 3:
-            return 1;
-        }
-        gen_op_iwmmxt_movq_wRn_M0(wrd);
-        gen_op_iwmmxt_set_mup();
-        gen_op_iwmmxt_set_cup();
-        break;
-    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
-        wrd = (insn >> 12) & 0xf;
-        rd0 = (insn >> 16) & 0xf;
-        rd1 = (insn >> 0) & 0xf;
-        gen_op_iwmmxt_movq_M0_wRn(rd0);
-        if (insn & (1 << 22)) {
-            if (insn & (1 << 20))
-                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
-            else
-                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
-        } else {
-            if (insn & (1 << 20))
-                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
+        case 0x012:
+        case 0x112:
+        case 0x412:
+        case 0x512: /* WSAD */
+            wrd = (insn >> 12) & 0xf;
+            rd0 = (insn >> 16) & 0xf;
+            rd1 = (insn >> 0) & 0xf;
+            gen_op_iwmmxt_movq_M0_wRn(rd0);
+            if (insn & (1 << 22))
+                gen_op_iwmmxt_sadw_M0_wRn(rd1);
             else
-                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
-        }
-        gen_op_iwmmxt_movq_wRn_M0(wrd);
-        gen_op_iwmmxt_set_mup();
-        gen_op_iwmmxt_set_cup();
-        break;
-    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
-        wrd = (insn >> 12) & 0xf;
-        rd0 = (insn >> 16) & 0xf;
-        rd1 = (insn >> 0) & 0xf;
-        gen_op_iwmmxt_movq_M0_wRn(rd0);
-        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
-        tcg_gen_andi_i32(tmp, tmp, 7);
-        iwmmxt_load_reg(cpu_V1, rd1);
-        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
-        tcg_temp_free_i32(tmp);
-        gen_op_iwmmxt_movq_wRn_M0(wrd);
-        gen_op_iwmmxt_set_mup();
-        break;
-    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
-        if (((insn >> 6) & 3) == 3)
-            return 1;
-        rd = (insn >> 12) & 0xf;
-        wrd = (insn >> 16) & 0xf;
-        tmp = load_reg(s, rd);
-        gen_op_iwmmxt_movq_M0_wRn(wrd);
-        switch ((insn >> 6) & 3) {
-        case 0:
-            tmp2 = tcg_const_i32(0xff);
-            tmp3 = tcg_const_i32((insn & 7) << 3);
-            break;
-        case 1:
-            tmp2 = tcg_const_i32(0xffff);
-            tmp3 = tcg_const_i32((insn & 3) << 4);
-            break;
-        case 2:
-            tmp2 = tcg_const_i32(0xffffffff);
-            tmp3 = tcg_const_i32((insn & 1) << 5);
-            break;
-        default:
-            TCGV_UNUSED(tmp2);
-            TCGV_UNUSED(tmp3);
-        }
-        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
-        tcg_temp_free(tmp3);
-        tcg_temp_free(tmp2);
-        tcg_temp_free_i32(tmp);
-        gen_op_iwmmxt_movq_wRn_M0(wrd);
-        gen_op_iwmmxt_set_mup();
-        break;
-    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
-        rd = (insn >> 12) & 0xf;
-        wrd = (insn >> 16) & 0xf;
-        if (rd == 15 || ((insn >> 22) & 3) == 3)
-            return 1;
-        gen_op_iwmmxt_movq_M0_wRn(wrd);
-        tmp = tcg_temp_new_i32();
-        switch ((insn >> 22) & 3) {
-        case 0:
-            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
-            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
-            if (insn & 8) {
-                tcg_gen_ext8s_i32(tmp, tmp);
+                gen_op_iwmmxt_sadb_M0_wRn(rd1);
+            if (!(insn & (1 << 20)))
+                gen_op_iwmmxt_addl_M0_wRn(wrd);
+            gen_op_iwmmxt_movq_wRn_M0(wrd);
+            gen_op_iwmmxt_set_mup();
+            break;
+        case 0x010:
+        case 0x110:
+        case 0x210:
+        case 0x310: /* WMUL */
+            wrd = (insn >> 12) & 0xf;
+            rd0 = (insn >> 16) & 0xf;
+            rd1 = (insn >> 0) & 0xf;
+            gen_op_iwmmxt_movq_M0_wRn(rd0);
+            if (insn & (1 << 21)) {
+                if (insn & (1 << 20))
+                    gen_op_iwmmxt_mulshw_M0_wRn(rd1);
+                else
+                    gen_op_iwmmxt_mulslw_M0_wRn(rd1);
             } else {
-                tcg_gen_andi_i32(tmp, tmp, 0xff);
+                if (insn & (1 << 20))
+                    gen_op_iwmmxt_muluhw_M0_wRn(rd1);
+                else
+                    gen_op_iwmmxt_mululw_M0_wRn(rd1);
+            }
+            gen_op_iwmmxt_movq_wRn_M0(wrd);
+            gen_op_iwmmxt_set_mup();
+            break;
+        case 0x410:
+        case 0x510:
+        case 0x610:
+        case 0x710: /* WMAC */
+            wrd = (insn >> 12) & 0xf;
+            rd0 = (insn >> 16) & 0xf;
+            rd1 = (insn >> 0) & 0xf;
+            gen_op_iwmmxt_movq_M0_wRn(rd0);
+            if (insn & (1 << 21))
+                gen_op_iwmmxt_macsw_M0_wRn(rd1);
+            else
+                gen_op_iwmmxt_macuw_M0_wRn(rd1);
+            if (!(insn & (1 << 20))) {
+                iwmmxt_load_reg(cpu_V1, wrd);
+                tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
+            }
+            gen_op_iwmmxt_movq_wRn_M0(wrd);
+            gen_op_iwmmxt_set_mup();
+            break;
+        case 0x006:
+        case 0x406:
+        case 0x806:
+        case 0xc06: /* WCMPEQ */
+            wrd = (insn >> 12) & 0xf;
+            rd0 = (insn >> 16) & 0xf;
+            rd1 = (insn >> 0) & 0xf;
+            gen_op_iwmmxt_movq_M0_wRn(rd0);
+            switch ((insn >> 22) & 3) {
+                case 0:
+                    gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
+                    break;
+                case 1:
+                    gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
+                    break;
+                case 2:
+                    gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
+                    break;
+                case 3:
+                    return 1;
            }
+            gen_op_iwmmxt_movq_wRn_M0(wrd);
+            gen_op_iwmmxt_set_mup();
+            gen_op_iwmmxt_set_cup();
             break;
-        case 1:
-            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
-            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
-            if (insn & 8) {
-                tcg_gen_ext16s_i32(tmp, tmp);
+        case 0x800:
+        case 0x900:
+        case 0xc00:
+        case 0xd00: /* WAVG2 */
+            wrd = (insn >> 12) & 0xf;
+            rd0 = (insn >> 16) & 0xf;
+            rd1 = (insn >> 0) & 0xf;
+            gen_op_iwmmxt_movq_M0_wRn(rd0);
+            if (insn & (1 << 22)) {
+                if (insn & (1 << 20))
+                    gen_op_iwmmxt_avgw1_M0_wRn(rd1);
+                else
+                    gen_op_iwmmxt_avgw0_M0_wRn(rd1);
             } else {
-                tcg_gen_andi_i32(tmp, tmp, 0xffff);
+                if (insn & (1 << 20))
+                    gen_op_iwmmxt_avgb1_M0_wRn(rd1);
+                else
+                    gen_op_iwmmxt_avgb0_M0_wRn(rd1);
             }
+            gen_op_iwmmxt_movq_wRn_M0(wrd);
+            gen_op_iwmmxt_set_mup();
+            gen_op_iwmmxt_set_cup();
             break;
-        case 2:
-            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
-            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
-            break;
-        }
-        store_reg(s, rd, tmp);
-        break;
-    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
-        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
-            return 1;
-        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
-        switch ((insn >> 22) & 3) {
-        case 0:
-            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
-            break;
-        case 1:
-            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
-            break;
-        case 2:
-            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
-            break;
-        }
-        tcg_gen_shli_i32(tmp, tmp, 28);
-        gen_set_nzcv(tmp);
-        tcg_temp_free_i32(tmp);
-        break;
-    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
-        if (((insn >> 6) & 3) == 3)
-            return 1;
-        rd = (insn >> 12) & 0xf;
-        wrd = (insn >> 16) & 0xf;
-        tmp = load_reg(s, rd);
-        switch ((insn >> 6) & 3) {
-        case 0:
-            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
-            break;
-        case 1:
-            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
-            break;
-        case 2:
-            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
-            break;
-        }
-        tcg_temp_free_i32(tmp);
-        gen_op_iwmmxt_movq_wRn_M0(wrd);
-        gen_op_iwmmxt_set_mup();
-        break;
-    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
-        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
-            return 1;
-        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
-        tmp2 = tcg_temp_new_i32();
-        tcg_gen_mov_i32(tmp2, tmp);
-        switch ((insn >> 22) & 3) {
-        case 0:
-            for (i = 0; i < 7; i ++) {
-                tcg_gen_shli_i32(tmp2, tmp2, 4);
-                tcg_gen_and_i32(tmp, tmp, tmp2);
-            }
-            break;
-        case 1:
-            for (i = 0; i < 3; i ++) {
-                tcg_gen_shli_i32(tmp2, tmp2, 8);
-                tcg_gen_and_i32(tmp, tmp, tmp2);
+        case 0x802:
+        case 0x902:
+        case 0xa02:
+        case 0xb02: /* WALIGNR */
+            wrd = (insn >> 12) & 0xf;
+            rd0 = (insn >> 16) & 0xf;
+            rd1 = (insn >> 0) & 0xf;
+            gen_op_iwmmxt_movq_M0_wRn(rd0);
+            tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
+            tcg_gen_andi_i32(tmp, tmp, 7);
+            iwmmxt_load_reg(cpu_V1, rd1);
+            gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
+            tcg_temp_free_i32(tmp);
+            gen_op_iwmmxt_movq_wRn_M0(wrd);
+            gen_op_iwmmxt_set_mup();
+            break;
+        case 0x601:
+        case 0x605:
+        case 0x609:
+        case 0x60d: /* TINSR */
+            if (((insn >> 6) & 3) == 3)
+                return 1;
+            rd = (insn >> 12) & 0xf;
+            wrd = (insn >> 16) & 0xf;
+            tmp = load_reg(s, rd);
+            gen_op_iwmmxt_movq_M0_wRn(wrd);
+            switch ((insn >> 6) & 3) {
+                case 0:
+                    tmp2 = tcg_const_i32(0xff);
+                    tmp3 = tcg_const_i32((insn & 7) << 3);
+                    break;
+                case 1:
+                    tmp2 = tcg_const_i32(0xffff);
+                    tmp3 = tcg_const_i32((insn & 3) << 4);
+                    break;
+                case 2:
+                    tmp2 = tcg_const_i32(0xffffffff);
+                    tmp3 = tcg_const_i32((insn & 1) << 5);
+                    break;
+                default:
+                    TCGV_UNUSED(tmp2);
+                    TCGV_UNUSED(tmp3);
             }
+            gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
+            tcg_temp_free(tmp3);
+            tcg_temp_free(tmp2);
+            tcg_temp_free_i32(tmp);
+            gen_op_iwmmxt_movq_wRn_M0(wrd);
+            gen_op_iwmmxt_set_mup();
             break;
-        case 2:
-            tcg_gen_shli_i32(tmp2, tmp2, 16);
-            tcg_gen_and_i32(tmp, tmp, tmp2);
-            break;
-        }
-        gen_set_nzcv(tmp);
-        tcg_temp_free_i32(tmp2);
-        tcg_temp_free_i32(tmp);
-        break;
-    case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
-        wrd = (insn >> 12) & 0xf;
-        rd0 = (insn >> 16) & 0xf;
-        gen_op_iwmmxt_movq_M0_wRn(rd0);
-        switch ((insn >> 22) & 3) {
-        case 0:
-            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
-            break;
-        case 1:
-            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
-            break;
-        case 2:
-            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
-            break;
-        case 3:
-            return 1;
-        }
-        gen_op_iwmmxt_movq_wRn_M0(wrd);
-        gen_op_iwmmxt_set_mup();
-        break;
-    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
-        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
-            return 1;
-        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
-        tmp2 = tcg_temp_new_i32();
-        tcg_gen_mov_i32(tmp2, tmp);
-        switch ((insn >> 22) & 3) {
-        case 0:
-            for (i = 0; i < 7; i ++) {
-                tcg_gen_shli_i32(tmp2, tmp2, 4);
-                tcg_gen_or_i32(tmp, tmp, tmp2);
+        case 0x107:
+        case 0x507:
+        case 0x907:
+        case 0xd07: /* TEXTRM */
+            rd = (insn >> 12) & 0xf;
+            wrd = (insn >> 16) & 0xf;
+            if (rd == 15 || ((insn >> 22) & 3) == 3)
+                return 1;
+            gen_op_iwmmxt_movq_M0_wRn(wrd);
+            tmp = tcg_temp_new_i32();
+            switch ((insn >> 22) & 3) {
+                case 0:
+                    tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
+                    tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+                    if (insn & 8) {
+                        tcg_gen_ext8s_i32(tmp, tmp);
+                    } else {
+                        tcg_gen_andi_i32(tmp, tmp, 0xff);
+                    }
+                    break;
+                case 1:
+                    tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
+                    tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+                    if (insn & 8) {
+                        tcg_gen_ext16s_i32(tmp, tmp);
+                    } else {
+                        tcg_gen_andi_i32(tmp, tmp, 0xffff);
+                    }
+                    break;
+                case 2:
+                    tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
+                    tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+                    break;
            }
+            store_reg(s, rd, tmp);
            break;
-        case 1:
-            for (i = 0; i < 3; i ++) {
-                tcg_gen_shli_i32(tmp2, tmp2, 8);
-                tcg_gen_or_i32(tmp, tmp, tmp2);
+        case 0x117:
+        case 0x517:
+        case 0x917:
+        case 0xd17: /* TEXTRC */
+            if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
+                return 1;
+            tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
+            switch ((insn >> 22) & 3) {
+                case 0:
+                    tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
+                    break;
+                case 1:
+                    tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
+                    break;
+                case 2:
+                    tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
+                    break;
             }
-            break;
-        case 2:
-            tcg_gen_shli_i32(tmp2, tmp2, 16);
-            tcg_gen_or_i32(tmp, tmp, tmp2);
-            break;
-        }
-        gen_set_nzcv(tmp);
-        tcg_temp_free_i32(tmp2);
-        tcg_temp_free_i32(tmp);
-        break;
-    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
-        rd = (insn >> 12) & 0xf;
-        rd0 = (insn >> 16) & 0xf;
-        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
-            return 1;
-        gen_op_iwmmxt_movq_M0_wRn(rd0);
-        tmp = tcg_temp_new_i32();
-        switch ((insn >> 22) & 3) {
-        case 0:
-            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
-            break;
-        case 1:
-            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
-            break;
-        case 2:
-            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
-            break;
-        }
-        store_reg(s, rd, tmp);
-        break;
-    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
-    case 0x906: case 0xb06: case 0xd06: case 0xf06:
-        wrd = (insn >> 12) & 0xf;
-        rd0 = (insn >> 16) & 0xf;
-        rd1 = (insn >> 0) & 0xf;
-        gen_op_iwmmxt_movq_M0_wRn(rd0);
-        switch ((insn >> 22) & 3) {
-        case 0:
-            if (insn & (1 << 21))
-                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
-            else
-                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
-            break;
-        case 1:
-            if (insn & (1 << 21))
-                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
-            else
-                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
-            break;
-        case 2:
-            if (insn & (1 << 21))
-                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
-            else
-                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
-            break;
-        case 3:
-            return 1;
-        }
-        gen_op_iwmmxt_movq_wRn_M0(wrd);
-        gen_op_iwmmxt_set_mup();
-        gen_op_iwmmxt_set_cup();
-        break;
-    case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
-    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
-        wrd = (insn >> 12) & 0xf;
-        rd0 = (insn >> 16) & 0xf;
-        gen_op_iwmmxt_movq_M0_wRn(rd0);
-        switch ((insn >> 22) & 3) {
-        case 0:
-            if (insn & (1 << 21))
-                gen_op_iwmmxt_unpacklsb_M0();
-            else
-                gen_op_iwmmxt_unpacklub_M0();
-            break;
-        case 1:
-            if (insn & (1 << 21))
-                gen_op_iwmmxt_unpacklsw_M0();
-            else
-                gen_op_iwmmxt_unpackluw_M0();
-            break;
-        case 2:
-            if (insn & (1 << 21))
-                gen_op_iwmmxt_unpacklsl_M0();
-            else
-                gen_op_iwmmxt_unpacklul_M0();
-            break;
-        case 3:
-            return 1;
-        }
-        gen_op_iwmmxt_movq_wRn_M0(wrd);
-        gen_op_iwmmxt_set_mup();
-        gen_op_iwmmxt_set_cup();
-        break;
-    case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
-    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
-        wrd = (insn >> 12) & 0xf;
-        rd0 = (insn >> 16) & 0xf;
-        gen_op_iwmmxt_movq_M0_wRn(rd0);
-        switch ((insn >> 22) & 3) {
-        case 0:
-            if (insn & (1 << 21))
-                gen_op_iwmmxt_unpackhsb_M0();
-            else
-                gen_op_iwmmxt_unpackhub_M0();
-            break;
-        case 1:
-            if (insn & (1 << 21))
-                gen_op_iwmmxt_unpackhsw_M0();
-            else
-                gen_op_iwmmxt_unpackhuw_M0();
-            break;
-        case 2:
-            if (insn & (1 << 21))
-                gen_op_iwmmxt_unpackhsl_M0();
-            else
-                gen_op_iwmmxt_unpackhul_M0();
-            break;
-        case 3:
-            return 1;
-        }
-        gen_op_iwmmxt_movq_wRn_M0(wrd);
-        gen_op_iwmmxt_set_mup();
-        gen_op_iwmmxt_set_cup();
-        break;
-    case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
-    case 0x214: case 0x614: case 0xa14: case 0xe14:
-        if (((insn >> 22) & 3) == 0)
-            return 1;
-        wrd = (insn >> 12) & 0xf;
-        rd0 = (insn >> 16) & 0xf;
-        gen_op_iwmmxt_movq_M0_wRn(rd0);
-        tmp = tcg_temp_new_i32();
-        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
+            tcg_gen_shli_i32(tmp, tmp, 28);
+            gen_set_nzcv(tmp);
             tcg_temp_free_i32(tmp);
-            return 1;
-        }
-        switch ((insn >> 22) & 3) {
-        case 1:
-            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
-            break;
-        case 2:
-            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
-            break;
-        case 3:
-            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
             break;
-        }
-        tcg_temp_free_i32(tmp);
-        gen_op_iwmmxt_movq_wRn_M0(wrd);
-        gen_op_iwmmxt_set_mup();
-        gen_op_iwmmxt_set_cup();
-        break;
-    case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
-    case 0x014: case 0x414: case 0x814: case 0xc14:
-        if (((insn >> 22) & 3) == 0)
-            return 1;
-        wrd = (insn >> 12) & 0xf;
-        rd0 = (insn >> 16) & 0xf;
-        gen_op_iwmmxt_movq_M0_wRn(rd0);
-        tmp = tcg_temp_new_i32();
-        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
+        case 0x401:
+        case 0x405:
+        case 0x409:
+        case 0x40d: /* TBCST */
+            if (((insn >> 6) & 3) == 3)
+                return 1;
+            rd = (insn >> 12) & 0xf;
+            wrd = (insn >> 16) & 0xf;
+            tmp = load_reg(s, rd);
+            switch ((insn >> 6) & 3) {
+                case 0:
+                    gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
+                    break;
+                case 1:
+                    gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
+                    break;
+                case 2:
+                    gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
+                    break;
+            }
             tcg_temp_free_i32(tmp);
-            return 1;
-        }
-        switch ((insn >> 22) & 3) {
-        case 1:
-            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
-            break;
-        case 2:
-            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
-            break;
-        case 3:
-            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
-            break;
-        }
-        tcg_temp_free_i32(tmp);
-        gen_op_iwmmxt_movq_wRn_M0(wrd);
-        gen_op_iwmmxt_set_mup();
-        gen_op_iwmmxt_set_cup();
-        break;
-    case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
-    case 0x114: case 0x514: case 0x914: case 0xd14:
-        if (((insn >> 22) & 3) == 0)
-            return 1;
-        wrd = (insn >> 12) & 0xf;
-        rd0 = (insn >> 16) & 0xf;
-        gen_op_iwmmxt_movq_M0_wRn(rd0);
-        tmp = tcg_temp_new_i32();
-        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
+            gen_op_iwmmxt_movq_wRn_M0(wrd);
+            gen_op_iwmmxt_set_mup();
+            break;
+        case 0x113:
+        case 0x513:
+        case 0x913:
+        case 0xd13: /* TANDC */
+            if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
+                return 1;
+            tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
+            tmp2 = tcg_temp_new_i32();
+            tcg_gen_mov_i32(tmp2, tmp);
+            switch ((insn >> 22) & 3) {
+                case 0:
+                    for (i = 0; i < 7; i++) {
+                        tcg_gen_shli_i32(tmp2, tmp2, 4);
+                        tcg_gen_and_i32(tmp, tmp, tmp2);
+                    }
+                    break;
+                case 1:
+                    for (i = 0; i < 3; i++) {
+                        tcg_gen_shli_i32(tmp2, tmp2, 8);
+                        tcg_gen_and_i32(tmp, tmp, tmp2);
+                    }
+                    break;
+                case 2:
+                    tcg_gen_shli_i32(tmp2, tmp2, 16);
+                    tcg_gen_and_i32(tmp, tmp, tmp2);
+                    break;
+            }
+            gen_set_nzcv(tmp);
+            tcg_temp_free_i32(tmp2);
             tcg_temp_free_i32(tmp);
-            return 1;
-        }
-        switch ((insn >> 22) & 3) {
-        case 1:
-            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
-            break;
-        case 2:
-            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
             break;
-        case 3:
-            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
-            break;
-        }
-        tcg_temp_free_i32(tmp);
-        gen_op_iwmmxt_movq_wRn_M0(wrd);
-        gen_op_iwmmxt_set_mup();
-        gen_op_iwmmxt_set_cup();
-        break;
-    case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
-    case 0x314: case 0x714: case 0xb14: case 0xf14:
-        if (((insn >> 22) & 3) == 0)
-            return 1;
-        wrd = (insn >> 12) & 0xf;
-        rd0 = (insn >> 16) & 0xf;
-        gen_op_iwmmxt_movq_M0_wRn(rd0);
-        tmp = tcg_temp_new_i32();
-        switch ((insn >> 22) & 3) {
-        case 1:
-            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
-                tcg_temp_free_i32(tmp);
-                return 1;
+        case 0x01c:
+        case 0x41c:
+        case 0x81c:
+        case 0xc1c: /* WACC */
+            wrd = (insn >> 12) & 0xf;
+            rd0 = (insn >> 16) & 0xf;
+            gen_op_iwmmxt_movq_M0_wRn(rd0);
+            switch ((insn >> 22) & 3) {
+                case 0:
+                    gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
+                    break;
+                case 1:
+                    gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
+                    break;
+                case 2:
+                    gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
+                    break;
+                case 3:
+                    return 1;
             }
-            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
+            gen_op_iwmmxt_movq_wRn_M0(wrd);
+            gen_op_iwmmxt_set_mup();
             break;
-        case 2:
-            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
-                tcg_temp_free_i32(tmp);
+        case 0x115:
+        case 0x515:
+        case 0x915:
+        case 0xd15: /* TORC */
+            if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
                 return 1;
+            tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
+            tmp2 = tcg_temp_new_i32();
+            tcg_gen_mov_i32(tmp2, tmp);
+            switch ((insn >> 22) & 3) {
+                case 0:
+                    for (i = 0; i < 7; i++) {
+                        tcg_gen_shli_i32(tmp2, tmp2, 4);
+                        tcg_gen_or_i32(tmp, tmp, tmp2);
+                    }
+                    break;
+                case 1:
+                    for (i = 0; i < 3; i++) {
+                        tcg_gen_shli_i32(tmp2, tmp2, 8);
+                        tcg_gen_or_i32(tmp, tmp, tmp2);
+                    }
+                    break;
+                case 2:
+                    tcg_gen_shli_i32(tmp2, tmp2, 16);
+                    tcg_gen_or_i32(tmp, tmp, tmp2);
+                    break;
             }
-            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
+            gen_set_nzcv(tmp);
+            tcg_temp_free_i32(tmp2);
+            tcg_temp_free_i32(tmp);
             break;
-        case 3:
-            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
-                tcg_temp_free_i32(tmp);
+        case 0x103:
+        case 0x503:
+        case 0x903:
+        case 0xd03: /* TMOVMSK */
+            rd = (insn >> 12) & 0xf;
+            rd0 = (insn >> 16) & 0xf;
+            if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
                 return 1;
+            gen_op_iwmmxt_movq_M0_wRn(rd0);
+            tmp = tcg_temp_new_i32();
+            switch ((insn >> 22) & 3) {
+                case 0:
+                    gen_helper_iwmmxt_msbb(tmp, cpu_M0);
+                    break;
+                case 1:
+                    gen_helper_iwmmxt_msbw(tmp, cpu_M0);
+                    break;
+                case 2:
+                    gen_helper_iwmmxt_msbl(tmp, cpu_M0);
+                    break;
             }
-            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
-            break;
-        }
-        tcg_temp_free_i32(tmp);
-        gen_op_iwmmxt_movq_wRn_M0(wrd);
-        gen_op_iwmmxt_set_mup();
-        gen_op_iwmmxt_set_cup();
-        break;
-    case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
-    case 0x916: case 0xb16: case 0xd16: case 0xf16:
-        wrd = (insn >> 12) & 0xf;
-        rd0 = (insn >> 16) & 0xf;
-        rd1 = (insn >> 0) & 0xf;
-        gen_op_iwmmxt_movq_M0_wRn(rd0);
-        switch ((insn >> 22) & 3) {
-        case 0:
-            if (insn & (1 << 21))
-                gen_op_iwmmxt_minsb_M0_wRn(rd1);
-            else
-                gen_op_iwmmxt_minub_M0_wRn(rd1);
-            break;
-        case 1:
-            if (insn & (1 << 21))
-                gen_op_iwmmxt_minsw_M0_wRn(rd1);
-            else
-                gen_op_iwmmxt_minuw_M0_wRn(rd1);
+            store_reg(s, rd, tmp);
             break;
-        case 2:
-            if (insn & (1 << 21))
-                gen_op_iwmmxt_minsl_M0_wRn(rd1);
-            else
-                gen_op_iwmmxt_minul_M0_wRn(rd1);
-            break;
-        case 3:
-            return 1;
-        }
gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - break; - case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */ - case 0x816: case 0xa16: case 0xc16: case 0xe16: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - switch ((insn >> 22) & 3) { - case 0: - if (insn & (1 << 21)) - gen_op_iwmmxt_maxsb_M0_wRn(rd1); - else - gen_op_iwmmxt_maxub_M0_wRn(rd1); - break; - case 1: - if (insn & (1 << 21)) - gen_op_iwmmxt_maxsw_M0_wRn(rd1); - else - gen_op_iwmmxt_maxuw_M0_wRn(rd1); - break; - case 2: - if (insn & (1 << 21)) - gen_op_iwmmxt_maxsl_M0_wRn(rd1); - else - gen_op_iwmmxt_maxul_M0_wRn(rd1); - break; - case 3: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - break; - case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */ - case 0x402: case 0x502: case 0x602: case 0x702: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - tmp = tcg_const_i32((insn >> 20) & 3); - iwmmxt_load_reg(cpu_V1, rd1); - gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp); - tcg_temp_free(tmp); - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - break; - case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */ - case 0x41a: case 0x51a: case 0x61a: case 0x71a: - case 0x81a: case 0x91a: case 0xa1a: case 0xb1a: - case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - switch ((insn >> 20) & 0xf) { - case 0x0: - gen_op_iwmmxt_subnb_M0_wRn(rd1); - break; - case 0x1: - gen_op_iwmmxt_subub_M0_wRn(rd1); - break; - case 0x3: - gen_op_iwmmxt_subsb_M0_wRn(rd1); - break; - case 0x4: - gen_op_iwmmxt_subnw_M0_wRn(rd1); - break; - case 0x5: - gen_op_iwmmxt_subuw_M0_wRn(rd1); - break; - case 0x7: - gen_op_iwmmxt_subsw_M0_wRn(rd1); - break; - case 0x8: - gen_op_iwmmxt_subnl_M0_wRn(rd1); - break; - case 0x9: - gen_op_iwmmxt_subul_M0_wRn(rd1); - break; - case 0xb: - gen_op_iwmmxt_subsl_M0_wRn(rd1); - break; - default: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */ - case 0x41e: case 0x51e: case 0x61e: case 0x71e: - case 0x81e: case 0x91e: case 0xa1e: case 0xb1e: - case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f)); - gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp); - tcg_temp_free(tmp); - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */ - case 0x418: case 0x518: case 0x618: case 0x718: - case 0x818: case 0x918: case 0xa18: case 0xb18: - case 0xc18: case 0xd18: case 0xe18: case 0xf18: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - switch ((insn >> 20) & 0xf) { - case 0x0: - gen_op_iwmmxt_addnb_M0_wRn(rd1); - break; - case 0x1: - gen_op_iwmmxt_addub_M0_wRn(rd1); - break; - case 0x3: - gen_op_iwmmxt_addsb_M0_wRn(rd1); - break; - case 0x4: - gen_op_iwmmxt_addnw_M0_wRn(rd1); - break; - case 0x5: - gen_op_iwmmxt_adduw_M0_wRn(rd1); + case 0x106: + case 0x306: + case 0x506: + case 0x706: /* WCMPGT */ + case 0x906: + case 0xb06: + case 0xd06: + case 0xf06: + wrd = (insn >> 12) & 0xf; + rd0 = 
(insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + switch ((insn >> 22) & 3) { + case 0: + if (insn & (1 << 21)) + gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1); + else + gen_op_iwmmxt_cmpgtub_M0_wRn(rd1); + break; + case 1: + if (insn & (1 << 21)) + gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1); + else + gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1); + break; + case 2: + if (insn & (1 << 21)) + gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1); + else + gen_op_iwmmxt_cmpgtul_M0_wRn(rd1); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); break; - case 0x7: - gen_op_iwmmxt_addsw_M0_wRn(rd1); + case 0x00e: + case 0x20e: + case 0x40e: + case 0x60e: /* WUNPCKEL */ + case 0x80e: + case 0xa0e: + case 0xc0e: + case 0xe0e: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + switch ((insn >> 22) & 3) { + case 0: + if (insn & (1 << 21)) + gen_op_iwmmxt_unpacklsb_M0(); + else + gen_op_iwmmxt_unpacklub_M0(); + break; + case 1: + if (insn & (1 << 21)) + gen_op_iwmmxt_unpacklsw_M0(); + else + gen_op_iwmmxt_unpackluw_M0(); + break; + case 2: + if (insn & (1 << 21)) + gen_op_iwmmxt_unpacklsl_M0(); + else + gen_op_iwmmxt_unpacklul_M0(); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); break; - case 0x8: - gen_op_iwmmxt_addnl_M0_wRn(rd1); + case 0x00c: + case 0x20c: + case 0x40c: + case 0x60c: /* WUNPCKEH */ + case 0x80c: + case 0xa0c: + case 0xc0c: + case 0xe0c: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + switch ((insn >> 22) & 3) { + case 0: + if (insn & (1 << 21)) + gen_op_iwmmxt_unpackhsb_M0(); + else + gen_op_iwmmxt_unpackhub_M0(); + break; + case 1: + if (insn & (1 << 21)) + gen_op_iwmmxt_unpackhsw_M0(); + else + gen_op_iwmmxt_unpackhuw_M0(); + break; + case 2: + if (insn & (1 << 21)) + gen_op_iwmmxt_unpackhsl_M0(); + else + gen_op_iwmmxt_unpackhul_M0(); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); break; - case 0x9: - gen_op_iwmmxt_addul_M0_wRn(rd1); + case 0x204: + case 0x604: + case 0xa04: + case 0xe04: /* WSRL */ + case 0x214: + case 0x614: + case 0xa14: + case 0xe14: + if (((insn >> 22) & 3) == 0) + return 1; + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + tmp = tcg_temp_new_i32(); + if (gen_iwmmxt_shift(insn, 0xff, tmp)) { + tcg_temp_free_i32(tmp); + return 1; + } + switch ((insn >> 22) & 3) { + case 1: + gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp); + break; + case 2: + gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp); + break; + case 3: + gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp); + break; + } + tcg_temp_free_i32(tmp); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); break; - case 0xb: - gen_op_iwmmxt_addsl_M0_wRn(rd1); + case 0x004: + case 0x404: + case 0x804: + case 0xc04: /* WSRA */ + case 0x014: + case 0x414: + case 0x814: + case 0xc14: + if (((insn >> 22) & 3) == 0) + return 1; + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + tmp = tcg_temp_new_i32(); + if (gen_iwmmxt_shift(insn, 0xff, tmp)) { + tcg_temp_free_i32(tmp); + return 1; + } + switch ((insn >> 22) & 3) { + case 1: + gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp); + break; + case 2: + gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp); + break; + case 3: + gen_helper_iwmmxt_sraq(cpu_M0, 
cpu_env, cpu_M0, tmp); + break; + } + tcg_temp_free_i32(tmp); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); break; - default: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */ - case 0x408: case 0x508: case 0x608: case 0x708: - case 0x808: case 0x908: case 0xa08: case 0xb08: - case 0xc08: case 0xd08: case 0xe08: case 0xf08: - if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0) - return 1; - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - switch ((insn >> 22) & 3) { - case 1: - if (insn & (1 << 21)) - gen_op_iwmmxt_packsw_M0_wRn(rd1); - else - gen_op_iwmmxt_packuw_M0_wRn(rd1); + case 0x104: + case 0x504: + case 0x904: + case 0xd04: /* WSLL */ + case 0x114: + case 0x514: + case 0x914: + case 0xd14: + if (((insn >> 22) & 3) == 0) + return 1; + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + tmp = tcg_temp_new_i32(); + if (gen_iwmmxt_shift(insn, 0xff, tmp)) { + tcg_temp_free_i32(tmp); + return 1; + } + switch ((insn >> 22) & 3) { + case 1: + gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp); + break; + case 2: + gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp); + break; + case 3: + gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp); + break; + } + tcg_temp_free_i32(tmp); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); break; - case 2: - if (insn & (1 << 21)) - gen_op_iwmmxt_packsl_M0_wRn(rd1); - else - gen_op_iwmmxt_packul_M0_wRn(rd1); + case 0x304: + case 0x704: + case 0xb04: + case 0xf04: /* WROR */ + case 0x314: + case 0x714: + case 0xb14: + case 0xf14: + if (((insn >> 22) & 3) == 0) + return 1; + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + tmp = tcg_temp_new_i32(); + switch ((insn >> 22) & 3) { + case 1: + if (gen_iwmmxt_shift(insn, 0xf, tmp)) { + tcg_temp_free_i32(tmp); + return 1; + } + gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp); + break; + case 2: + if (gen_iwmmxt_shift(insn, 0x1f, tmp)) { + tcg_temp_free_i32(tmp); + return 1; + } + gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp); + break; + case 3: + if (gen_iwmmxt_shift(insn, 0x3f, tmp)) { + tcg_temp_free_i32(tmp); + return 1; + } + gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp); + break; + } + tcg_temp_free_i32(tmp); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); break; - case 3: - if (insn & (1 << 21)) - gen_op_iwmmxt_packsq_M0_wRn(rd1); - else - gen_op_iwmmxt_packuq_M0_wRn(rd1); + case 0x116: + case 0x316: + case 0x516: + case 0x716: /* WMIN */ + case 0x916: + case 0xb16: + case 0xd16: + case 0xf16: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + switch ((insn >> 22) & 3) { + case 0: + if (insn & (1 << 21)) + gen_op_iwmmxt_minsb_M0_wRn(rd1); + else + gen_op_iwmmxt_minub_M0_wRn(rd1); + break; + case 1: + if (insn & (1 << 21)) + gen_op_iwmmxt_minsw_M0_wRn(rd1); + else + gen_op_iwmmxt_minuw_M0_wRn(rd1); + break; + case 2: + if (insn & (1 << 21)) + gen_op_iwmmxt_minsl_M0_wRn(rd1); + else + gen_op_iwmmxt_minul_M0_wRn(rd1); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + break; + case 0x016: + case 0x216: + case 0x416: + case 0x616: /* WMAX */ + case 0x816: + case 0xa16: + case 0xc16: + case 
0xe16: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + switch ((insn >> 22) & 3) { + case 0: + if (insn & (1 << 21)) + gen_op_iwmmxt_maxsb_M0_wRn(rd1); + else + gen_op_iwmmxt_maxub_M0_wRn(rd1); + break; + case 1: + if (insn & (1 << 21)) + gen_op_iwmmxt_maxsw_M0_wRn(rd1); + else + gen_op_iwmmxt_maxuw_M0_wRn(rd1); + break; + case 2: + if (insn & (1 << 21)) + gen_op_iwmmxt_maxsl_M0_wRn(rd1); + else + gen_op_iwmmxt_maxul_M0_wRn(rd1); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + break; + case 0x002: + case 0x102: + case 0x202: + case 0x302: /* WALIGNI */ + case 0x402: + case 0x502: + case 0x602: + case 0x702: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + tmp = tcg_const_i32((insn >> 20) & 3); + iwmmxt_load_reg(cpu_V1, rd1); + gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp); + tcg_temp_free(tmp); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + break; + case 0x01a: + case 0x11a: + case 0x21a: + case 0x31a: /* WSUB */ + case 0x41a: + case 0x51a: + case 0x61a: + case 0x71a: + case 0x81a: + case 0x91a: + case 0xa1a: + case 0xb1a: + case 0xc1a: + case 0xd1a: + case 0xe1a: + case 0xf1a: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + switch ((insn >> 20) & 0xf) { + case 0x0: + gen_op_iwmmxt_subnb_M0_wRn(rd1); + break; + case 0x1: + gen_op_iwmmxt_subub_M0_wRn(rd1); + break; + case 0x3: + gen_op_iwmmxt_subsb_M0_wRn(rd1); + break; + case 0x4: + gen_op_iwmmxt_subnw_M0_wRn(rd1); + break; + case 0x5: + gen_op_iwmmxt_subuw_M0_wRn(rd1); + break; + case 0x7: + gen_op_iwmmxt_subsw_M0_wRn(rd1); + break; + case 0x8: + gen_op_iwmmxt_subnl_M0_wRn(rd1); + break; + case 0x9: + gen_op_iwmmxt_subul_M0_wRn(rd1); + break; + case 0xb: + gen_op_iwmmxt_subsl_M0_wRn(rd1); + break; + default: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); break; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x201: case 0x203: case 0x205: case 0x207: - case 0x209: case 0x20b: case 0x20d: case 0x20f: - case 0x211: case 0x213: case 0x215: case 0x217: - case 0x219: case 0x21b: case 0x21d: case 0x21f: - wrd = (insn >> 5) & 0xf; - rd0 = (insn >> 12) & 0xf; - rd1 = (insn >> 0) & 0xf; - if (rd0 == 0xf || rd1 == 0xf) - return 1; - gen_op_iwmmxt_movq_M0_wRn(wrd); - tmp = load_reg(s, rd0); - tmp2 = load_reg(s, rd1); - switch ((insn >> 16) & 0xf) { - case 0x0: /* TMIA */ - gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2); + case 0x01e: + case 0x11e: + case 0x21e: + case 0x31e: /* WSHUFH */ + case 0x41e: + case 0x51e: + case 0x61e: + case 0x71e: + case 0x81e: + case 0x91e: + case 0xa1e: + case 0xb1e: + case 0xc1e: + case 0xd1e: + case 0xe1e: + case 0xf1e: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f)); + gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp); + tcg_temp_free(tmp); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); break; - case 0x8: /* TMIAPH */ - gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2); + case 0x018: + case 0x118: + case 0x218: + case 0x318: /* WADD */ + case 0x418: + case 0x518: + case 0x618: + case 0x718: + case 0x818: + case 0x918: + case 0xa18: + case 0xb18: + case 0xc18: + case 
0xd18: + case 0xe18: + case 0xf18: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + switch ((insn >> 20) & 0xf) { + case 0x0: + gen_op_iwmmxt_addnb_M0_wRn(rd1); + break; + case 0x1: + gen_op_iwmmxt_addub_M0_wRn(rd1); + break; + case 0x3: + gen_op_iwmmxt_addsb_M0_wRn(rd1); + break; + case 0x4: + gen_op_iwmmxt_addnw_M0_wRn(rd1); + break; + case 0x5: + gen_op_iwmmxt_adduw_M0_wRn(rd1); + break; + case 0x7: + gen_op_iwmmxt_addsw_M0_wRn(rd1); + break; + case 0x8: + gen_op_iwmmxt_addnl_M0_wRn(rd1); + break; + case 0x9: + gen_op_iwmmxt_addul_M0_wRn(rd1); + break; + case 0xb: + gen_op_iwmmxt_addsl_M0_wRn(rd1); + break; + default: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); break; - case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */ - if (insn & (1 << 16)) - tcg_gen_shri_i32(tmp, tmp, 16); - if (insn & (1 << 17)) - tcg_gen_shri_i32(tmp2, tmp2, 16); - gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2); + case 0x008: + case 0x108: + case 0x208: + case 0x308: /* WPACK */ + case 0x408: + case 0x508: + case 0x608: + case 0x708: + case 0x808: + case 0x908: + case 0xa08: + case 0xb08: + case 0xc08: + case 0xd08: + case 0xe08: + case 0xf08: + if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0) + return 1; + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(rd0); + switch ((insn >> 22) & 3) { + case 1: + if (insn & (1 << 21)) + gen_op_iwmmxt_packsw_M0_wRn(rd1); + else + gen_op_iwmmxt_packuw_M0_wRn(rd1); + break; + case 2: + if (insn & (1 << 21)) + gen_op_iwmmxt_packsl_M0_wRn(rd1); + else + gen_op_iwmmxt_packul_M0_wRn(rd1); + break; + case 3: + if (insn & (1 << 21)) + gen_op_iwmmxt_packsq_M0_wRn(rd1); + else + gen_op_iwmmxt_packuq_M0_wRn(rd1); + break; + } + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + gen_op_iwmmxt_set_cup(); break; - default: + case 0x201: + case 0x203: + case 0x205: + case 0x207: + case 0x209: + case 0x20b: + case 0x20d: + case 0x20f: + case 0x211: + case 0x213: + case 0x215: + case 0x217: + case 0x219: + case 0x21b: + case 0x21d: + case 0x21f: + wrd = (insn >> 5) & 0xf; + rd0 = (insn >> 12) & 0xf; + rd1 = (insn >> 0) & 0xf; + if (rd0 == 0xf || rd1 == 0xf) + return 1; + gen_op_iwmmxt_movq_M0_wRn(wrd); + tmp = load_reg(s, rd0); + tmp2 = load_reg(s, rd1); + switch ((insn >> 16) & 0xf) { + case 0x0: /* TMIA */ + gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2); + break; + case 0x8: /* TMIAPH */ + gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2); + break; + case 0xc: + case 0xd: + case 0xe: + case 0xf: /* TMIAxy */ + if (insn & (1 << 16)) + tcg_gen_shri_i32(tmp, tmp, 16); + if (insn & (1 << 17)) + tcg_gen_shri_i32(tmp2, tmp2, 16); + gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2); + break; + default: + tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp); + return 1; + } tcg_temp_free_i32(tmp2); tcg_temp_free_i32(tmp); + gen_op_iwmmxt_movq_wRn_M0(wrd); + gen_op_iwmmxt_set_mup(); + break; + default: return 1; - } - tcg_temp_free_i32(tmp2); - tcg_temp_free_i32(tmp); - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - break; - default: - return 1; } return 0; @@ -2398,8 +2500,7 @@ static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn) /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred (ie. an undefined instruction). 
*/ -static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn) -{ +static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn) { int acc, rd0, rd1, rdhi, rdlo; TCGv tmp, tmp2; @@ -2415,24 +2516,24 @@ static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn) tmp = load_reg(s, rd0); tmp2 = load_reg(s, rd1); switch ((insn >> 16) & 0xf) { - case 0x0: /* MIA */ - gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2); - break; - case 0x8: /* MIAPH */ - gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2); - break; - case 0xc: /* MIABB */ - case 0xd: /* MIABT */ - case 0xe: /* MIATB */ - case 0xf: /* MIATT */ - if (insn & (1 << 16)) - tcg_gen_shri_i32(tmp, tmp, 16); - if (insn & (1 << 17)) - tcg_gen_shri_i32(tmp2, tmp2, 16); - gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2); - break; - default: - return 1; + case 0x0: /* MIA */ + gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2); + break; + case 0x8: /* MIAPH */ + gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2); + break; + case 0xc: /* MIABB */ + case 0xd: /* MIABT */ + case 0xe: /* MIATB */ + case 0xf: /* MIATT */ + if (insn & (1 << 16)) + tcg_gen_shri_i32(tmp, tmp, 16); + if (insn & (1 << 17)) + tcg_gen_shri_i32(tmp2, tmp2, 16); + gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2); + break; + default: + return 1; } tcg_temp_free_i32(tmp2); tcg_temp_free_i32(tmp); @@ -2450,13 +2551,13 @@ static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn) if (acc != 0) return 1; - if (insn & ARM_CP_RW_BIT) { /* MRA */ + if (insn & ARM_CP_RW_BIT) { /* MRA */ iwmmxt_load_reg(cpu_V0, acc); tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0); tcg_gen_shri_i64(cpu_V0, cpu_V0, 32); tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0); tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1); - } else { /* MAR */ + } else { /* MAR */ tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]); iwmmxt_store_reg(cpu_V0, acc); } @@ -2468,8 +2569,7 @@ static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn) /* Disassemble system coprocessor instruction. Return nonzero if instruction is not defined. 
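The MRA/MAR pair in the hunk above moves the 40-bit XScale DSP accumulator between acc0 and a core register pair; the (1 << (40 - 32)) - 1 mask is what trims rdhi to the 8 significant bits above the low word. A standalone model of the split and rejoin, with hypothetical names not taken from this patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical host-side model of MRA/MAR, not QEMU code. The
     * accumulator is 40 bits: rdlo gets the low 32, rdhi gets bits
     * 39:32, masked with (1 << (40 - 32)) - 1 == 0xff as in the hunk. */
    static void mra(uint64_t acc, uint32_t *rdlo, uint32_t *rdhi) {
        *rdlo = (uint32_t) acc;
        *rdhi = (uint32_t)(acc >> 32) & ((1u << (40 - 32)) - 1);
    }

    static uint64_t mar(uint32_t rdlo, uint32_t rdhi) {
        return ((uint64_t) rdhi << 32) | rdlo; /* rdhi stays within 8 bits */
    }

    int main(void) {
        uint32_t lo, hi;
        mra(0xab12345678ULL, &lo, &hi);           /* a 40-bit value */
        printf("hi=%02x lo=%08x\n", (unsigned) hi, (unsigned) lo);
        printf("acc=%010llx\n", (unsigned long long) mar(lo, hi));
        return 0;
    }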
*/ -static int disas_cp_insn(CPUARMState *env, DisasContext *s, uint32_t insn) -{ +static int disas_cp_insn(CPUARMState *env, DisasContext *s, uint32_t insn) { TCGv tmp, tmp2; uint32_t rd = (insn >> 12) & 0xf; uint32_t cp = (insn >> 8) & 0xf; @@ -2499,8 +2599,7 @@ static int disas_cp_insn(CPUARMState *env, DisasContext *s, uint32_t insn) return 0; } -static int cp15_user_ok(CPUARMState *env, uint32_t insn) -{ +static int cp15_user_ok(CPUARMState *env, uint32_t insn) { int cpn = (insn >> 16) & 0xf; int cpm = insn & 0xf; int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38); @@ -2511,8 +2610,7 @@ static int cp15_user_ok(CPUARMState *env, uint32_t insn) * (b) UNDEF only if PMUSERENR.EN is 0 * (c) always read OK and UNDEF on write (PMUSERENR only) */ - if ((cpm == 12 && (op < 6)) || - (cpm == 13 && (op < 3))) { + if ((cpm == 12 && (op < 6)) || (cpm == 13 && (op < 3))) { return env->cp15.c9_pmuserenr; } else if (cpm == 14 && op == 0 && (insn & ARM_CP_RW_BIT)) { /* PMUSERENR, read only */ @@ -2529,8 +2627,7 @@ static int cp15_user_ok(CPUARMState *env, uint32_t insn) return 0; } -static int cp15_tls_load_store(CPUARMState *env, DisasContext *s, uint32_t insn, uint32_t rd) -{ +static int cp15_tls_load_store(CPUARMState *env, DisasContext *s, uint32_t insn, uint32_t rd) { TCGv tmp; int cpn = (insn >> 16) & 0xf; int cpm = insn & 0xf; @@ -2544,35 +2641,35 @@ static int cp15_tls_load_store(CPUARMState *env, DisasContext *s, uint32_t insn, if (insn & ARM_CP_RW_BIT) { switch (op) { - case 2: - tmp = load_cpu_field(cp15.c13_tls1); - break; - case 3: - tmp = load_cpu_field(cp15.c13_tls2); - break; - case 4: - tmp = load_cpu_field(cp15.c13_tls3); - break; - default: - return 0; + case 2: + tmp = load_cpu_field(cp15.c13_tls1); + break; + case 3: + tmp = load_cpu_field(cp15.c13_tls2); + break; + case 4: + tmp = load_cpu_field(cp15.c13_tls3); + break; + default: + return 0; } store_reg(s, rd, tmp); } else { tmp = load_reg(s, rd); switch (op) { - case 2: - store_cpu_field(tmp, cp15.c13_tls1); - break; - case 3: - store_cpu_field(tmp, cp15.c13_tls2); - break; - case 4: - store_cpu_field(tmp, cp15.c13_tls3); - break; - default: - tcg_temp_free_i32(tmp); - return 0; + case 2: + store_cpu_field(tmp, cp15.c13_tls1); + break; + case 3: + store_cpu_field(tmp, cp15.c13_tls2); + break; + case 4: + store_cpu_field(tmp, cp15.c13_tls3); + break; + default: + tcg_temp_free_i32(tmp); + return 0; } } return 1; @@ -2580,14 +2677,13 @@ static int cp15_tls_load_store(CPUARMState *env, DisasContext *s, uint32_t insn, /* Disassemble system coprocessor (cp15) instruction. Return nonzero if instruction is not defined. */ -static int disas_cp15_insn(CPUARMState *env, DisasContext *s, uint32_t insn) -{ +static int disas_cp15_insn(CPUARMState *env, DisasContext *s, uint32_t insn) { uint32_t rd; TCGv tmp, tmp2; /* M profile cores use memory mapped registers instead of cp15. */ if (arm_feature(env, ARM_FEATURE_M)) - return 1; + return 1; if ((insn & (1 << 25)) == 0) { if (insn & (1 << 20)) { @@ -2607,50 +2703,50 @@ static int disas_cp15_insn(CPUARMState *env, DisasContext *s, uint32_t insn) * get_cp15/set_cp15 helpers, and is more efficient anyway. */ switch ((insn & 0x0fff0fff)) { - case 0x0e070f90: - /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores). - * In v7, this must NOP. - */ - if (IS_USER(s)) { - return 1; - } - if (!arm_feature(env, ARM_FEATURE_V7)) { - /* Wait for interrupt. 
*/ - gen_set_pc_im(s->pc); - s->is_jmp = DISAS_WFI; - } - return 0; - case 0x0e070f58: - /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI, - * so this is slightly over-broad. - */ - if (!IS_USER(s) && !arm_feature(env, ARM_FEATURE_V6)) { - /* Wait for interrupt. */ - gen_set_pc_im(s->pc); - s->is_jmp = DISAS_WFI; - return 0; - } - /* Otherwise continue to handle via helper function. - * In particular, on v7 and some v6 cores this is one of - * the VA-PA registers. - */ - break; - case 0x0e070f3d: - /* 0,c7,c13,1: prefetch-by-MVA in v6, NOP in v7 */ - if (arm_feature(env, ARM_FEATURE_V6)) { - return IS_USER(s) ? 1 : 0; - } - break; - case 0x0e070f95: /* 0,c7,c5,4 : ISB */ - case 0x0e070f9a: /* 0,c7,c10,4: DSB */ - case 0x0e070fba: /* 0,c7,c10,5: DMB */ - /* Barriers in both v6 and v7 */ - if (arm_feature(env, ARM_FEATURE_V6)) { + case 0x0e070f90: + /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores). + * In v7, this must NOP. + */ + if (IS_USER(s)) { + return 1; + } + if (!arm_feature(env, ARM_FEATURE_V7)) { + /* Wait for interrupt. */ + gen_set_pc_im(s->pc); + s->is_jmp = DISAS_WFI; + } return 0; - } - break; - default: - break; + case 0x0e070f58: + /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI, + * so this is slightly over-broad. + */ + if (!IS_USER(s) && !arm_feature(env, ARM_FEATURE_V6)) { + /* Wait for interrupt. */ + gen_set_pc_im(s->pc); + s->is_jmp = DISAS_WFI; + return 0; + } + /* Otherwise continue to handle via helper function. + * In particular, on v7 and some v6 cores this is one of + * the VA-PA registers. + */ + break; + case 0x0e070f3d: + /* 0,c7,c13,1: prefetch-by-MVA in v6, NOP in v7 */ + if (arm_feature(env, ARM_FEATURE_V6)) { + return IS_USER(s) ? 1 : 0; + } + break; + case 0x0e070f95: /* 0,c7,c5,4 : ISB */ + case 0x0e070f9a: /* 0,c7,c10,4: DSB */ + case 0x0e070fba: /* 0,c7,c10,5: DMB */ + /* Barriers in both v6 and v7 */ + if (arm_feature(env, ARM_FEATURE_V6)) { + return 0; + } + break; + default: + break; } if (IS_USER(s) && !cp15_user_ok(env, insn)) { @@ -2678,8 +2774,7 @@ static int disas_cp15_insn(CPUARMState *env, DisasContext *s, uint32_t insn) /* Normally we would always end the TB here, but Linux * arch/arm/mach-pxa/sleep.S expects two instructions following * an MMU enable to execute from cache. Imitate this behaviour. */ - if (!arm_feature(env, ARM_FEATURE_XSCALE) || - (insn & 0x0fff0fff) != 0x0e010f10) + if (!arm_feature(env, ARM_FEATURE_XSCALE) || (insn & 0x0fff0fff) != 0x0e010f10) gen_lookup_tb(s); } tcg_temp_free_i32(tmp2); @@ -2687,41 +2782,38 @@ static int disas_cp15_insn(CPUARMState *env, DisasContext *s, uint32_t insn) } #define VFP_REG_SHR(x, n) (((n) > 0) ? 
(x) >> (n) : (x) << -(n)) -#define VFP_SREG(insn, bigbit, smallbit) \ - ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1)) -#define VFP_DREG(reg, insn, bigbit, smallbit) do { \ - if (arm_feature(env, ARM_FEATURE_VFP3)) { \ - reg = (((insn) >> (bigbit)) & 0x0f) \ - | (((insn) >> ((smallbit) - 4)) & 0x10); \ - } else { \ - if (insn & (1 << (smallbit))) \ - return 1; \ - reg = ((insn) >> (bigbit)) & 0x0f; \ - }} while (0) +#define VFP_SREG(insn, bigbit, smallbit) ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1)) +#define VFP_DREG(reg, insn, bigbit, smallbit) \ + do { \ + if (arm_feature(env, ARM_FEATURE_VFP3)) { \ + reg = (((insn) >> (bigbit)) & 0x0f) | (((insn) >> ((smallbit) -4)) & 0x10); \ + } else { \ + if (insn & (1 << (smallbit))) \ + return 1; \ + reg = ((insn) >> (bigbit)) & 0x0f; \ + } \ + } while (0) #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22) #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22) -#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7) -#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7) -#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5) -#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5) +#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7) +#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7) +#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5) +#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5) /* Move between integer and VFP cores. */ -static TCGv gen_vfp_mrs(void) -{ +static TCGv gen_vfp_mrs(void) { TCGv tmp = tcg_temp_new_i32(); tcg_gen_mov_i32(tmp, cpu_F0s); return tmp; } -static void gen_vfp_msr(TCGv tmp) -{ +static void gen_vfp_msr(TCGv tmp) { tcg_gen_mov_i32(cpu_F0s, tmp); tcg_temp_free_i32(tmp); } -static void gen_neon_dup_u8(TCGv var, int shift) -{ +static void gen_neon_dup_u8(TCGv var, int shift) { TCGv tmp = tcg_temp_new_i32(); if (shift) tcg_gen_shri_i32(var, var, shift); @@ -2733,8 +2825,7 @@ static void gen_neon_dup_u8(TCGv var, int shift) tcg_temp_free_i32(tmp); } -static void gen_neon_dup_low16(TCGv var) -{ +static void gen_neon_dup_low16(TCGv var) { TCGv tmp = tcg_temp_new_i32(); tcg_gen_ext16u_i32(var, var); tcg_gen_shli_i32(tmp, var, 16); @@ -2742,8 +2833,7 @@ static void gen_neon_dup_low16(TCGv var) tcg_temp_free_i32(tmp); } -static void gen_neon_dup_high16(TCGv var) -{ +static void gen_neon_dup_high16(TCGv var) { TCGv tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(var, var, 0xffff0000); tcg_gen_shri_i32(tmp, var, 16); @@ -2751,32 +2841,30 @@ static void gen_neon_dup_high16(TCGv var) tcg_temp_free_i32(tmp); } -static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size) -{ +static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size) { /* Load a single Neon element and replicate into a 32 bit TCG reg */ TCGv tmp; switch (size) { - case 0: - tmp = gen_ld8u(addr, IS_USER(s)); - gen_neon_dup_u8(tmp, 0); - break; - case 1: - tmp = gen_ld16u(addr, IS_USER(s)); - gen_neon_dup_low16(tmp); - break; - case 2: - tmp = gen_ld32(addr, IS_USER(s)); - break; - default: /* Avoid compiler warnings. */ - abort(); + case 0: + tmp = gen_ld8u(addr, IS_USER(s)); + gen_neon_dup_u8(tmp, 0); + break; + case 1: + tmp = gen_ld16u(addr, IS_USER(s)); + gen_neon_dup_low16(tmp); + break; + case 2: + tmp = gen_ld32(addr, IS_USER(s)); + break; + default: /* Avoid compiler warnings. */ + abort(); } return tmp; } /* Disassemble a VFP instruction. Returns nonzero if an error occurred (ie. an undefined instruction). 
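On the VFP_SREG/VFP_DREG macros reformatted above: a single-precision register number is 5 bits, stored as a 4-bit field plus one extra bit elsewhere in the instruction; the field supplies bits 4:1 and the extra bit supplies bit 0 (for doubles under VFPv3, VFP_DREG makes the extra bit become bit 4 instead). An equivalent standalone decoder, for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative restatement of VFP_SREG, not the macro itself. */
    static unsigned vfp_sreg(uint32_t insn, int bigbit, int smallbit) {
        unsigned field = (insn >> bigbit) & 0xf; /* 4-bit register field */
        unsigned extra = (insn >> smallbit) & 1; /* the D/N/M "extra" bit */
        return (field << 1) | extra;             /* 5-bit S-register number */
    }

    int main(void) {
        /* e.g. Sd with bits[15:12] = 0x3 and bit 22 set decodes to S7 */
        uint32_t insn = (0x3u << 12) | (1u << 22);
        printf("Sd = %u\n", vfp_sreg(insn, 12, 22));
        return 0;
    }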
*/ -static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn) -{ +static int disas_vfp_insn(CPUARMState *env, DisasContext *s, uint32_t insn) { uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask; int dp, veclen; TCGv addr; @@ -2791,804 +2879,789 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn) if ((insn & 0x0fe00fff) != 0x0ee00a10) return 1; rn = (insn >> 16) & 0xf; - if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC - && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) + if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) return 1; } dp = ((insn & 0xf00) == 0xb00); switch ((insn >> 24) & 0xf) { - case 0xe: - if (insn & (1 << 4)) { - /* single register transfer */ - rd = (insn >> 12) & 0xf; - if (dp) { - int size; - int pass; + case 0xe: + if (insn & (1 << 4)) { + /* single register transfer */ + rd = (insn >> 12) & 0xf; + if (dp) { + int size; + int pass; - VFP_DREG_N(rn, insn); - if (insn & 0xf) - return 1; - if (insn & 0x00c00060 - && !arm_feature(env, ARM_FEATURE_NEON)) - return 1; + VFP_DREG_N(rn, insn); + if (insn & 0xf) + return 1; + if (insn & 0x00c00060 && !arm_feature(env, ARM_FEATURE_NEON)) + return 1; - pass = (insn >> 21) & 1; - if (insn & (1 << 22)) { - size = 0; - offset = ((insn >> 5) & 3) * 8; - } else if (insn & (1 << 5)) { - size = 1; - offset = (insn & (1 << 6)) ? 16 : 0; - } else { - size = 2; - offset = 0; - } - if (insn & ARM_CP_RW_BIT) { - /* vfp->arm */ - tmp = neon_load_reg(rn, pass); - switch (size) { - case 0: - if (offset) - tcg_gen_shri_i32(tmp, tmp, offset); - if (insn & (1 << 23)) - gen_uxtb(tmp); - else - gen_sxtb(tmp); - break; - case 1: + pass = (insn >> 21) & 1; + if (insn & (1 << 22)) { + size = 0; + offset = ((insn >> 5) & 3) * 8; + } else if (insn & (1 << 5)) { + size = 1; + offset = (insn & (1 << 6)) ? 
16 : 0; + } else { + size = 2; + offset = 0; + } + if (insn & ARM_CP_RW_BIT) { + /* vfp->arm */ + tmp = neon_load_reg(rn, pass); + switch (size) { + case 0: + if (offset) + tcg_gen_shri_i32(tmp, tmp, offset); + if (insn & (1 << 23)) + gen_uxtb(tmp); + else + gen_sxtb(tmp); + break; + case 1: + if (insn & (1 << 23)) { + if (offset) { + tcg_gen_shri_i32(tmp, tmp, 16); + } else { + gen_uxth(tmp); + } + } else { + if (offset) { + tcg_gen_sari_i32(tmp, tmp, 16); + } else { + gen_sxth(tmp); + } + } + break; + case 2: + break; + } + store_reg(s, rd, tmp); + } else { + /* arm->vfp */ + tmp = load_reg(s, rd); if (insn & (1 << 23)) { - if (offset) { - tcg_gen_shri_i32(tmp, tmp, 16); - } else { - gen_uxth(tmp); + /* VDUP */ + if (size == 0) { + gen_neon_dup_u8(tmp, 0); + } else if (size == 1) { + gen_neon_dup_low16(tmp); } + for (n = 0; n <= pass * 2; n++) { + tmp2 = tcg_temp_new_i32(); + tcg_gen_mov_i32(tmp2, tmp); + neon_store_reg(rn, n, tmp2); + } + neon_store_reg(rn, n, tmp); } else { - if (offset) { - tcg_gen_sari_i32(tmp, tmp, 16); - } else { - gen_sxth(tmp); + /* VMOV */ + switch (size) { + case 0: + tmp2 = neon_load_reg(rn, pass); + gen_bfi(tmp, tmp2, tmp, offset, 0xff); + tcg_temp_free_i32(tmp2); + break; + case 1: + tmp2 = neon_load_reg(rn, pass); + gen_bfi(tmp, tmp2, tmp, offset, 0xffff); + tcg_temp_free_i32(tmp2); + break; + case 2: + break; } + neon_store_reg(rn, pass, tmp); } - break; - case 2: - break; } - store_reg(s, rd, tmp); - } else { - /* arm->vfp */ - tmp = load_reg(s, rd); - if (insn & (1 << 23)) { - /* VDUP */ - if (size == 0) { - gen_neon_dup_u8(tmp, 0); - } else if (size == 1) { - gen_neon_dup_low16(tmp); + } else { /* !dp */ + if ((insn & 0x6f) != 0x00) + return 1; + rn = VFP_SREG_N(insn); + if (insn & ARM_CP_RW_BIT) { + /* vfp->arm */ + if (insn & (1 << 21)) { + /* system register */ + rn >>= 1; + + switch (rn) { + case ARM_VFP_FPSID: + /* VFP2 allows access to FSID from userspace. + VFP3 restricts all id registers to privileged + accesses. */ + if (IS_USER(s) && arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + tmp = load_cpu_field(vfp.xregs[rn]); + break; + case ARM_VFP_FPEXC: + if (IS_USER(s)) + return 1; + tmp = load_cpu_field(vfp.xregs[rn]); + break; + case ARM_VFP_FPINST: + case ARM_VFP_FPINST2: + /* Not present in VFP3. */ + if (IS_USER(s) || arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + tmp = load_cpu_field(vfp.xregs[rn]); + break; + case ARM_VFP_FPSCR: + if (rd == 15) { + tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]); + tcg_gen_andi_i32(tmp, tmp, 0xf0000000); + } else { + tmp = tcg_temp_new_i32(); + gen_helper_vfp_get_fpscr(tmp, cpu_env); + } + break; + case ARM_VFP_MVFR0: + case ARM_VFP_MVFR1: + if (IS_USER(s) || !arm_feature(env, ARM_FEATURE_MVFR)) + return 1; + tmp = load_cpu_field(vfp.xregs[rn]); + break; + default: + return 1; + } + } else { + gen_mov_F0_vreg(0, rn); + tmp = gen_vfp_mrs(); } - for (n = 0; n <= pass * 2; n++) { - tmp2 = tcg_temp_new_i32(); - tcg_gen_mov_i32(tmp2, tmp); - neon_store_reg(rn, n, tmp2); + if (rd == 15) { + /* Set the 4 flag bits in the CPSR. 
*/ + gen_set_nzcv(tmp); + tcg_temp_free_i32(tmp); + } else { + store_reg(s, rd, tmp); } - neon_store_reg(rn, n, tmp); } else { - /* VMOV */ - switch (size) { - case 0: - tmp2 = neon_load_reg(rn, pass); - gen_bfi(tmp, tmp2, tmp, offset, 0xff); - tcg_temp_free_i32(tmp2); - break; - case 1: - tmp2 = neon_load_reg(rn, pass); - gen_bfi(tmp, tmp2, tmp, offset, 0xffff); - tcg_temp_free_i32(tmp2); - break; - case 2: - break; + /* arm->vfp */ + tmp = load_reg(s, rd); + if (insn & (1 << 21)) { + rn >>= 1; + /* system register */ + switch (rn) { + case ARM_VFP_FPSID: + case ARM_VFP_MVFR0: + case ARM_VFP_MVFR1: + /* Writes are ignored. */ + break; + case ARM_VFP_FPSCR: + gen_helper_vfp_set_fpscr(cpu_env, tmp); + tcg_temp_free_i32(tmp); + gen_lookup_tb(s); + break; + case ARM_VFP_FPEXC: + if (IS_USER(s)) + return 1; + /* TODO: VFP subarchitecture support. + * For now, keep the EN bit only */ + tcg_gen_andi_i32(tmp, tmp, 1 << 30); + store_cpu_field(tmp, vfp.xregs[rn]); + gen_lookup_tb(s); + break; + case ARM_VFP_FPINST: + case ARM_VFP_FPINST2: + store_cpu_field(tmp, vfp.xregs[rn]); + break; + default: + return 1; + } + } else { + gen_vfp_msr(tmp); + gen_mov_vreg_F0(0, rn); } - neon_store_reg(rn, pass, tmp); } } - } else { /* !dp */ - if ((insn & 0x6f) != 0x00) - return 1; - rn = VFP_SREG_N(insn); - if (insn & ARM_CP_RW_BIT) { - /* vfp->arm */ - if (insn & (1 << 21)) { - /* system register */ - rn >>= 1; - - switch (rn) { - case ARM_VFP_FPSID: - /* VFP2 allows access to FSID from userspace. - VFP3 restricts all id registers to privileged - accesses. */ - if (IS_USER(s) - && arm_feature(env, ARM_FEATURE_VFP3)) - return 1; - tmp = load_cpu_field(vfp.xregs[rn]); - break; - case ARM_VFP_FPEXC: - if (IS_USER(s)) - return 1; - tmp = load_cpu_field(vfp.xregs[rn]); - break; - case ARM_VFP_FPINST: - case ARM_VFP_FPINST2: - /* Not present in VFP3. */ - if (IS_USER(s) - || arm_feature(env, ARM_FEATURE_VFP3)) - return 1; - tmp = load_cpu_field(vfp.xregs[rn]); - break; - case ARM_VFP_FPSCR: - if (rd == 15) { - tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]); - tcg_gen_andi_i32(tmp, tmp, 0xf0000000); - } else { - tmp = tcg_temp_new_i32(); - gen_helper_vfp_get_fpscr(tmp, cpu_env); - } - break; - case ARM_VFP_MVFR0: - case ARM_VFP_MVFR1: - if (IS_USER(s) - || !arm_feature(env, ARM_FEATURE_MVFR)) - return 1; - tmp = load_cpu_field(vfp.xregs[rn]); - break; - default: - return 1; - } + } else { + /* data processing */ + /* The opcode is in bits 23, 21, 20 and 6. */ + op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1); + if (dp) { + if (op == 15) { + /* rn is opcode */ + rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1); } else { - gen_mov_F0_vreg(0, rn); - tmp = gen_vfp_mrs(); + /* rn is register number */ + VFP_DREG_N(rn, insn); } - if (rd == 15) { - /* Set the 4 flag bits in the CPSR. */ - gen_set_nzcv(tmp); - tcg_temp_free_i32(tmp); + + if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) { + /* Integer or single precision destination. */ + rd = VFP_SREG_D(insn); } else { - store_reg(s, rd, tmp); + VFP_DREG_D(rd, insn); } - } else { - /* arm->vfp */ - tmp = load_reg(s, rd); - if (insn & (1 << 21)) { - rn >>= 1; - /* system register */ - switch (rn) { - case ARM_VFP_FPSID: - case ARM_VFP_MVFR0: - case ARM_VFP_MVFR1: - /* Writes are ignored. */ - break; - case ARM_VFP_FPSCR: - gen_helper_vfp_set_fpscr(cpu_env, tmp); - tcg_temp_free_i32(tmp); - gen_lookup_tb(s); - break; - case ARM_VFP_FPEXC: - if (IS_USER(s)) - return 1; - /* TODO: VFP subarchitecture support. 
- * For now, keep the EN bit only */ - tcg_gen_andi_i32(tmp, tmp, 1 << 30); - store_cpu_field(tmp, vfp.xregs[rn]); - gen_lookup_tb(s); - break; - case ARM_VFP_FPINST: - case ARM_VFP_FPINST2: - store_cpu_field(tmp, vfp.xregs[rn]); - break; - default: - return 1; - } + if (op == 15 && (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) { + /* VCVT from int is always from S reg regardless of dp bit. + * VCVT with immediate frac_bits has same format as SREG_M + */ + rm = VFP_SREG_M(insn); } else { - gen_vfp_msr(tmp); - gen_mov_vreg_F0(0, rn); + VFP_DREG_M(rm, insn); } - } - } - } else { - /* data processing */ - /* The opcode is in bits 23, 21, 20 and 6. */ - op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1); - if (dp) { - if (op == 15) { - /* rn is opcode */ - rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1); - } else { - /* rn is register number */ - VFP_DREG_N(rn, insn); - } - - if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) { - /* Integer or single precision destination. */ - rd = VFP_SREG_D(insn); } else { - VFP_DREG_D(rd, insn); - } - if (op == 15 && - (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) { - /* VCVT from int is always from S reg regardless of dp bit. - * VCVT with immediate frac_bits has same format as SREG_M + rn = VFP_SREG_N(insn); + if (op == 15 && rn == 15) { + /* Double precision destination. */ + VFP_DREG_D(rd, insn); + } else { + rd = VFP_SREG_D(insn); + } + /* NB that we implicitly rely on the encoding for the frac_bits + * in VCVT of fixed to float being the same as that of an SREG_M */ rm = VFP_SREG_M(insn); - } else { - VFP_DREG_M(rm, insn); } - } else { - rn = VFP_SREG_N(insn); - if (op == 15 && rn == 15) { - /* Double precision destination. */ - VFP_DREG_D(rd, insn); - } else { - rd = VFP_SREG_D(insn); - } - /* NB that we implicitly rely on the encoding for the frac_bits - * in VCVT of fixed to float being the same as that of an SREG_M - */ - rm = VFP_SREG_M(insn); - } - - veclen = s->vec_len; - if (op == 15 && rn > 3) - veclen = 0; - /* Shut up compiler warnings. */ - delta_m = 0; - delta_d = 0; - bank_mask = 0; + veclen = s->vec_len; + if (op == 15 && rn > 3) + veclen = 0; - if (veclen > 0) { - if (dp) - bank_mask = 0xc; - else - bank_mask = 0x18; + /* Shut up compiler warnings. */ + delta_m = 0; + delta_d = 0; + bank_mask = 0; - /* Figure out what type of vector operation this is. */ - if ((rd & bank_mask) == 0) { - /* scalar */ - veclen = 0; - } else { + if (veclen > 0) { if (dp) - delta_d = (s->vec_stride >> 1) + 1; + bank_mask = 0xc; else - delta_d = s->vec_stride + 1; + bank_mask = 0x18; - if ((rm & bank_mask) == 0) { - /* mixed scalar/vector */ - delta_m = 0; + /* Figure out what type of vector operation this is. */ + if ((rd & bank_mask) == 0) { + /* scalar */ + veclen = 0; } else { - /* vector */ - delta_m = delta_d; - } - } - } + if (dp) + delta_d = (s->vec_stride >> 1) + 1; + else + delta_d = s->vec_stride + 1; - /* Load the initial operands. */ - if (op == 15) { - switch (rn) { - case 16: - case 17: - /* Integer source */ - gen_mov_F0_vreg(0, rm); - break; - case 8: - case 9: - /* Compare */ - gen_mov_F0_vreg(dp, rd); - gen_mov_F1_vreg(dp, rm); - break; - case 10: - case 11: - /* Compare with zero */ - gen_mov_F0_vreg(dp, rd); - gen_vfp_F1_ld0(dp); - break; - case 20: - case 21: - case 22: - case 23: - case 28: - case 29: - case 30: - case 31: - /* Source and destination the same. 
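The data-processing branch above assembles its 4-bit opcode from instruction bits 23, 21, 20 and 6, as the comment in the hunk notes. A self-contained restatement of that extraction, illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustration of the opcode assembly in the hunk, not QEMU code.
     * Each shift-and-mask drops one scattered bit group into place. */
    static unsigned vfp_dp_op(uint32_t insn) {
        return ((insn >> 20) & 8)  /* insn[23]    -> op[3]   */
             | ((insn >> 19) & 6)  /* insn[21:20] -> op[2:1] */
             | ((insn >> 6) & 1);  /* insn[6]     -> op[0]   */
    }

    int main(void) {
        /* bits 23 and 6 set, bits 21:20 = 0b01 -> op = 11 (VFNMS) */
        uint32_t insn = (1u << 23) | (1u << 20) | (1u << 6);
        printf("op = %u\n", vfp_dp_op(insn));
        return 0;
    }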
*/ - gen_mov_F0_vreg(dp, rd); - break; - case 4: - case 5: - case 6: - case 7: - /* VCVTB, VCVTT: only present with the halfprec extension, - * UNPREDICTABLE if bit 8 is set (we choose to UNDEF) - */ - if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) { - return 1; + if ((rm & bank_mask) == 0) { + /* mixed scalar/vector */ + delta_m = 0; + } else { + /* vector */ + delta_m = delta_d; + } } - /* Otherwise fall through */ - default: - /* One source operand. */ - gen_mov_F0_vreg(dp, rm); - break; } - } else { - /* Two source operands. */ - gen_mov_F0_vreg(dp, rn); - gen_mov_F1_vreg(dp, rm); - } - for (;;) { - /* Perform the calculation. */ - switch (op) { - case 0: /* VMLA: fd + (fn * fm) */ - /* Note that order of inputs to the add matters for NaNs */ - gen_vfp_F1_mul(dp); - gen_mov_F0_vreg(dp, rd); - gen_vfp_add(dp); - break; - case 1: /* VMLS: fd + -(fn * fm) */ - gen_vfp_mul(dp); - gen_vfp_F1_neg(dp); - gen_mov_F0_vreg(dp, rd); - gen_vfp_add(dp); - break; - case 2: /* VNMLS: -fd + (fn * fm) */ - /* Note that it isn't valid to replace (-A + B) with (B - A) - * or similar plausible looking simplifications - * because this will give wrong results for NaNs. - */ - gen_vfp_F1_mul(dp); - gen_mov_F0_vreg(dp, rd); - gen_vfp_neg(dp); - gen_vfp_add(dp); - break; - case 3: /* VNMLA: -fd + -(fn * fm) */ - gen_vfp_mul(dp); - gen_vfp_F1_neg(dp); - gen_mov_F0_vreg(dp, rd); - gen_vfp_neg(dp); - gen_vfp_add(dp); - break; - case 4: /* mul: fn * fm */ - gen_vfp_mul(dp); - break; - case 5: /* nmul: -(fn * fm) */ - gen_vfp_mul(dp); - gen_vfp_neg(dp); - break; - case 6: /* add: fn + fm */ - gen_vfp_add(dp); - break; - case 7: /* sub: fn - fm */ - gen_vfp_sub(dp); - break; - case 8: /* div: fn / fm */ - gen_vfp_div(dp); - break; - case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */ - case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */ - case 12: /* VFMA : fd = muladd( fd, fn, fm) */ - case 13: /* VFMS : fd = muladd( fd, -fn, fm) */ - /* These are fused multiply-add, and must be done as one - * floating point operation with no rounding between the - * multiplication and addition steps. - * NB that doing the negations here as separate steps is - * correct : an input NaN should come out with its sign bit - * flipped if it is a negated-input. 
- */ - if (!arm_feature(env, ARM_FEATURE_VFP4)) { - return 1; - } - if (dp) { - TCGv_ptr fpst; - TCGv_i64 frd; - if (op & 1) { - /* VFNMS, VFMS */ - gen_helper_vfp_negd(cpu_F0d, cpu_F0d); - } - frd = tcg_temp_new_i64(); - tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd)); - if (op & 2) { - /* VFNMA, VFNMS */ - gen_helper_vfp_negd(frd, frd); - } - fpst = get_fpstatus_ptr(0); - gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d, - cpu_F1d, frd, fpst); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i64(frd); - } else { - TCGv_ptr fpst; - TCGv_i32 frd; - if (op & 1) { - /* VFNMS, VFMS */ - gen_helper_vfp_negs(cpu_F0s, cpu_F0s); - } - frd = tcg_temp_new_i32(); - tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd)); - if (op & 2) { - gen_helper_vfp_negs(frd, frd); - } - fpst = get_fpstatus_ptr(0); - gen_helper_vfp_muladds(cpu_F0s, cpu_F0s, - cpu_F1s, frd, fpst); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(frd); - } - break; - case 14: /* fconst */ - if (!arm_feature(env, ARM_FEATURE_VFP3)) - return 1; - - n = (insn << 12) & 0x80000000; - i = ((insn >> 12) & 0x70) | (insn & 0xf); - if (dp) { - if (i & 0x40) - i |= 0x3f80; - else - i |= 0x4000; - n |= i << 16; - tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32); - } else { - if (i & 0x40) - i |= 0x780; - else - i |= 0x800; - n |= i << 19; - tcg_gen_movi_i32(cpu_F0s, n); - } - break; - case 15: /* extension space */ + /* Load the initial operands. */ + if (op == 15) { switch (rn) { - case 0: /* cpy */ - /* no-op */ - break; - case 1: /* abs */ - gen_vfp_abs(dp); - break; - case 2: /* neg */ - gen_vfp_neg(dp); - break; - case 3: /* sqrt */ - gen_vfp_sqrt(dp); - break; - case 4: /* vcvtb.f32.f16 */ - tmp = gen_vfp_mrs(); - tcg_gen_ext16u_i32(tmp, tmp); - gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env); - tcg_temp_free_i32(tmp); - break; - case 5: /* vcvtt.f32.f16 */ - tmp = gen_vfp_mrs(); - tcg_gen_shri_i32(tmp, tmp, 16); - gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env); - tcg_temp_free_i32(tmp); - break; - case 6: /* vcvtb.f16.f32 */ - tmp = tcg_temp_new_i32(); - gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env); - gen_mov_F0_vreg(0, rd); - tmp2 = gen_vfp_mrs(); - tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000); - tcg_gen_or_i32(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - gen_vfp_msr(tmp); - break; - case 7: /* vcvtt.f16.f32 */ - tmp = tcg_temp_new_i32(); - gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env); - tcg_gen_shli_i32(tmp, tmp, 16); - gen_mov_F0_vreg(0, rd); - tmp2 = gen_vfp_mrs(); - tcg_gen_ext16u_i32(tmp2, tmp2); - tcg_gen_or_i32(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - gen_vfp_msr(tmp); - break; - case 8: /* cmp */ - gen_vfp_cmp(dp); - break; - case 9: /* cmpe */ - gen_vfp_cmpe(dp); - break; - case 10: /* cmpz */ - gen_vfp_cmp(dp); - break; - case 11: /* cmpez */ - gen_vfp_F1_ld0(dp); - gen_vfp_cmpe(dp); - break; - case 15: /* single<->double conversion */ - if (dp) - gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env); - else - gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env); - break; - case 16: /* fuito */ - gen_vfp_uito(dp, 0); - break; - case 17: /* fsito */ - gen_vfp_sito(dp, 0); - break; - case 20: /* fshto */ - if (!arm_feature(env, ARM_FEATURE_VFP3)) - return 1; - gen_vfp_shto(dp, 16 - rm, 0); - break; - case 21: /* fslto */ - if (!arm_feature(env, ARM_FEATURE_VFP3)) - return 1; - gen_vfp_slto(dp, 32 - rm, 0); - break; - case 22: /* fuhto */ - if (!arm_feature(env, ARM_FEATURE_VFP3)) - return 1; - gen_vfp_uhto(dp, 16 - rm, 0); - break; - case 23: /* fulto */ - if (!arm_feature(env, ARM_FEATURE_VFP3)) - return 1; - 
gen_vfp_ulto(dp, 32 - rm, 0); - break; - case 24: /* ftoui */ - gen_vfp_toui(dp, 0); - break; - case 25: /* ftouiz */ - gen_vfp_touiz(dp, 0); - break; - case 26: /* ftosi */ - gen_vfp_tosi(dp, 0); - break; - case 27: /* ftosiz */ - gen_vfp_tosiz(dp, 0); - break; - case 28: /* ftosh */ - if (!arm_feature(env, ARM_FEATURE_VFP3)) - return 1; - gen_vfp_tosh(dp, 16 - rm, 0); - break; - case 29: /* ftosl */ - if (!arm_feature(env, ARM_FEATURE_VFP3)) - return 1; - gen_vfp_tosl(dp, 32 - rm, 0); - break; - case 30: /* ftouh */ - if (!arm_feature(env, ARM_FEATURE_VFP3)) - return 1; - gen_vfp_touh(dp, 16 - rm, 0); - break; - case 31: /* ftoul */ - if (!arm_feature(env, ARM_FEATURE_VFP3)) - return 1; - gen_vfp_toul(dp, 32 - rm, 0); - break; - default: /* undefined */ - return 1; + case 16: + case 17: + /* Integer source */ + gen_mov_F0_vreg(0, rm); + break; + case 8: + case 9: + /* Compare */ + gen_mov_F0_vreg(dp, rd); + gen_mov_F1_vreg(dp, rm); + break; + case 10: + case 11: + /* Compare with zero */ + gen_mov_F0_vreg(dp, rd); + gen_vfp_F1_ld0(dp); + break; + case 20: + case 21: + case 22: + case 23: + case 28: + case 29: + case 30: + case 31: + /* Source and destination the same. */ + gen_mov_F0_vreg(dp, rd); + break; + case 4: + case 5: + case 6: + case 7: + /* VCVTB, VCVTT: only present with the halfprec extension, + * UNPREDICTABLE if bit 8 is set (we choose to UNDEF) + */ + if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) { + return 1; + } + /* Otherwise fall through */ + default: + /* One source operand. */ + gen_mov_F0_vreg(dp, rm); + break; } - break; - default: /* undefined */ - return 1; + } else { + /* Two source operands. */ + gen_mov_F0_vreg(dp, rn); + gen_mov_F1_vreg(dp, rm); } - /* Write back the result. */ - if (op == 15 && (rn >= 8 && rn <= 11)) - ; /* Comparison, do nothing. */ - else if (op == 15 && dp && ((rn & 0x1c) == 0x18)) - /* VCVT double to int: always integer result. */ - gen_mov_vreg_F0(0, rd); - else if (op == 15 && rn == 15) - /* conversion */ - gen_mov_vreg_F0(!dp, rd); - else - gen_mov_vreg_F0(dp, rd); + for (;;) { + /* Perform the calculation. */ + switch (op) { + case 0: /* VMLA: fd + (fn * fm) */ + /* Note that order of inputs to the add matters for NaNs */ + gen_vfp_F1_mul(dp); + gen_mov_F0_vreg(dp, rd); + gen_vfp_add(dp); + break; + case 1: /* VMLS: fd + -(fn * fm) */ + gen_vfp_mul(dp); + gen_vfp_F1_neg(dp); + gen_mov_F0_vreg(dp, rd); + gen_vfp_add(dp); + break; + case 2: /* VNMLS: -fd + (fn * fm) */ + /* Note that it isn't valid to replace (-A + B) with (B - A) + * or similar plausible looking simplifications + * because this will give wrong results for NaNs. + */ + gen_vfp_F1_mul(dp); + gen_mov_F0_vreg(dp, rd); + gen_vfp_neg(dp); + gen_vfp_add(dp); + break; + case 3: /* VNMLA: -fd + -(fn * fm) */ + gen_vfp_mul(dp); + gen_vfp_F1_neg(dp); + gen_mov_F0_vreg(dp, rd); + gen_vfp_neg(dp); + gen_vfp_add(dp); + break; + case 4: /* mul: fn * fm */ + gen_vfp_mul(dp); + break; + case 5: /* nmul: -(fn * fm) */ + gen_vfp_mul(dp); + gen_vfp_neg(dp); + break; + case 6: /* add: fn + fm */ + gen_vfp_add(dp); + break; + case 7: /* sub: fn - fm */ + gen_vfp_sub(dp); + break; + case 8: /* div: fn / fm */ + gen_vfp_div(dp); + break; + case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */ + case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */ + case 12: /* VFMA : fd = muladd( fd, fn, fm) */ + case 13: /* VFMS : fd = muladd( fd, -fn, fm) */ + /* These are fused multiply-add, and must be done as one + * floating point operation with no rounding between the + * multiplication and addition steps. 
+ * NB that doing the negations here as separate steps is + * correct : an input NaN should come out with its sign bit + * flipped if it is a negated-input. + */ + if (!arm_feature(env, ARM_FEATURE_VFP4)) { + return 1; + } + if (dp) { + TCGv_ptr fpst; + TCGv_i64 frd; + if (op & 1) { + /* VFNMS, VFMS */ + gen_helper_vfp_negd(cpu_F0d, cpu_F0d); + } + frd = tcg_temp_new_i64(); + tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd)); + if (op & 2) { + /* VFNMA, VFNMS */ + gen_helper_vfp_negd(frd, frd); + } + fpst = get_fpstatus_ptr(0); + gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d, cpu_F1d, frd, fpst); + tcg_temp_free_ptr(fpst); + tcg_temp_free_i64(frd); + } else { + TCGv_ptr fpst; + TCGv_i32 frd; + if (op & 1) { + /* VFNMS, VFMS */ + gen_helper_vfp_negs(cpu_F0s, cpu_F0s); + } + frd = tcg_temp_new_i32(); + tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd)); + if (op & 2) { + gen_helper_vfp_negs(frd, frd); + } + fpst = get_fpstatus_ptr(0); + gen_helper_vfp_muladds(cpu_F0s, cpu_F0s, cpu_F1s, frd, fpst); + tcg_temp_free_ptr(fpst); + tcg_temp_free_i32(frd); + } + break; + case 14: /* fconst */ + if (!arm_feature(env, ARM_FEATURE_VFP3)) + return 1; - /* break out of the loop if we have finished */ - if (veclen == 0) - break; + n = (insn << 12) & 0x80000000; + i = ((insn >> 12) & 0x70) | (insn & 0xf); + if (dp) { + if (i & 0x40) + i |= 0x3f80; + else + i |= 0x4000; + n |= i << 16; + tcg_gen_movi_i64(cpu_F0d, ((uint64_t) n) << 32); + } else { + if (i & 0x40) + i |= 0x780; + else + i |= 0x800; + n |= i << 19; + tcg_gen_movi_i32(cpu_F0s, n); + } + break; + case 15: /* extension space */ + switch (rn) { + case 0: /* cpy */ + /* no-op */ + break; + case 1: /* abs */ + gen_vfp_abs(dp); + break; + case 2: /* neg */ + gen_vfp_neg(dp); + break; + case 3: /* sqrt */ + gen_vfp_sqrt(dp); + break; + case 4: /* vcvtb.f32.f16 */ + tmp = gen_vfp_mrs(); + tcg_gen_ext16u_i32(tmp, tmp); + gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env); + tcg_temp_free_i32(tmp); + break; + case 5: /* vcvtt.f32.f16 */ + tmp = gen_vfp_mrs(); + tcg_gen_shri_i32(tmp, tmp, 16); + gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env); + tcg_temp_free_i32(tmp); + break; + case 6: /* vcvtb.f16.f32 */ + tmp = tcg_temp_new_i32(); + gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env); + gen_mov_F0_vreg(0, rd); + tmp2 = gen_vfp_mrs(); + tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000); + tcg_gen_or_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + gen_vfp_msr(tmp); + break; + case 7: /* vcvtt.f16.f32 */ + tmp = tcg_temp_new_i32(); + gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env); + tcg_gen_shli_i32(tmp, tmp, 16); + gen_mov_F0_vreg(0, rd); + tmp2 = gen_vfp_mrs(); + tcg_gen_ext16u_i32(tmp2, tmp2); + tcg_gen_or_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + gen_vfp_msr(tmp); + break; + case 8: /* cmp */ + gen_vfp_cmp(dp); + break; + case 9: /* cmpe */ + gen_vfp_cmpe(dp); + break; + case 10: /* cmpz */ + gen_vfp_cmp(dp); + break; + case 11: /* cmpez */ + gen_vfp_F1_ld0(dp); + gen_vfp_cmpe(dp); + break; + case 15: /* single<->double conversion */ + if (dp) + gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env); + else + gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env); + break; + case 16: /* fuito */ + gen_vfp_uito(dp, 0); + break; + case 17: /* fsito */ + gen_vfp_sito(dp, 0); + break; + case 20: /* fshto */ + if (!arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + gen_vfp_shto(dp, 16 - rm, 0); + break; + case 21: /* fslto */ + if (!arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + gen_vfp_slto(dp, 32 - rm, 0); + break; + case 22: /* fuhto */ + if 
(!arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + gen_vfp_uhto(dp, 16 - rm, 0); + break; + case 23: /* fulto */ + if (!arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + gen_vfp_ulto(dp, 32 - rm, 0); + break; + case 24: /* ftoui */ + gen_vfp_toui(dp, 0); + break; + case 25: /* ftouiz */ + gen_vfp_touiz(dp, 0); + break; + case 26: /* ftosi */ + gen_vfp_tosi(dp, 0); + break; + case 27: /* ftosiz */ + gen_vfp_tosiz(dp, 0); + break; + case 28: /* ftosh */ + if (!arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + gen_vfp_tosh(dp, 16 - rm, 0); + break; + case 29: /* ftosl */ + if (!arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + gen_vfp_tosl(dp, 32 - rm, 0); + break; + case 30: /* ftouh */ + if (!arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + gen_vfp_touh(dp, 16 - rm, 0); + break; + case 31: /* ftoul */ + if (!arm_feature(env, ARM_FEATURE_VFP3)) + return 1; + gen_vfp_toul(dp, 32 - rm, 0); + break; + default: /* undefined */ + return 1; + } + break; + default: /* undefined */ + return 1; + } - if (op == 15 && delta_m == 0) { - /* single source one-many */ - while (veclen--) { - rd = ((rd + delta_d) & (bank_mask - 1)) - | (rd & bank_mask); + /* Write back the result. */ + if (op == 15 && (rn >= 8 && rn <= 11)) + ; /* Comparison, do nothing. */ + else if (op == 15 && dp && ((rn & 0x1c) == 0x18)) + /* VCVT double to int: always integer result. */ + gen_mov_vreg_F0(0, rd); + else if (op == 15 && rn == 15) + /* conversion */ + gen_mov_vreg_F0(!dp, rd); + else gen_mov_vreg_F0(dp, rd); - } - break; - } - /* Setup the next operands. */ - veclen--; - rd = ((rd + delta_d) & (bank_mask - 1)) - | (rd & bank_mask); - if (op == 15) { - /* One source operand. */ - rm = ((rm + delta_m) & (bank_mask - 1)) - | (rm & bank_mask); - gen_mov_F0_vreg(dp, rm); - } else { - /* Two source operands. */ - rn = ((rn + delta_d) & (bank_mask - 1)) - | (rn & bank_mask); - gen_mov_F0_vreg(dp, rn); - if (delta_m) { - rm = ((rm + delta_m) & (bank_mask - 1)) - | (rm & bank_mask); - gen_mov_F1_vreg(dp, rm); + /* break out of the loop if we have finished */ + if (veclen == 0) + break; + + if (op == 15 && delta_m == 0) { + /* single source one-many */ + while (veclen--) { + rd = ((rd + delta_d) & (bank_mask - 1)) | (rd & bank_mask); + gen_mov_vreg_F0(dp, rd); + } + break; + } + /* Setup the next operands. */ + veclen--; + rd = ((rd + delta_d) & (bank_mask - 1)) | (rd & bank_mask); + + if (op == 15) { + /* One source operand. */ + rm = ((rm + delta_m) & (bank_mask - 1)) | (rm & bank_mask); + gen_mov_F0_vreg(dp, rm); + } else { + /* Two source operands. 
*/ + rn = ((rn + delta_d) & (bank_mask - 1)) | (rn & bank_mask); + gen_mov_F0_vreg(dp, rn); + if (delta_m) { + rm = ((rm + delta_m) & (bank_mask - 1)) | (rm & bank_mask); + gen_mov_F1_vreg(dp, rm); + } } } } - } - break; - case 0xc: - case 0xd: - if ((insn & 0x03e00000) == 0x00400000) { - /* two-register transfer */ - rn = (insn >> 16) & 0xf; - rd = (insn >> 12) & 0xf; - if (dp) { - VFP_DREG_M(rm, insn); - } else { - rm = VFP_SREG_M(insn); - } - - if (insn & ARM_CP_RW_BIT) { - /* vfp->arm */ - if (dp) { - gen_mov_F0_vreg(0, rm * 2); - tmp = gen_vfp_mrs(); - store_reg(s, rd, tmp); - gen_mov_F0_vreg(0, rm * 2 + 1); - tmp = gen_vfp_mrs(); - store_reg(s, rn, tmp); - } else { - gen_mov_F0_vreg(0, rm); - tmp = gen_vfp_mrs(); - store_reg(s, rd, tmp); - gen_mov_F0_vreg(0, rm + 1); - tmp = gen_vfp_mrs(); - store_reg(s, rn, tmp); - } - } else { - /* arm->vfp */ + break; + case 0xc: + case 0xd: + if ((insn & 0x03e00000) == 0x00400000) { + /* two-register transfer */ + rn = (insn >> 16) & 0xf; + rd = (insn >> 12) & 0xf; if (dp) { - tmp = load_reg(s, rd); - gen_vfp_msr(tmp); - gen_mov_vreg_F0(0, rm * 2); - tmp = load_reg(s, rn); - gen_vfp_msr(tmp); - gen_mov_vreg_F0(0, rm * 2 + 1); - } else { - tmp = load_reg(s, rd); - gen_vfp_msr(tmp); - gen_mov_vreg_F0(0, rm); - tmp = load_reg(s, rn); - gen_vfp_msr(tmp); - gen_mov_vreg_F0(0, rm + 1); - } - } - } else { - /* Load/store */ - rn = (insn >> 16) & 0xf; - if (dp) - VFP_DREG_D(rd, insn); - else - rd = VFP_SREG_D(insn); - if ((insn & 0x01200000) == 0x01000000) { - /* Single load/store */ - offset = (insn & 0xff) << 2; - if ((insn & (1 << 23)) == 0) - offset = -offset; - if (s->thumb && rn == 15) { - /* This is actually UNPREDICTABLE */ - addr = tcg_temp_new_i32(); - tcg_gen_movi_i32(addr, s->pc & ~2); - } else { - addr = load_reg(s, rn); - } - tcg_gen_addi_i32(addr, addr, offset); - if (insn & (1 << 20)) { - gen_vfp_ld(s, dp, addr); - gen_mov_vreg_F0(dp, rd); + VFP_DREG_M(rm, insn); } else { - gen_mov_F0_vreg(dp, rd); - gen_vfp_st(s, dp, addr); - } - tcg_temp_free_i32(addr); - } else { - /* load/store multiple */ - int w = insn & (1 << 21); - if (dp) - n = (insn >> 1) & 0x7f; - else - n = insn & 0xff; - - if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) { - /* P == U , W == 1 => UNDEF */ - return 1; - } - if (n == 0 || (rd + n) > 32 || (dp && n > 16)) { - /* UNPREDICTABLE cases for bad immediates: we choose to - * UNDEF to avoid generating huge numbers of TCG ops - */ - return 1; - } - if (rn == 15 && w) { - /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */ - return 1; + rm = VFP_SREG_M(insn); } - if (s->thumb && rn == 15) { - /* This is actually UNPREDICTABLE */ - addr = tcg_temp_new_i32(); - tcg_gen_movi_i32(addr, s->pc & ~2); + if (insn & ARM_CP_RW_BIT) { + /* vfp->arm */ + if (dp) { + gen_mov_F0_vreg(0, rm * 2); + tmp = gen_vfp_mrs(); + store_reg(s, rd, tmp); + gen_mov_F0_vreg(0, rm * 2 + 1); + tmp = gen_vfp_mrs(); + store_reg(s, rn, tmp); + } else { + gen_mov_F0_vreg(0, rm); + tmp = gen_vfp_mrs(); + store_reg(s, rd, tmp); + gen_mov_F0_vreg(0, rm + 1); + tmp = gen_vfp_mrs(); + store_reg(s, rn, tmp); + } } else { - addr = load_reg(s, rn); + /* arm->vfp */ + if (dp) { + tmp = load_reg(s, rd); + gen_vfp_msr(tmp); + gen_mov_vreg_F0(0, rm * 2); + tmp = load_reg(s, rn); + gen_vfp_msr(tmp); + gen_mov_vreg_F0(0, rm * 2 + 1); + } else { + tmp = load_reg(s, rd); + gen_vfp_msr(tmp); + gen_mov_vreg_F0(0, rm); + tmp = load_reg(s, rn); + gen_vfp_msr(tmp); + gen_mov_vreg_F0(0, rm + 1); + } } - if (insn & (1 << 24)) /* pre-decrement */ - tcg_gen_addi_i32(addr, 
addr, -((insn & 0xff) << 2)); - + } else { + /* Load/store */ + rn = (insn >> 16) & 0xf; if (dp) - offset = 8; + VFP_DREG_D(rd, insn); else - offset = 4; - for (i = 0; i < n; i++) { - if (insn & ARM_CP_RW_BIT) { - /* load */ + rd = VFP_SREG_D(insn); + if ((insn & 0x01200000) == 0x01000000) { + /* Single load/store */ + offset = (insn & 0xff) << 2; + if ((insn & (1 << 23)) == 0) + offset = -offset; + if (s->thumb && rn == 15) { + /* This is actually UNPREDICTABLE */ + addr = tcg_temp_new_i32(); + tcg_gen_movi_i32(addr, s->pc & ~2); + } else { + addr = load_reg(s, rn); + } + tcg_gen_addi_i32(addr, addr, offset); + if (insn & (1 << 20)) { gen_vfp_ld(s, dp, addr); - gen_mov_vreg_F0(dp, rd + i); + gen_mov_vreg_F0(dp, rd); } else { - /* store */ - gen_mov_F0_vreg(dp, rd + i); + gen_mov_F0_vreg(dp, rd); gen_vfp_st(s, dp, addr); } - tcg_gen_addi_i32(addr, addr, offset); - } - if (w) { - /* writeback */ - if (insn & (1 << 24)) - offset = -offset * n; - else if (dp && (insn & 1)) - offset = 4; + tcg_temp_free_i32(addr); + } else { + /* load/store multiple */ + int w = insn & (1 << 21); + if (dp) + n = (insn >> 1) & 0x7f; else - offset = 0; + n = insn & 0xff; + + if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) { + /* P == U , W == 1 => UNDEF */ + return 1; + } + if (n == 0 || (rd + n) > 32 || (dp && n > 16)) { + /* UNPREDICTABLE cases for bad immediates: we choose to + * UNDEF to avoid generating huge numbers of TCG ops + */ + return 1; + } + if (rn == 15 && w) { + /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */ + return 1; + } + + if (s->thumb && rn == 15) { + /* This is actually UNPREDICTABLE */ + addr = tcg_temp_new_i32(); + tcg_gen_movi_i32(addr, s->pc & ~2); + } else { + addr = load_reg(s, rn); + } + if (insn & (1 << 24)) /* pre-decrement */ + tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2)); - if (offset != 0) + if (dp) + offset = 8; + else + offset = 4; + for (i = 0; i < n; i++) { + if (insn & ARM_CP_RW_BIT) { + /* load */ + gen_vfp_ld(s, dp, addr); + gen_mov_vreg_F0(dp, rd + i); + } else { + /* store */ + gen_mov_F0_vreg(dp, rd + i); + gen_vfp_st(s, dp, addr); + } tcg_gen_addi_i32(addr, addr, offset); - store_reg(s, rn, addr); - } else { - tcg_temp_free_i32(addr); + } + if (w) { + /* writeback */ + if (insn & (1 << 24)) + offset = -offset * n; + else if (dp && (insn & 1)) + offset = 4; + else + offset = 0; + + if (offset != 0) + tcg_gen_addi_i32(addr, addr, offset); + store_reg(s, rn, addr); + } else { + tcg_temp_free_i32(addr); + } } } - } - break; - default: - /* Should never happen. */ - return 1; + break; + default: + /* Should never happen. */ + return 1; } return 0; } -static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest) -{ +static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest) { TranslationBlock *tb; tb = s->tb; if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) { tcg_gen_goto_tb(n); gen_set_pc_im(dest); - tcg_gen_exit_tb((tcg_target_long)tb + n); + tcg_gen_exit_tb((tcg_target_long) tb + n); } else { gen_set_pc_im(dest); tcg_gen_exit_tb(0); } } -static inline void gen_jmp (DisasContext *s, uint32_t dest) -{ +static inline void gen_jmp(DisasContext *s, uint32_t dest) { if (unlikely(s->singlestep_enabled)) { /* An indirect jump so that we still trigger the debug exception. 
*/ if (s->thumb) @@ -3600,8 +3673,7 @@ static inline void gen_jmp (DisasContext *s, uint32_t dest) } } -static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y) -{ +static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y) { if (x) tcg_gen_sari_i32(t0, t0, 16); else @@ -3647,8 +3719,7 @@ static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) } /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */ -static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0) -{ +static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0) { TCGv tmp; if (spsr) { /* ??? This is also undefined in system mode. */ @@ -3669,8 +3740,7 @@ static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0) } /* Returns nonzero if access to the PSR is not permitted. */ -static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val) -{ +static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val) { TCGv tmp; tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, val); @@ -3678,8 +3748,7 @@ static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val } /* Generate an old-style exception return. Marks pc as dead. */ -static void gen_exception_return(DisasContext *s, TCGv pc) -{ +static void gen_exception_return(DisasContext *s, TCGv pc) { TCGv tmp; store_reg(s, 15, pc); tmp = load_cpu_field(spsr); @@ -3689,17 +3758,14 @@ static void gen_exception_return(DisasContext *s, TCGv pc) } /* Generate a v6 exception return. Marks both values as dead. */ -static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr) -{ +static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr) { gen_set_cpsr(cpsr, 0xffffffff); tcg_temp_free_i32(cpsr); store_reg(s, 15, pc); s->is_jmp = DISAS_UPDATE; } -static inline void -gen_set_condexec (DisasContext *s) -{ +static inline void gen_set_condexec(DisasContext *s) { if (s->condexec_mask) { uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1); TCGv tmp = tcg_temp_new_i32(); @@ -3708,118 +3774,131 @@ gen_set_condexec (DisasContext *s) } } -static void gen_exception_insn(DisasContext *s, int offset, int excp) -{ +static void gen_exception_insn(DisasContext *s, int offset, int excp) { gen_set_condexec(s); gen_set_pc_im(s->pc - offset); gen_exception(excp); s->is_jmp = DISAS_JUMP; } -static void gen_nop_hint(DisasContext *s, int val) -{ +static void gen_nop_hint(DisasContext *s, int val) { switch (val) { - case 3: /* wfi */ - gen_set_pc_im(s->pc); - s->is_jmp = DISAS_WFI; - break; - case 2: /* wfe */ - case 4: /* sev */ - /* TODO: Implement SEV and WFE. May help SMP performance. */ - default: /* nop */ - break; - } + case 3: /* wfi */ + gen_set_pc_im(s->pc); + s->is_jmp = DISAS_WFI; + break; + case 2: /* wfe */ + case 4: /* sev */ + /* TODO: Implement SEV and WFE. May help SMP performance. 
*/ + default: /* nop */ + break; + } } #define CPU_V001 cpu_V0, cpu_V0, cpu_V1 -static inline void gen_neon_add(int size, TCGv t0, TCGv t1) -{ +static inline void gen_neon_add(int size, TCGv t0, TCGv t1) { switch (size) { - case 0: gen_helper_neon_add_u8(t0, t0, t1); break; - case 1: gen_helper_neon_add_u16(t0, t0, t1); break; - case 2: tcg_gen_add_i32(t0, t0, t1); break; - default: abort(); + case 0: + gen_helper_neon_add_u8(t0, t0, t1); + break; + case 1: + gen_helper_neon_add_u16(t0, t0, t1); + break; + case 2: + tcg_gen_add_i32(t0, t0, t1); + break; + default: + abort(); } } -static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1) -{ +static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1) { switch (size) { - case 0: gen_helper_neon_sub_u8(t0, t1, t0); break; - case 1: gen_helper_neon_sub_u16(t0, t1, t0); break; - case 2: tcg_gen_sub_i32(t0, t1, t0); break; - default: return; + case 0: + gen_helper_neon_sub_u8(t0, t1, t0); + break; + case 1: + gen_helper_neon_sub_u16(t0, t1, t0); + break; + case 2: + tcg_gen_sub_i32(t0, t1, t0); + break; + default: + return; } } /* 32-bit pairwise ops end up the same as the elementwise versions. */ -#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32 -#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32 -#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32 -#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32 - -#define GEN_NEON_INTEGER_OP_ENV(name) do { \ - switch ((size << 1) | u) { \ - case 0: \ - gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \ - break; \ - case 1: \ - gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \ - break; \ - case 2: \ - gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \ - break; \ - case 3: \ - gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \ - break; \ - case 4: \ - gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \ - break; \ - case 5: \ - gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \ - break; \ - default: return 1; \ - }} while (0) - -#define GEN_NEON_INTEGER_OP(name) do { \ - switch ((size << 1) | u) { \ - case 0: \ - gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \ - break; \ - case 1: \ - gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \ - break; \ - case 2: \ - gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \ - break; \ - case 3: \ - gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \ - break; \ - case 4: \ - gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \ - break; \ - case 5: \ - gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \ - break; \ - default: return 1; \ - }} while (0) - -static TCGv neon_load_scratch(int scratch) -{ +#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32 +#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32 +#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32 +#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32 + +#define GEN_NEON_INTEGER_OP_ENV(name) \ + do { \ + switch ((size << 1) | u) { \ + case 0: \ + gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \ + break; \ + case 1: \ + gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \ + break; \ + case 2: \ + gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \ + break; \ + case 3: \ + gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \ + break; \ + case 4: \ + gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \ + break; \ + case 5: \ + gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \ + break; \ + default: \ + return 1; \ + } \ + } while (0) + +#define GEN_NEON_INTEGER_OP(name) \ + do { \ + switch ((size << 1) | u) { \ + case 0: \ + 
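+        /* (size << 1) | u packs the element width (0 = byte,       \
+         * 1 = halfword, 2 = word) with the signedness bit, so a    \
+         * signed 16-bit op dispatches on 2 and its unsigned        \
+         * counterpart on 3. */                                     \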
gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \ + break; \ + case 1: \ + gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \ + break; \ + case 2: \ + gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \ + break; \ + case 3: \ + gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \ + break; \ + case 4: \ + gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \ + break; \ + case 5: \ + gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \ + break; \ + default: \ + return 1; \ + } \ + } while (0) + +static TCGv neon_load_scratch(int scratch) { TCGv tmp = tcg_temp_new_i32(); tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch])); return tmp; } -static void neon_store_scratch(int scratch, TCGv var) -{ +static void neon_store_scratch(int scratch, TCGv var) { tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch])); tcg_temp_free_i32(var); } -static inline TCGv neon_get_scalar(int size, int reg) -{ +static inline TCGv neon_get_scalar(int size, int reg) { TCGv tmp; if (size == 1) { tmp = neon_load_reg(reg & 7, reg >> 4); @@ -3834,8 +3913,7 @@ static inline TCGv neon_get_scalar(int size, int reg) return tmp; } -static int gen_neon_unzip(int rd, int rm, int size, int q) -{ +static int gen_neon_unzip(int rd, int rm, int size, int q) { TCGv tmp, tmp2; if (!q && size == 2) { return 1; @@ -3844,28 +3922,28 @@ static int gen_neon_unzip(int rd, int rm, int size, int q) tmp2 = tcg_const_i32(rm); if (q) { switch (size) { - case 0: - gen_helper_neon_qunzip8(cpu_env, tmp, tmp2); - break; - case 1: - gen_helper_neon_qunzip16(cpu_env, tmp, tmp2); - break; - case 2: - gen_helper_neon_qunzip32(cpu_env, tmp, tmp2); - break; - default: - abort(); + case 0: + gen_helper_neon_qunzip8(cpu_env, tmp, tmp2); + break; + case 1: + gen_helper_neon_qunzip16(cpu_env, tmp, tmp2); + break; + case 2: + gen_helper_neon_qunzip32(cpu_env, tmp, tmp2); + break; + default: + abort(); } } else { switch (size) { - case 0: - gen_helper_neon_unzip8(cpu_env, tmp, tmp2); - break; - case 1: - gen_helper_neon_unzip16(cpu_env, tmp, tmp2); - break; - default: - abort(); + case 0: + gen_helper_neon_unzip8(cpu_env, tmp, tmp2); + break; + case 1: + gen_helper_neon_unzip16(cpu_env, tmp, tmp2); + break; + default: + abort(); } } tcg_temp_free_i32(tmp); @@ -3873,8 +3951,7 @@ static int gen_neon_unzip(int rd, int rm, int size, int q) return 0; } -static int gen_neon_zip(int rd, int rm, int size, int q) -{ +static int gen_neon_zip(int rd, int rm, int size, int q) { TCGv tmp, tmp2; if (!q && size == 2) { return 1; @@ -3883,28 +3960,28 @@ static int gen_neon_zip(int rd, int rm, int size, int q) tmp2 = tcg_const_i32(rm); if (q) { switch (size) { - case 0: - gen_helper_neon_qzip8(cpu_env, tmp, tmp2); - break; - case 1: - gen_helper_neon_qzip16(cpu_env, tmp, tmp2); - break; - case 2: - gen_helper_neon_qzip32(cpu_env, tmp, tmp2); - break; - default: - abort(); + case 0: + gen_helper_neon_qzip8(cpu_env, tmp, tmp2); + break; + case 1: + gen_helper_neon_qzip16(cpu_env, tmp, tmp2); + break; + case 2: + gen_helper_neon_qzip32(cpu_env, tmp, tmp2); + break; + default: + abort(); } } else { switch (size) { - case 0: - gen_helper_neon_zip8(cpu_env, tmp, tmp2); - break; - case 1: - gen_helper_neon_zip16(cpu_env, tmp, tmp2); - break; - default: - abort(); + case 0: + gen_helper_neon_zip8(cpu_env, tmp, tmp2); + break; + case 1: + gen_helper_neon_zip16(cpu_env, tmp, tmp2); + break; + default: + abort(); } } tcg_temp_free_i32(tmp); @@ -3912,8 +3989,7 @@ static int gen_neon_zip(int rd, int rm, int size, int q) return 0; } -static void gen_neon_trn_u8(TCGv t0, TCGv 
t1) -{ +static void gen_neon_trn_u8(TCGv t0, TCGv t1) { TCGv rd, tmp; rd = tcg_temp_new_i32(); @@ -3934,8 +4010,7 @@ static void gen_neon_trn_u8(TCGv t0, TCGv t1) tcg_temp_free_i32(rd); } -static void gen_neon_trn_u16(TCGv t0, TCGv t1) -{ +static void gen_neon_trn_u16(TCGv t0, TCGv t1) { TCGv rd, tmp; rd = tcg_temp_new_i32(); @@ -3953,29 +4028,16 @@ static void gen_neon_trn_u16(TCGv t0, TCGv t1) tcg_temp_free_i32(rd); } - static struct { int nregs; int interleave; int spacing; -} neon_ls_element_type[11] = { - {4, 4, 1}, - {4, 4, 2}, - {4, 1, 1}, - {4, 2, 1}, - {3, 3, 1}, - {3, 3, 2}, - {3, 1, 1}, - {1, 1, 1}, - {2, 2, 1}, - {2, 2, 2}, - {2, 1, 1} -}; +} neon_ls_element_type[11] = {{4, 4, 1}, {4, 4, 2}, {4, 1, 1}, {4, 2, 1}, {3, 3, 1}, {3, 3, 2}, + {3, 1, 1}, {1, 1, 1}, {2, 2, 1}, {2, 2, 2}, {2, 1, 1}}; /* Translate a NEON load/store element instruction. Return nonzero if the instruction is invalid. */ -static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn) -{ +static int disas_neon_ls_insn(CPUARMState *env, DisasContext *s, uint32_t insn) { int rd, rn, rm; int op; int nregs; @@ -3994,7 +4056,7 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn) TCGv_i64 tmp64; if (!s->vfp_enabled) - return 1; + return 1; VFP_DREG_D(rd, insn); rn = (insn >> 16) & 0xf; rm = insn & 0xf; @@ -4007,18 +4069,18 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn) return 1; /* Catch UNDEF cases for bad values of align field */ switch (op & 0xc) { - case 4: - if (((insn >> 5) & 1) == 1) { - return 1; - } - break; - case 8: - if (((insn >> 4) & 3) == 3) { - return 1; - } - break; - default: - break; + case 4: + if (((insn >> 5) & 1) == 1) { + return 1; + } + break; + case 8: + if (((insn >> 4) & 3) == 3) { + return 1; + } + break; + default: + break; } nregs = neon_ls_element_type[op].nregs; interleave = neon_ls_element_type[op].interleave; @@ -4168,47 +4230,46 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn) int idx = (insn >> 4) & 0xf; pass = (insn >> 7) & 1; switch (size) { - case 0: - shift = ((insn >> 5) & 3) * 8; - stride = 1; - break; - case 1: - shift = ((insn >> 6) & 1) * 16; - stride = (insn & (1 << 5)) ? 2 : 1; - break; - case 2: - shift = 0; - stride = (insn & (1 << 6)) ? 2 : 1; - break; - default: - abort(); + case 0: + shift = ((insn >> 5) & 3) * 8; + stride = 1; + break; + case 1: + shift = ((insn >> 6) & 1) * 16; + stride = (insn & (1 << 5)) ? 2 : 1; + break; + case 2: + shift = 0; + stride = (insn & (1 << 6)) ? 2 : 1; + break; + default: + abort(); } nregs = ((insn >> 8) & 3) + 1; /* Catch the UNDEF cases. This is unavoidably a bit messy. 
*/ switch (nregs) { - case 1: - if (((idx & (1 << size)) != 0) || - (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) { - return 1; - } - break; - case 3: - if ((idx & 1) != 0) { - return 1; - } + case 1: + if (((idx & (1 << size)) != 0) || (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) { + return 1; + } + break; + case 3: + if ((idx & 1) != 0) { + return 1; + } /* fall through */ - case 2: - if (size == 2 && (idx & 2) != 0) { - return 1; - } - break; - case 4: - if ((size == 2) && ((idx & 3) == 3)) { - return 1; - } - break; - default: - abort(); + case 2: + if (size == 2 && (idx & 2) != 0) { + return 1; + } + break; + case 4: + if ((size == 2) && ((idx & 3) == 3)) { + return 1; + } + break; + default: + abort(); } if ((rd + stride * (nregs - 1)) > 31) { /* Attempts to write off the end of the register file @@ -4222,17 +4283,17 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn) for (reg = 0; reg < nregs; reg++) { if (load) { switch (size) { - case 0: - tmp = gen_ld8u(addr, IS_USER(s)); - break; - case 1: - tmp = gen_ld16u(addr, IS_USER(s)); - break; - case 2: - tmp = gen_ld32(addr, IS_USER(s)); - break; - default: /* Avoid compiler warnings. */ - abort(); + case 0: + tmp = gen_ld8u(addr, IS_USER(s)); + break; + case 1: + tmp = gen_ld16u(addr, IS_USER(s)); + break; + case 2: + tmp = gen_ld32(addr, IS_USER(s)); + break; + default: /* Avoid compiler warnings. */ + abort(); } if (size != 2) { tmp2 = neon_load_reg(rd, pass); @@ -4245,15 +4306,15 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn) if (shift) tcg_gen_shri_i32(tmp, tmp, shift); switch (size) { - case 0: - gen_st8(tmp, addr, IS_USER(s)); - break; - case 1: - gen_st16(tmp, addr, IS_USER(s)); - break; - case 2: - gen_st32(tmp, addr, IS_USER(s)); - break; + case 0: + gen_st8(tmp, addr, IS_USER(s)); + break; + case 1: + gen_st16(tmp, addr, IS_USER(s)); + break; + case 2: + gen_st32(tmp, addr, IS_USER(s)); + break; } } rd += stride; @@ -4281,166 +4342,250 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn) } /* Bitwise select. dest = c ? t : f. Clobbers T and F. 
*/ -static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c) -{ +static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c) { tcg_gen_and_i32(t, t, c); tcg_gen_andc_i32(f, f, c); tcg_gen_or_i32(dest, t, f); } -static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src) -{ +static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src) { switch (size) { - case 0: gen_helper_neon_narrow_u8(dest, src); break; - case 1: gen_helper_neon_narrow_u16(dest, src); break; - case 2: tcg_gen_trunc_i64_i32(dest, src); break; - default: abort(); + case 0: + gen_helper_neon_narrow_u8(dest, src); + break; + case 1: + gen_helper_neon_narrow_u16(dest, src); + break; + case 2: + tcg_gen_trunc_i64_i32(dest, src); + break; + default: + abort(); } } -static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src) -{ +static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src) { switch (size) { - case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break; - case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break; - case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break; - default: abort(); + case 0: + gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); + break; + case 1: + gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); + break; + case 2: + gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); + break; + default: + abort(); } } -static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src) -{ +static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src) { switch (size) { - case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break; - case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break; - case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break; - default: abort(); + case 0: + gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); + break; + case 1: + gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); + break; + case 2: + gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); + break; + default: + abort(); } } -static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src) -{ +static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src) { switch (size) { - case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break; - case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break; - case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break; - default: abort(); + case 0: + gen_helper_neon_unarrow_sat8(dest, cpu_env, src); + break; + case 1: + gen_helper_neon_unarrow_sat16(dest, cpu_env, src); + break; + case 2: + gen_helper_neon_unarrow_sat32(dest, cpu_env, src); + break; + default: + abort(); } } -static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift, - int q, int u) -{ +static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift, int q, int u) { if (q) { if (u) { switch (size) { - case 1: gen_helper_neon_rshl_u16(var, var, shift); break; - case 2: gen_helper_neon_rshl_u32(var, var, shift); break; - default: abort(); + case 1: + gen_helper_neon_rshl_u16(var, var, shift); + break; + case 2: + gen_helper_neon_rshl_u32(var, var, shift); + break; + default: + abort(); } } else { switch (size) { - case 1: gen_helper_neon_rshl_s16(var, var, shift); break; - case 2: gen_helper_neon_rshl_s32(var, var, shift); break; - default: abort(); + case 1: + gen_helper_neon_rshl_s16(var, var, shift); + break; + case 2: + gen_helper_neon_rshl_s32(var, var, shift); + break; + default: + abort(); } } } else { if (u) { switch (size) { - case 1: 
gen_helper_neon_shl_u16(var, var, shift); break; - case 2: gen_helper_neon_shl_u32(var, var, shift); break; - default: abort(); + case 1: + gen_helper_neon_shl_u16(var, var, shift); + break; + case 2: + gen_helper_neon_shl_u32(var, var, shift); + break; + default: + abort(); } } else { switch (size) { - case 1: gen_helper_neon_shl_s16(var, var, shift); break; - case 2: gen_helper_neon_shl_s32(var, var, shift); break; - default: abort(); + case 1: + gen_helper_neon_shl_s16(var, var, shift); + break; + case 2: + gen_helper_neon_shl_s32(var, var, shift); + break; + default: + abort(); } } } } -static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u) -{ +static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u) { if (u) { switch (size) { - case 0: gen_helper_neon_widen_u8(dest, src); break; - case 1: gen_helper_neon_widen_u16(dest, src); break; - case 2: tcg_gen_extu_i32_i64(dest, src); break; - default: abort(); + case 0: + gen_helper_neon_widen_u8(dest, src); + break; + case 1: + gen_helper_neon_widen_u16(dest, src); + break; + case 2: + tcg_gen_extu_i32_i64(dest, src); + break; + default: + abort(); } } else { switch (size) { - case 0: gen_helper_neon_widen_s8(dest, src); break; - case 1: gen_helper_neon_widen_s16(dest, src); break; - case 2: tcg_gen_ext_i32_i64(dest, src); break; - default: abort(); - } - } + case 0: + gen_helper_neon_widen_s8(dest, src); + break; + case 1: + gen_helper_neon_widen_s16(dest, src); + break; + case 2: + tcg_gen_ext_i32_i64(dest, src); + break; + default: + abort(); + } + } tcg_temp_free_i32(src); } -static inline void gen_neon_addl(int size) -{ +static inline void gen_neon_addl(int size) { switch (size) { - case 0: gen_helper_neon_addl_u16(CPU_V001); break; - case 1: gen_helper_neon_addl_u32(CPU_V001); break; - case 2: tcg_gen_add_i64(CPU_V001); break; - default: abort(); + case 0: + gen_helper_neon_addl_u16(CPU_V001); + break; + case 1: + gen_helper_neon_addl_u32(CPU_V001); + break; + case 2: + tcg_gen_add_i64(CPU_V001); + break; + default: + abort(); } } -static inline void gen_neon_subl(int size) -{ +static inline void gen_neon_subl(int size) { switch (size) { - case 0: gen_helper_neon_subl_u16(CPU_V001); break; - case 1: gen_helper_neon_subl_u32(CPU_V001); break; - case 2: tcg_gen_sub_i64(CPU_V001); break; - default: abort(); + case 0: + gen_helper_neon_subl_u16(CPU_V001); + break; + case 1: + gen_helper_neon_subl_u32(CPU_V001); + break; + case 2: + tcg_gen_sub_i64(CPU_V001); + break; + default: + abort(); } } -static inline void gen_neon_negl(TCGv_i64 var, int size) -{ +static inline void gen_neon_negl(TCGv_i64 var, int size) { switch (size) { - case 0: gen_helper_neon_negl_u16(var, var); break; - case 1: gen_helper_neon_negl_u32(var, var); break; - case 2: gen_helper_neon_negl_u64(var, var); break; - default: abort(); + case 0: + gen_helper_neon_negl_u16(var, var); + break; + case 1: + gen_helper_neon_negl_u32(var, var); + break; + case 2: + gen_helper_neon_negl_u64(var, var); + break; + default: + abort(); } } -static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size) -{ +static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size) { switch (size) { - case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break; - case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break; - default: abort(); + case 1: + gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); + break; + case 2: + gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); + break; 
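+        /* Only sizes 1 and 2 can get here: the VQDMLAL/VQDMLSL/
+         * VQDMULL encodings that use this helper are UNDEF for byte
+         * elements, so anything else is a translator bug and falls
+         * through to abort(). */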
+ default: + abort(); } } -static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u) -{ +static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u) { TCGv_i64 tmp; switch ((size << 1) | u) { - case 0: gen_helper_neon_mull_s8(dest, a, b); break; - case 1: gen_helper_neon_mull_u8(dest, a, b); break; - case 2: gen_helper_neon_mull_s16(dest, a, b); break; - case 3: gen_helper_neon_mull_u16(dest, a, b); break; - case 4: - tmp = gen_muls_i64_i32(a, b); - tcg_gen_mov_i64(dest, tmp); - tcg_temp_free_i64(tmp); - break; - case 5: - tmp = gen_mulu_i64_i32(a, b); - tcg_gen_mov_i64(dest, tmp); - tcg_temp_free_i64(tmp); - break; - default: abort(); + case 0: + gen_helper_neon_mull_s8(dest, a, b); + break; + case 1: + gen_helper_neon_mull_u8(dest, a, b); + break; + case 2: + gen_helper_neon_mull_s16(dest, a, b); + break; + case 3: + gen_helper_neon_mull_u16(dest, a, b); + break; + case 4: + tmp = gen_muls_i64_i32(a, b); + tcg_gen_mov_i64(dest, tmp); + tcg_temp_free_i64(tmp); + break; + case 5: + tmp = gen_mulu_i64_i32(a, b); + tcg_gen_mov_i64(dest, tmp); + tcg_temp_free_i64(tmp); + break; + default: + abort(); } /* gen_helper_neon_mull_[su]{8|16} do not free their parameters. @@ -4451,8 +4596,7 @@ static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u) } } -static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src) -{ +static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src) { if (op) { if (u) { gen_neon_unarrow_sats(size, dest, src); @@ -4496,46 +4640,46 @@ static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src) #define NEON_3R_VPMIN 21 #define NEON_3R_VQDMULH_VQRDMULH 22 #define NEON_3R_VPADD 23 -#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */ -#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */ +#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */ +#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */ #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */ -#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */ -#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */ -#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */ +#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */ +#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */ +#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */ #define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */ static const uint8_t neon_3r_sizes[] = { - [NEON_3R_VHADD] = 0x7, - [NEON_3R_VQADD] = 0xf, - [NEON_3R_VRHADD] = 0x7, - [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */ - [NEON_3R_VHSUB] = 0x7, - [NEON_3R_VQSUB] = 0xf, - [NEON_3R_VCGT] = 0x7, - [NEON_3R_VCGE] = 0x7, - [NEON_3R_VSHL] = 0xf, - [NEON_3R_VQSHL] = 0xf, - [NEON_3R_VRSHL] = 0xf, - [NEON_3R_VQRSHL] = 0xf, - [NEON_3R_VMAX] = 0x7, - [NEON_3R_VMIN] = 0x7, - [NEON_3R_VABD] = 0x7, - [NEON_3R_VABA] = 0x7, - [NEON_3R_VADD_VSUB] = 0xf, - [NEON_3R_VTST_VCEQ] = 0x7, - [NEON_3R_VML] = 0x7, - [NEON_3R_VMUL] = 0x7, - [NEON_3R_VPMAX] = 0x7, - [NEON_3R_VPMIN] = 0x7, - [NEON_3R_VQDMULH_VQRDMULH] = 0x6, - [NEON_3R_VPADD] = 0x7, - [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */ - [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */ - [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */ - [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */ - [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */ - [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */ - 
[NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */ + [NEON_3R_VHADD] = 0x7, + [NEON_3R_VQADD] = 0xf, + [NEON_3R_VRHADD] = 0x7, + [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */ + [NEON_3R_VHSUB] = 0x7, + [NEON_3R_VQSUB] = 0xf, + [NEON_3R_VCGT] = 0x7, + [NEON_3R_VCGE] = 0x7, + [NEON_3R_VSHL] = 0xf, + [NEON_3R_VQSHL] = 0xf, + [NEON_3R_VRSHL] = 0xf, + [NEON_3R_VQRSHL] = 0xf, + [NEON_3R_VMAX] = 0x7, + [NEON_3R_VMIN] = 0x7, + [NEON_3R_VABD] = 0x7, + [NEON_3R_VABA] = 0x7, + [NEON_3R_VADD_VSUB] = 0xf, + [NEON_3R_VTST_VCEQ] = 0x7, + [NEON_3R_VML] = 0x7, + [NEON_3R_VMUL] = 0x7, + [NEON_3R_VPMAX] = 0x7, + [NEON_3R_VPMIN] = 0x7, + [NEON_3R_VQDMULH_VQRDMULH] = 0x6, + [NEON_3R_VPADD] = 0x7, + [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */ + [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */ + [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */ + [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */ + [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */ + [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */ + [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */ }; /* Symbolic constants for op fields for Neon 2-register miscellaneous. @@ -4573,7 +4717,7 @@ static const uint8_t neon_3r_sizes[] = { #define NEON_2RM_VTRN 33 #define NEON_2RM_VUZP 34 #define NEON_2RM_VZIP 35 -#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */ +#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */ #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */ #define NEON_2RM_VSHLL 38 #define NEON_2RM_VCVT_F16_F32 44 @@ -4587,11 +4731,9 @@ static const uint8_t neon_3r_sizes[] = { #define NEON_2RM_VCVT_SF 62 #define NEON_2RM_VCVT_UF 63 -static int neon_2rm_is_float_op(int op) -{ +static int neon_2rm_is_float_op(int op) { /* Return true if this neon 2reg-misc op is float-to-float */ - return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F || - op >= NEON_2RM_VRECPE_F); + return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F || op >= NEON_2RM_VRECPE_F); } /* Each entry in this array has bit n set if the insn allows @@ -4599,50 +4741,21 @@ static int neon_2rm_is_float_op(int op) * op values will have no bits set they always UNDEF. 
*/ static const uint8_t neon_2rm_sizes[] = { - [NEON_2RM_VREV64] = 0x7, - [NEON_2RM_VREV32] = 0x3, - [NEON_2RM_VREV16] = 0x1, - [NEON_2RM_VPADDL] = 0x7, - [NEON_2RM_VPADDL_U] = 0x7, - [NEON_2RM_VCLS] = 0x7, - [NEON_2RM_VCLZ] = 0x7, - [NEON_2RM_VCNT] = 0x1, - [NEON_2RM_VMVN] = 0x1, - [NEON_2RM_VPADAL] = 0x7, - [NEON_2RM_VPADAL_U] = 0x7, - [NEON_2RM_VQABS] = 0x7, - [NEON_2RM_VQNEG] = 0x7, - [NEON_2RM_VCGT0] = 0x7, - [NEON_2RM_VCGE0] = 0x7, - [NEON_2RM_VCEQ0] = 0x7, - [NEON_2RM_VCLE0] = 0x7, - [NEON_2RM_VCLT0] = 0x7, - [NEON_2RM_VABS] = 0x7, - [NEON_2RM_VNEG] = 0x7, - [NEON_2RM_VCGT0_F] = 0x4, - [NEON_2RM_VCGE0_F] = 0x4, - [NEON_2RM_VCEQ0_F] = 0x4, - [NEON_2RM_VCLE0_F] = 0x4, - [NEON_2RM_VCLT0_F] = 0x4, - [NEON_2RM_VABS_F] = 0x4, - [NEON_2RM_VNEG_F] = 0x4, - [NEON_2RM_VSWP] = 0x1, - [NEON_2RM_VTRN] = 0x7, - [NEON_2RM_VUZP] = 0x7, - [NEON_2RM_VZIP] = 0x7, - [NEON_2RM_VMOVN] = 0x7, - [NEON_2RM_VQMOVN] = 0x7, - [NEON_2RM_VSHLL] = 0x7, - [NEON_2RM_VCVT_F16_F32] = 0x2, - [NEON_2RM_VCVT_F32_F16] = 0x2, - [NEON_2RM_VRECPE] = 0x4, - [NEON_2RM_VRSQRTE] = 0x4, - [NEON_2RM_VRECPE_F] = 0x4, - [NEON_2RM_VRSQRTE_F] = 0x4, - [NEON_2RM_VCVT_FS] = 0x4, - [NEON_2RM_VCVT_FU] = 0x4, - [NEON_2RM_VCVT_SF] = 0x4, - [NEON_2RM_VCVT_UF] = 0x4, + [NEON_2RM_VREV64] = 0x7, [NEON_2RM_VREV32] = 0x3, [NEON_2RM_VREV16] = 0x1, + [NEON_2RM_VPADDL] = 0x7, [NEON_2RM_VPADDL_U] = 0x7, [NEON_2RM_VCLS] = 0x7, + [NEON_2RM_VCLZ] = 0x7, [NEON_2RM_VCNT] = 0x1, [NEON_2RM_VMVN] = 0x1, + [NEON_2RM_VPADAL] = 0x7, [NEON_2RM_VPADAL_U] = 0x7, [NEON_2RM_VQABS] = 0x7, + [NEON_2RM_VQNEG] = 0x7, [NEON_2RM_VCGT0] = 0x7, [NEON_2RM_VCGE0] = 0x7, + [NEON_2RM_VCEQ0] = 0x7, [NEON_2RM_VCLE0] = 0x7, [NEON_2RM_VCLT0] = 0x7, + [NEON_2RM_VABS] = 0x7, [NEON_2RM_VNEG] = 0x7, [NEON_2RM_VCGT0_F] = 0x4, + [NEON_2RM_VCGE0_F] = 0x4, [NEON_2RM_VCEQ0_F] = 0x4, [NEON_2RM_VCLE0_F] = 0x4, + [NEON_2RM_VCLT0_F] = 0x4, [NEON_2RM_VABS_F] = 0x4, [NEON_2RM_VNEG_F] = 0x4, + [NEON_2RM_VSWP] = 0x1, [NEON_2RM_VTRN] = 0x7, [NEON_2RM_VUZP] = 0x7, + [NEON_2RM_VZIP] = 0x7, [NEON_2RM_VMOVN] = 0x7, [NEON_2RM_VQMOVN] = 0x7, + [NEON_2RM_VSHLL] = 0x7, [NEON_2RM_VCVT_F16_F32] = 0x2, [NEON_2RM_VCVT_F32_F16] = 0x2, + [NEON_2RM_VRECPE] = 0x4, [NEON_2RM_VRSQRTE] = 0x4, [NEON_2RM_VRECPE_F] = 0x4, + [NEON_2RM_VRSQRTE_F] = 0x4, [NEON_2RM_VCVT_FS] = 0x4, [NEON_2RM_VCVT_FU] = 0x4, + [NEON_2RM_VCVT_SF] = 0x4, [NEON_2RM_VCVT_UF] = 0x4, }; /* Translate a NEON data processing instruction. Return nonzero if the @@ -4650,8 +4763,7 @@ static const uint8_t neon_2rm_sizes[] = { We process data in a mixture of 32-bit and 64-bit chunks. Mostly we use 32-bit chunks so we can use normal scalar instructions. 
*/ -static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn) -{ +static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn) { int op; int q; int rd, rn, rm; @@ -4666,7 +4778,7 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins TCGv_i64 tmp64; if (!s->vfp_enabled) - return 1; + return 1; q = (insn & (1 << 6)) != 0; u = (insn >> 24) & 1; VFP_DREG_D(rd, insn); @@ -4692,65 +4804,57 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins neon_load_reg64(cpu_V0, rn + pass); neon_load_reg64(cpu_V1, rm + pass); switch (op) { - case NEON_3R_VQADD: - if (u) { - gen_helper_neon_qadd_u64(cpu_V0, cpu_env, - cpu_V0, cpu_V1); - } else { - gen_helper_neon_qadd_s64(cpu_V0, cpu_env, - cpu_V0, cpu_V1); - } - break; - case NEON_3R_VQSUB: - if (u) { - gen_helper_neon_qsub_u64(cpu_V0, cpu_env, - cpu_V0, cpu_V1); - } else { - gen_helper_neon_qsub_s64(cpu_V0, cpu_env, - cpu_V0, cpu_V1); - } - break; - case NEON_3R_VSHL: - if (u) { - gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0); - } else { - gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0); - } - break; - case NEON_3R_VQSHL: - if (u) { - gen_helper_neon_qshl_u64(cpu_V0, cpu_env, - cpu_V1, cpu_V0); - } else { - gen_helper_neon_qshl_s64(cpu_V0, cpu_env, - cpu_V1, cpu_V0); - } - break; - case NEON_3R_VRSHL: - if (u) { - gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0); - } else { - gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0); - } - break; - case NEON_3R_VQRSHL: - if (u) { - gen_helper_neon_qrshl_u64(cpu_V0, cpu_env, - cpu_V1, cpu_V0); - } else { - gen_helper_neon_qrshl_s64(cpu_V0, cpu_env, - cpu_V1, cpu_V0); - } - break; - case NEON_3R_VADD_VSUB: - if (u) { - tcg_gen_sub_i64(CPU_V001); - } else { - tcg_gen_add_i64(CPU_V001); - } - break; - default: - abort(); + case NEON_3R_VQADD: + if (u) { + gen_helper_neon_qadd_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1); + } else { + gen_helper_neon_qadd_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1); + } + break; + case NEON_3R_VQSUB: + if (u) { + gen_helper_neon_qsub_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1); + } else { + gen_helper_neon_qsub_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1); + } + break; + case NEON_3R_VSHL: + if (u) { + gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0); + } else { + gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0); + } + break; + case NEON_3R_VQSHL: + if (u) { + gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V1, cpu_V0); + } else { + gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V1, cpu_V0); + } + break; + case NEON_3R_VRSHL: + if (u) { + gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0); + } else { + gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0); + } + break; + case NEON_3R_VQRSHL: + if (u) { + gen_helper_neon_qrshl_u64(cpu_V0, cpu_env, cpu_V1, cpu_V0); + } else { + gen_helper_neon_qrshl_s64(cpu_V0, cpu_env, cpu_V1, cpu_V0); + } + break; + case NEON_3R_VADD_VSUB: + if (u) { + tcg_gen_sub_i64(CPU_V001); + } else { + tcg_gen_add_i64(CPU_V001); + } + break; + default: + abort(); } neon_store_reg64(cpu_V0, rd + pass); } @@ -4758,367 +4862,404 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins } pairwise = 0; switch (op) { - case NEON_3R_VSHL: - case NEON_3R_VQSHL: - case NEON_3R_VRSHL: - case NEON_3R_VQRSHL: - { + case NEON_3R_VSHL: + case NEON_3R_VQSHL: + case NEON_3R_VRSHL: + case NEON_3R_VQRSHL: { int rtmp; /* Shift instruction operands are reversed. 
*/ rtmp = rn; rn = rm; rm = rtmp; - } - break; - case NEON_3R_VPADD: - if (u) { - return 1; - } + } break; + case NEON_3R_VPADD: + if (u) { + return 1; + } /* Fall through */ - case NEON_3R_VPMAX: - case NEON_3R_VPMIN: - pairwise = 1; - break; - case NEON_3R_FLOAT_ARITH: - pairwise = (u && size < 2); /* if VPADD (float) */ - break; - case NEON_3R_FLOAT_MINMAX: - pairwise = u; /* if VPMIN/VPMAX (float) */ - break; - case NEON_3R_FLOAT_CMP: - if (!u && size) { - /* no encoding for U=0 C=1x */ - return 1; - } - break; - case NEON_3R_FLOAT_ACMP: - if (!u) { - return 1; - } - break; - case NEON_3R_VRECPS_VRSQRTS: - if (u) { - return 1; - } - break; - case NEON_3R_VMUL: - if (u && (size != 0)) { - /* UNDEF on invalid size for polynomial subcase */ - return 1; - } - break; - case NEON_3R_VFM: - if (!arm_feature(env, ARM_FEATURE_VFP4) || u) { - return 1; - } - break; - default: - break; - } - - if (pairwise && q) { - /* All the pairwise insns UNDEF if Q is set */ - return 1; - } - - for (pass = 0; pass < (q ? 4 : 2); pass++) { - - if (pairwise) { - /* Pairwise. */ - if (pass < 1) { - tmp = neon_load_reg(rn, 0); - tmp2 = neon_load_reg(rn, 1); - } else { - tmp = neon_load_reg(rm, 0); - tmp2 = neon_load_reg(rm, 1); - } - } else { - /* Elementwise. */ - tmp = neon_load_reg(rn, pass); - tmp2 = neon_load_reg(rm, pass); - } - switch (op) { - case NEON_3R_VHADD: - GEN_NEON_INTEGER_OP(hadd); - break; - case NEON_3R_VQADD: - GEN_NEON_INTEGER_OP_ENV(qadd); - break; - case NEON_3R_VRHADD: - GEN_NEON_INTEGER_OP(rhadd); - break; - case NEON_3R_LOGIC: /* Logic ops. */ - switch ((u << 2) | size) { - case 0: /* VAND */ - tcg_gen_and_i32(tmp, tmp, tmp2); - break; - case 1: /* BIC */ - tcg_gen_andc_i32(tmp, tmp, tmp2); - break; - case 2: /* VORR */ - tcg_gen_or_i32(tmp, tmp, tmp2); - break; - case 3: /* VORN */ - tcg_gen_orc_i32(tmp, tmp, tmp2); - break; - case 4: /* VEOR */ - tcg_gen_xor_i32(tmp, tmp, tmp2); - break; - case 5: /* VBSL */ - tmp3 = neon_load_reg(rd, pass); - gen_neon_bsl(tmp, tmp, tmp2, tmp3); - tcg_temp_free_i32(tmp3); + case NEON_3R_VPMAX: + case NEON_3R_VPMIN: + pairwise = 1; break; - case 6: /* VBIT */ - tmp3 = neon_load_reg(rd, pass); - gen_neon_bsl(tmp, tmp, tmp3, tmp2); - tcg_temp_free_i32(tmp3); + case NEON_3R_FLOAT_ARITH: + pairwise = (u && size < 2); /* if VPADD (float) */ break; - case 7: /* VBIF */ - tmp3 = neon_load_reg(rd, pass); - gen_neon_bsl(tmp, tmp3, tmp, tmp2); - tcg_temp_free_i32(tmp3); + case NEON_3R_FLOAT_MINMAX: + pairwise = u; /* if VPMIN/VPMAX (float) */ break; - } - break; - case NEON_3R_VHSUB: - GEN_NEON_INTEGER_OP(hsub); - break; - case NEON_3R_VQSUB: - GEN_NEON_INTEGER_OP_ENV(qsub); - break; - case NEON_3R_VCGT: - GEN_NEON_INTEGER_OP(cgt); - break; - case NEON_3R_VCGE: - GEN_NEON_INTEGER_OP(cge); - break; - case NEON_3R_VSHL: - GEN_NEON_INTEGER_OP(shl); - break; - case NEON_3R_VQSHL: - GEN_NEON_INTEGER_OP_ENV(qshl); - break; - case NEON_3R_VRSHL: - GEN_NEON_INTEGER_OP(rshl); - break; - case NEON_3R_VQRSHL: - //GEN_NEON_INTEGER_OP_ENV(qrshl); - break; - case NEON_3R_VMAX: - GEN_NEON_INTEGER_OP(max); - break; - case NEON_3R_VMIN: - GEN_NEON_INTEGER_OP(min); - break; - case NEON_3R_VABD: - GEN_NEON_INTEGER_OP(abd); - break; - case NEON_3R_VABA: - GEN_NEON_INTEGER_OP(abd); - tcg_temp_free_i32(tmp2); - tmp2 = neon_load_reg(rd, pass); - gen_neon_add(size, tmp, tmp2); - break; - case NEON_3R_VADD_VSUB: - if (!u) { /* VADD */ - gen_neon_add(size, tmp, tmp2); - } else { /* VSUB */ - switch (size) { - case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break; - case 1: 
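+                        /* NEON integer subtract is per-lane modulo
+                         * arithmetic and never touches the flags:
+                         * with size == 0 each of the four bytes in
+                         * the 32-bit chunk wraps independently,
+                         * e.g. 0x01 - 0x02 leaves 0xff in that lane
+                         * only. */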
gen_helper_neon_sub_u16(tmp, tmp, tmp2); break; - case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break; - default: abort(); - } - } - break; - case NEON_3R_VTST_VCEQ: - if (!u) { /* VTST */ - switch (size) { - case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break; - case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break; - default: abort(); - } - } else { /* VCEQ */ - switch (size) { - case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break; - case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break; - default: abort(); - } - } - break; - case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */ - switch (size) { - case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break; - case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break; - default: abort(); - } - tcg_temp_free_i32(tmp2); - tmp2 = neon_load_reg(rd, pass); - if (u) { /* VMLS */ - gen_neon_rsb(size, tmp, tmp2); - } else { /* VMLA */ - gen_neon_add(size, tmp, tmp2); - } - break; - case NEON_3R_VMUL: - if (u) { /* polynomial */ - gen_helper_neon_mul_p8(tmp, tmp, tmp2); - } else { /* Integer */ - switch (size) { - case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break; - case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break; - default: abort(); + case NEON_3R_FLOAT_CMP: + if (!u && size) { + /* no encoding for U=0 C=1x */ + return 1; } - } - break; - case NEON_3R_VPMAX: - GEN_NEON_INTEGER_OP(pmax); - break; - case NEON_3R_VPMIN: - GEN_NEON_INTEGER_OP(pmin); - break; - case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */ - if (!u) { /* VQDMULH */ - switch (size) { - case 1: - gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); - break; - case 2: - gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); - break; - default: abort(); + break; + case NEON_3R_FLOAT_ACMP: + if (!u) { + return 1; } - } else { /* VQRDMULH */ - switch (size) { - case 1: - gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); - break; - case 2: - gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); - break; - default: abort(); + break; + case NEON_3R_VRECPS_VRSQRTS: + if (u) { + return 1; } - } - break; - case NEON_3R_VPADD: - switch (size) { - case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break; - case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break; - default: abort(); - } - break; - case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. 
*/ - { - TCGv_ptr fpstatus = get_fpstatus_ptr(1); - switch ((u << 2) | size) { - case 0: /* VADD */ - case 4: /* VPADD */ - gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus); break; - case 2: /* VSUB */ - gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus); + case NEON_3R_VMUL: + if (u && (size != 0)) { + /* UNDEF on invalid size for polynomial subcase */ + return 1; + } break; - case 6: /* VABD */ - gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus); + case NEON_3R_VFM: + if (!arm_feature(env, ARM_FEATURE_VFP4) || u) { + return 1; + } break; default: - abort(); - } - tcg_temp_free_ptr(fpstatus); - break; + break; } - case NEON_3R_FLOAT_MULTIPLY: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(1); - gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus); - if (!u) { - tcg_temp_free_i32(tmp2); - tmp2 = neon_load_reg(rd, pass); - if (size == 0) { - gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus); - } else { - gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus); - } - } - tcg_temp_free_ptr(fpstatus); - break; + + if (pairwise && q) { + /* All the pairwise insns UNDEF if Q is set */ + return 1; } - case NEON_3R_FLOAT_CMP: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(1); - if (!u) { - gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus); - } else { - if (size == 0) { - gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus); + + for (pass = 0; pass < (q ? 4 : 2); pass++) { + + if (pairwise) { + /* Pairwise. */ + if (pass < 1) { + tmp = neon_load_reg(rn, 0); + tmp2 = neon_load_reg(rn, 1); } else { - gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus); + tmp = neon_load_reg(rm, 0); + tmp2 = neon_load_reg(rm, 1); } - } - tcg_temp_free_ptr(fpstatus); - break; - } - case NEON_3R_FLOAT_ACMP: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(1); - if (size == 0) { - gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus); - } else { - gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus); - } - tcg_temp_free_ptr(fpstatus); - break; - } - case NEON_3R_FLOAT_MINMAX: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(1); - if (size == 0) { - gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus); } else { - gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus); + /* Elementwise. */ + tmp = neon_load_reg(rn, pass); + tmp2 = neon_load_reg(rm, pass); } - tcg_temp_free_ptr(fpstatus); - break; - } - case NEON_3R_VRECPS_VRSQRTS: - if (size == 0) - gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env); - else - gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env); - break; - case NEON_3R_VFM: - { - /* VFMA, VFMS: fused multiply-add */ - TCGv_ptr fpstatus = get_fpstatus_ptr(1); - TCGv_i32 tmp3 = neon_load_reg(rd, pass); - if (size) { - /* VFMS */ - gen_helper_vfp_negs(tmp, tmp); + switch (op) { + case NEON_3R_VHADD: + GEN_NEON_INTEGER_OP(hadd); + break; + case NEON_3R_VQADD: + GEN_NEON_INTEGER_OP_ENV(qadd); + break; + case NEON_3R_VRHADD: + GEN_NEON_INTEGER_OP(rhadd); + break; + case NEON_3R_LOGIC: /* Logic ops. 
*/ + switch ((u << 2) | size) { + case 0: /* VAND */ + tcg_gen_and_i32(tmp, tmp, tmp2); + break; + case 1: /* BIC */ + tcg_gen_andc_i32(tmp, tmp, tmp2); + break; + case 2: /* VORR */ + tcg_gen_or_i32(tmp, tmp, tmp2); + break; + case 3: /* VORN */ + tcg_gen_orc_i32(tmp, tmp, tmp2); + break; + case 4: /* VEOR */ + tcg_gen_xor_i32(tmp, tmp, tmp2); + break; + case 5: /* VBSL */ + tmp3 = neon_load_reg(rd, pass); + gen_neon_bsl(tmp, tmp, tmp2, tmp3); + tcg_temp_free_i32(tmp3); + break; + case 6: /* VBIT */ + tmp3 = neon_load_reg(rd, pass); + gen_neon_bsl(tmp, tmp, tmp3, tmp2); + tcg_temp_free_i32(tmp3); + break; + case 7: /* VBIF */ + tmp3 = neon_load_reg(rd, pass); + gen_neon_bsl(tmp, tmp3, tmp, tmp2); + tcg_temp_free_i32(tmp3); + break; + } + break; + case NEON_3R_VHSUB: + GEN_NEON_INTEGER_OP(hsub); + break; + case NEON_3R_VQSUB: + GEN_NEON_INTEGER_OP_ENV(qsub); + break; + case NEON_3R_VCGT: + GEN_NEON_INTEGER_OP(cgt); + break; + case NEON_3R_VCGE: + GEN_NEON_INTEGER_OP(cge); + break; + case NEON_3R_VSHL: + GEN_NEON_INTEGER_OP(shl); + break; + case NEON_3R_VQSHL: + GEN_NEON_INTEGER_OP_ENV(qshl); + break; + case NEON_3R_VRSHL: + GEN_NEON_INTEGER_OP(rshl); + break; + case NEON_3R_VQRSHL: + // GEN_NEON_INTEGER_OP_ENV(qrshl); + break; + case NEON_3R_VMAX: + GEN_NEON_INTEGER_OP(max); + break; + case NEON_3R_VMIN: + GEN_NEON_INTEGER_OP(min); + break; + case NEON_3R_VABD: + GEN_NEON_INTEGER_OP(abd); + break; + case NEON_3R_VABA: + GEN_NEON_INTEGER_OP(abd); + tcg_temp_free_i32(tmp2); + tmp2 = neon_load_reg(rd, pass); + gen_neon_add(size, tmp, tmp2); + break; + case NEON_3R_VADD_VSUB: + if (!u) { /* VADD */ + gen_neon_add(size, tmp, tmp2); + } else { /* VSUB */ + switch (size) { + case 0: + gen_helper_neon_sub_u8(tmp, tmp, tmp2); + break; + case 1: + gen_helper_neon_sub_u16(tmp, tmp, tmp2); + break; + case 2: + tcg_gen_sub_i32(tmp, tmp, tmp2); + break; + default: + abort(); + } + } + break; + case NEON_3R_VTST_VCEQ: + if (!u) { /* VTST */ + switch (size) { + case 0: + gen_helper_neon_tst_u8(tmp, tmp, tmp2); + break; + case 1: + gen_helper_neon_tst_u16(tmp, tmp, tmp2); + break; + case 2: + gen_helper_neon_tst_u32(tmp, tmp, tmp2); + break; + default: + abort(); + } + } else { /* VCEQ */ + switch (size) { + case 0: + gen_helper_neon_ceq_u8(tmp, tmp, tmp2); + break; + case 1: + gen_helper_neon_ceq_u16(tmp, tmp, tmp2); + break; + case 2: + gen_helper_neon_ceq_u32(tmp, tmp, tmp2); + break; + default: + abort(); + } + } + break; + case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */ + switch (size) { + case 0: + gen_helper_neon_mul_u8(tmp, tmp, tmp2); + break; + case 1: + gen_helper_neon_mul_u16(tmp, tmp, tmp2); + break; + case 2: + tcg_gen_mul_i32(tmp, tmp, tmp2); + break; + default: + abort(); + } + tcg_temp_free_i32(tmp2); + tmp2 = neon_load_reg(rd, pass); + if (u) { /* VMLS */ + gen_neon_rsb(size, tmp, tmp2); + } else { /* VMLA */ + gen_neon_add(size, tmp, tmp2); + } + break; + case NEON_3R_VMUL: + if (u) { /* polynomial */ + gen_helper_neon_mul_p8(tmp, tmp, tmp2); + } else { /* Integer */ + switch (size) { + case 0: + gen_helper_neon_mul_u8(tmp, tmp, tmp2); + break; + case 1: + gen_helper_neon_mul_u16(tmp, tmp, tmp2); + break; + case 2: + tcg_gen_mul_i32(tmp, tmp, tmp2); + break; + default: + abort(); + } + } + break; + case NEON_3R_VPMAX: + GEN_NEON_INTEGER_OP(pmax); + break; + case NEON_3R_VPMIN: + GEN_NEON_INTEGER_OP(pmin); + break; + case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. 
*/ + if (!u) { /* VQDMULH */ + switch (size) { + case 1: + gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); + break; + case 2: + gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); + break; + default: + abort(); + } + } else { /* VQRDMULH */ + switch (size) { + case 1: + gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); + break; + case 2: + gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); + break; + default: + abort(); + } + } + break; + case NEON_3R_VPADD: + switch (size) { + case 0: + gen_helper_neon_padd_u8(tmp, tmp, tmp2); + break; + case 1: + gen_helper_neon_padd_u16(tmp, tmp, tmp2); + break; + case 2: + tcg_gen_add_i32(tmp, tmp, tmp2); + break; + default: + abort(); + } + break; + case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */ + { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + switch ((u << 2) | size) { + case 0: /* VADD */ + case 4: /* VPADD */ + gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus); + break; + case 2: /* VSUB */ + gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus); + break; + case 6: /* VABD */ + gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus); + break; + default: + abort(); + } + tcg_temp_free_ptr(fpstatus); + break; + } + case NEON_3R_FLOAT_MULTIPLY: { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus); + if (!u) { + tcg_temp_free_i32(tmp2); + tmp2 = neon_load_reg(rd, pass); + if (size == 0) { + gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus); + } else { + gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus); + } + } + tcg_temp_free_ptr(fpstatus); + break; + } + case NEON_3R_FLOAT_CMP: { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + if (!u) { + gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus); + } else { + if (size == 0) { + gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus); + } else { + gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus); + } + } + tcg_temp_free_ptr(fpstatus); + break; + } + case NEON_3R_FLOAT_ACMP: { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + if (size == 0) { + gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus); + } else { + gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus); + } + tcg_temp_free_ptr(fpstatus); + break; + } + case NEON_3R_FLOAT_MINMAX: { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + if (size == 0) { + gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus); + } else { + gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus); + } + tcg_temp_free_ptr(fpstatus); + break; + } + case NEON_3R_VRECPS_VRSQRTS: + if (size == 0) + gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env); + else + gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env); + break; + case NEON_3R_VFM: { + /* VFMA, VFMS: fused multiply-add */ + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + TCGv_i32 tmp3 = neon_load_reg(rd, pass); + if (size) { + /* VFMS */ + gen_helper_vfp_negs(tmp, tmp); + } + gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus); + tcg_temp_free_i32(tmp3); + tcg_temp_free_ptr(fpstatus); + break; + } + default: + abort(); } - gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus); - tcg_temp_free_i32(tmp3); - tcg_temp_free_ptr(fpstatus); - break; - } - default: - abort(); - } - tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp2); - /* Save the result. For elementwise operations we can put it - straight into the destination register. For pairwise operations - we have to be careful to avoid clobbering the source operands. */ - if (pairwise && rd == rm) { - neon_store_scratch(pass, tmp); - } else { - neon_store_reg(rd, pass, tmp); - } + /* Save the result. 
For elementwise operations we can put it + straight into the destination register. For pairwise operations + we have to be careful to avoid clobbering the source operands. */ + if (pairwise && rd == rm) { + neon_store_scratch(pass, tmp); + } else { + neon_store_reg(rd, pass, tmp); + } } /* for pass */ if (pairwise && rd == rm) { @@ -5162,24 +5303,24 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins if (size == 3) { count = q + 1; } else { - count = q ? 4: 2; + count = q ? 4 : 2; } switch (size) { - case 0: - imm = (uint8_t) shift; - imm |= imm << 8; - imm |= imm << 16; - break; - case 1: - imm = (uint16_t) shift; - imm |= imm << 16; - break; - case 2: - case 3: - imm = shift; - break; - default: - abort(); + case 0: + imm = (uint8_t) shift; + imm |= imm << 8; + imm |= imm << 16; + break; + case 1: + imm = (uint16_t) shift; + imm |= imm << 16; + break; + case 2: + case 3: + imm = shift; + break; + default: + abort(); } for (pass = 0; pass < count; pass++) { @@ -5187,37 +5328,34 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins neon_load_reg64(cpu_V0, rm + pass); tcg_gen_movi_i64(cpu_V1, imm); switch (op) { - case 0: /* VSHR */ - case 1: /* VSRA */ - if (u) + case 0: /* VSHR */ + case 1: /* VSRA */ + if (u) + gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1); + else + gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1); + break; + case 2: /* VRSHR */ + case 3: /* VRSRA */ + if (u) + gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1); + else + gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1); + break; + case 4: /* VSRI */ + case 5: /* VSHL, VSLI */ gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1); - else - gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1); - break; - case 2: /* VRSHR */ - case 3: /* VRSRA */ - if (u) - gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1); - else - gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1); - break; - case 4: /* VSRI */ - case 5: /* VSHL, VSLI */ - gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1); - break; - case 6: /* VQSHLU */ - gen_helper_neon_qshlu_s64(cpu_V0, cpu_env, - cpu_V0, cpu_V1); - break; - case 7: /* VQSHL */ - if (u) { - gen_helper_neon_qshl_u64(cpu_V0, cpu_env, - cpu_V0, cpu_V1); - } else { - gen_helper_neon_qshl_s64(cpu_V0, cpu_env, - cpu_V0, cpu_V1); - } - break; + break; + case 6: /* VQSHLU */ + gen_helper_neon_qshlu_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1); + break; + case 7: /* VQSHL */ + if (u) { + gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1); + } else { + gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1); + } + break; } if (op == 1 || op == 3) { /* Accumulate. 
*/ @@ -5246,44 +5384,48 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins tmp2 = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp2, imm); switch (op) { - case 0: /* VSHR */ - case 1: /* VSRA */ - GEN_NEON_INTEGER_OP(shl); - break; - case 2: /* VRSHR */ - case 3: /* VRSRA */ - GEN_NEON_INTEGER_OP(rshl); - break; - case 4: /* VSRI */ - case 5: /* VSHL, VSLI */ - switch (size) { - case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break; - case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break; - default: abort(); - } - break; - case 6: /* VQSHLU */ - switch (size) { - case 0: - gen_helper_neon_qshlu_s8(tmp, cpu_env, - tmp, tmp2); + case 0: /* VSHR */ + case 1: /* VSRA */ + GEN_NEON_INTEGER_OP(shl); break; - case 1: - gen_helper_neon_qshlu_s16(tmp, cpu_env, - tmp, tmp2); + case 2: /* VRSHR */ + case 3: /* VRSRA */ + GEN_NEON_INTEGER_OP(rshl); break; - case 2: - gen_helper_neon_qshlu_s32(tmp, cpu_env, - tmp, tmp2); + case 4: /* VSRI */ + case 5: /* VSHL, VSLI */ + switch (size) { + case 0: + gen_helper_neon_shl_u8(tmp, tmp, tmp2); + break; + case 1: + gen_helper_neon_shl_u16(tmp, tmp, tmp2); + break; + case 2: + gen_helper_neon_shl_u32(tmp, tmp, tmp2); + break; + default: + abort(); + } + break; + case 6: /* VQSHLU */ + switch (size) { + case 0: + gen_helper_neon_qshlu_s8(tmp, cpu_env, tmp, tmp2); + break; + case 1: + gen_helper_neon_qshlu_s16(tmp, cpu_env, tmp, tmp2); + break; + case 2: + gen_helper_neon_qshlu_s32(tmp, cpu_env, tmp, tmp2); + break; + default: + abort(); + } + break; + case 7: /* VQSHL */ + GEN_NEON_INTEGER_OP_ENV(qshl); break; - default: - abort(); - } - break; - case 7: /* VQSHL */ - GEN_NEON_INTEGER_OP_ENV(qshl); - break; } tcg_temp_free_i32(tmp2); @@ -5295,33 +5437,33 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins } else if (op == 4 || (op == 5 && u)) { /* Insert */ switch (size) { - case 0: - if (op == 4) - mask = 0xff >> -shift; - else - mask = (uint8_t)(0xff << shift); - mask |= mask << 8; - mask |= mask << 16; - break; - case 1: - if (op == 4) - mask = 0xffff >> -shift; - else - mask = (uint16_t)(0xffff << shift); - mask |= mask << 16; - break; - case 2: - if (shift < -31 || shift > 31) { - mask = 0; - } else { + case 0: if (op == 4) - mask = 0xffffffffu >> -shift; + mask = 0xff >> -shift; else - mask = 0xffffffffu << shift; - } - break; - default: - abort(); + mask = (uint8_t)(0xff << shift); + mask |= mask << 8; + mask |= mask << 16; + break; + case 1: + if (op == 4) + mask = 0xffff >> -shift; + else + mask = (uint16_t)(0xffff << shift); + mask |= mask << 16; + break; + case 2: + if (shift < -31 || shift > 31) { + mask = 0; + } else { + if (op == 4) + mask = 0xffffffffu >> -shift; + else + mask = 0xffffffffu << shift; + } + break; + default: + abort(); } tmp2 = neon_load_reg(rd, pass); tcg_gen_andi_i32(tmp, tmp, mask); @@ -5372,11 +5514,11 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins tcg_temp_free_i64(tmp64); } else { if (size == 1) { - imm = (uint16_t)shift; + imm = (uint16_t) shift; imm |= imm << 16; } else { /* size == 2 */ - imm = (uint32_t)shift; + imm = (uint32_t) shift; } tmp2 = tcg_const_i32(imm); tmp4 = neon_load_reg(rm + 1, 0); @@ -5387,15 +5529,13 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins } else { tmp = tmp4; } - gen_neon_shift_narrow(size, tmp, tmp2, q, - input_unsigned); + gen_neon_shift_narrow(size, tmp, tmp2, q, input_unsigned); if (pass == 0) { tmp3 = 
neon_load_reg(rm, 1); } else { tmp3 = tmp5; } - gen_neon_shift_narrow(size, tmp3, tmp2, q, - input_unsigned); + gen_neon_shift_narrow(size, tmp3, tmp2, q, input_unsigned); tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3); tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp3); @@ -5439,7 +5579,7 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins imm = 0xffffffff >> (32 - shift); } if (size < 2) { - imm64 = imm | (((uint64_t)imm) << 32); + imm64 = imm | (((uint64_t) imm) << 32); } else { imm64 = imm; } @@ -5490,42 +5630,47 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins * valid constant encoding of 0 had been given. */ switch (op) { - case 0: case 1: - /* no-op */ - break; - case 2: case 3: - imm <<= 8; - break; - case 4: case 5: - imm <<= 16; - break; - case 6: case 7: - imm <<= 24; - break; - case 8: case 9: - imm |= imm << 16; - break; - case 10: case 11: - imm = (imm << 8) | (imm << 24); - break; - case 12: - imm = (imm << 8) | 0xff; - break; - case 13: - imm = (imm << 16) | 0xffff; - break; - case 14: - imm |= (imm << 8) | (imm << 16) | (imm << 24); - if (invert) - imm = ~imm; - break; - case 15: - if (invert) { - return 1; - } - imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19) - | ((imm & 0x40) ? (0x1f << 25) : (1 << 30)); - break; + case 0: + case 1: + /* no-op */ + break; + case 2: + case 3: + imm <<= 8; + break; + case 4: + case 5: + imm <<= 16; + break; + case 6: + case 7: + imm <<= 24; + break; + case 8: + case 9: + imm |= imm << 16; + break; + case 10: + case 11: + imm = (imm << 8) | (imm << 24); + break; + case 12: + imm = (imm << 8) | 0xff; + break; + case 13: + imm = (imm << 16) | 0xffff; + break; + case 14: + imm |= (imm << 8) | (imm << 16) | (imm << 24); + if (invert) + imm = ~imm; + break; + case 15: + if (invert) { + return 1; + } + imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19) | ((imm & 0x40) ? 
(0x1f << 25) : (1 << 30)); + break; } if (invert) imm = ~imm; @@ -5598,14 +5743,10 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins src2_wide = neon_3reg_wide[op][2]; undefreq = neon_3reg_wide[op][3]; - if (((undefreq & 1) && (size != 0)) || - ((undefreq & 2) && (size == 0)) || - ((undefreq & 4) && u)) { + if (((undefreq & 1) && (size != 0)) || ((undefreq & 2) && (size == 0)) || ((undefreq & 4) && u)) { return 1; } - if ((src1_wide && (rn & 1)) || - (src2_wide && (rm & 1)) || - (!src2_wide && (rd & 1))) { + if ((src1_wide && (rn & 1)) || (src2_wide && (rm & 1)) || (!src2_wide && (rd & 1))) { return 1; } @@ -5648,48 +5789,59 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins } } switch (op) { - case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */ - gen_neon_addl(size); - break; - case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */ - gen_neon_subl(size); - break; - case 5: case 7: /* VABAL, VABDL */ - switch ((size << 1) | u) { case 0: - gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2); - break; case 1: - gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2); + case 4: /* VADDL, VADDW, VADDHN, VRADDHN */ + gen_neon_addl(size); break; case 2: - gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2); - break; case 3: - gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2); - break; - case 4: - gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2); + case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */ + gen_neon_subl(size); break; case 5: - gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2); + case 7: /* VABAL, VABDL */ + switch ((size << 1) | u) { + case 0: + gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2); + break; + case 1: + gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2); + break; + case 2: + gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2); + break; + case 3: + gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2); + break; + case 4: + gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2); + break; + case 5: + gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2); + break; + default: + abort(); + } + tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp); break; - default: abort(); - } - tcg_temp_free_i32(tmp2); - tcg_temp_free_i32(tmp); - break; - case 8: case 9: case 10: case 11: case 12: case 13: - /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */ - gen_neon_mull(cpu_V0, tmp, tmp2, size, u); - break; - case 14: /* Polynomial VMULL */ - gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2); - tcg_temp_free_i32(tmp2); - tcg_temp_free_i32(tmp); - break; - default: /* 15 is RESERVED: caught earlier */ - abort(); + case 8: + case 9: + case 10: + case 11: + case 12: + case 13: + /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */ + gen_neon_mull(cpu_V0, tmp, tmp2, size, u); + break; + case 14: /* Polynomial VMULL */ + gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2); + tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp); + break; + default: /* 15 is RESERVED: caught earlier */ + abort(); } if (op == 13) { /* VQDMULL */ @@ -5699,21 +5851,23 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins /* Accumulate. 
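
Stepping back to the one-register-and-immediate expansion above: ops 0 through 13 only place or replicate the 8-bit field inside a 32-bit lane constant. A compact restatement as plain C (illustrative helper; ops 14 and 15 carry extra inversion and float-encoding rules and are left out):

    #include <stdint.h>

    static uint32_t neon_modimm_expand(int op, int invert, uint32_t imm) {
        switch (op) {
        case 0: case 1:                                    break;
        case 2: case 3:   imm <<= 8;                       break;
        case 4: case 5:   imm <<= 16;                      break;
        case 6: case 7:   imm <<= 24;                      break;
        case 8: case 9:   imm |= imm << 16;                break; /* per halfword */
        case 10: case 11: imm = (imm << 8) | (imm << 24);  break;
        case 12:          imm = (imm << 8) | 0xff;         break;
        case 13:          imm = (imm << 16) | 0xffff;      break;
        }
        return invert ? ~imm : imm;
    }
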
*/ neon_load_reg64(cpu_V1, rd + pass); switch (op) { - case 10: /* VMLSL */ - gen_neon_negl(cpu_V0, size); - /* Fall through */ - case 5: case 8: /* VABAL, VMLAL */ - gen_neon_addl(size); - break; - case 9: case 11: /* VQDMLAL, VQDMLSL */ - gen_neon_addl_saturate(cpu_V0, cpu_V0, size); - if (op == 11) { + case 10: /* VMLSL */ gen_neon_negl(cpu_V0, size); - } - gen_neon_addl_saturate(cpu_V0, cpu_V1, size); - break; - default: - abort(); + /* Fall through */ + case 5: + case 8: /* VABAL, VMLAL */ + gen_neon_addl(size); + break; + case 9: + case 11: /* VQDMLAL, VQDMLSL */ + gen_neon_addl_saturate(cpu_V0, cpu_V0, size); + if (op == 11) { + gen_neon_negl(cpu_V0, size); + } + gen_neon_addl_saturate(cpu_V0, cpu_V1, size); + break; + default: + abort(); } neon_store_reg64(cpu_V0, rd + pass); } else if (op == 4 || op == 6) { @@ -5721,32 +5875,34 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins tmp = tcg_temp_new_i32(); if (!u) { switch (size) { - case 0: - gen_helper_neon_narrow_high_u8(tmp, cpu_V0); - break; - case 1: - gen_helper_neon_narrow_high_u16(tmp, cpu_V0); - break; - case 2: - tcg_gen_shri_i64(cpu_V0, cpu_V0, 32); - tcg_gen_trunc_i64_i32(tmp, cpu_V0); - break; - default: abort(); + case 0: + gen_helper_neon_narrow_high_u8(tmp, cpu_V0); + break; + case 1: + gen_helper_neon_narrow_high_u16(tmp, cpu_V0); + break; + case 2: + tcg_gen_shri_i64(cpu_V0, cpu_V0, 32); + tcg_gen_trunc_i64_i32(tmp, cpu_V0); + break; + default: + abort(); } } else { switch (size) { - case 0: - gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0); - break; - case 1: - gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0); - break; - case 2: - tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31); - tcg_gen_shri_i64(cpu_V0, cpu_V0, 32); - tcg_gen_trunc_i64_i32(tmp, cpu_V0); - break; - default: abort(); + case 0: + gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0); + break; + case 1: + gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0); + break; + case 2: + tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31); + tcg_gen_shri_i64(cpu_V0, cpu_V0, 32); + tcg_gen_trunc_i64_i32(tmp, cpu_V0); + break; + default: + abort(); } } if (pass == 0) { @@ -5769,144 +5925,149 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins return 1; } switch (op) { - case 1: /* Float VMLA scalar */ - case 5: /* Floating point VMLS scalar */ - case 9: /* Floating point VMUL scalar */ - if (size == 1) { - return 1; - } + case 1: /* Float VMLA scalar */ + case 5: /* Floating point VMLS scalar */ + case 9: /* Floating point VMUL scalar */ + if (size == 1) { + return 1; + } /* fall through */ - case 0: /* Integer VMLA scalar */ - case 4: /* Integer VMLS scalar */ - case 8: /* Integer VMUL scalar */ - case 12: /* VQDMULH scalar */ - case 13: /* VQRDMULH scalar */ - if (u && ((rd | rn) & 1)) { - return 1; - } - tmp = neon_get_scalar(size, rm); - neon_store_scratch(0, tmp); - for (pass = 0; pass < (u ? 
4 : 2); pass++) { - tmp = neon_load_scratch(0); - tmp2 = neon_load_reg(rn, pass); - if (op == 12) { - if (size == 1) { - gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); - } else { - gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); - } - } else if (op == 13) { - if (size == 1) { - gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); - } else { - gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); - } - } else if (op & 1) { - TCGv_ptr fpstatus = get_fpstatus_ptr(1); - gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus); - tcg_temp_free_ptr(fpstatus); - } else { - switch (size) { - case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break; - case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break; - default: abort(); - } + case 0: /* Integer VMLA scalar */ + case 4: /* Integer VMLS scalar */ + case 8: /* Integer VMUL scalar */ + case 12: /* VQDMULH scalar */ + case 13: /* VQRDMULH scalar */ + if (u && ((rd | rn) & 1)) { + return 1; } - tcg_temp_free_i32(tmp2); - if (op < 8) { - /* Accumulate. */ - tmp2 = neon_load_reg(rd, pass); - switch (op) { - case 0: - gen_neon_add(size, tmp, tmp2); - break; - case 1: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(1); - gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus); - tcg_temp_free_ptr(fpstatus); - break; - } - case 4: - gen_neon_rsb(size, tmp, tmp2); - break; - case 5: - { + tmp = neon_get_scalar(size, rm); + neon_store_scratch(0, tmp); + for (pass = 0; pass < (u ? 4 : 2); pass++) { + tmp = neon_load_scratch(0); + tmp2 = neon_load_reg(rn, pass); + if (op == 12) { + if (size == 1) { + gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); + } else { + gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); + } + } else if (op == 13) { + if (size == 1) { + gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); + } else { + gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); + } + } else if (op & 1) { TCGv_ptr fpstatus = get_fpstatus_ptr(1); - gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus); + gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus); tcg_temp_free_ptr(fpstatus); - break; - } - default: - abort(); + } else { + switch (size) { + case 0: + gen_helper_neon_mul_u8(tmp, tmp, tmp2); + break; + case 1: + gen_helper_neon_mul_u16(tmp, tmp, tmp2); + break; + case 2: + tcg_gen_mul_i32(tmp, tmp, tmp2); + break; + default: + abort(); + } } tcg_temp_free_i32(tmp2); + if (op < 8) { + /* Accumulate. */ + tmp2 = neon_load_reg(rd, pass); + switch (op) { + case 0: + gen_neon_add(size, tmp, tmp2); + break; + case 1: { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus); + tcg_temp_free_ptr(fpstatus); + break; + } + case 4: + gen_neon_rsb(size, tmp, tmp2); + break; + case 5: { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus); + tcg_temp_free_ptr(fpstatus); + break; + } + default: + abort(); + } + tcg_temp_free_i32(tmp2); + } + neon_store_reg(rd, pass, tmp); } - neon_store_reg(rd, pass, tmp); - } - break; - case 3: /* VQDMLAL scalar */ - case 7: /* VQDMLSL scalar */ - case 11: /* VQDMULL scalar */ - if (u == 1) { - return 1; - } - /* fall through */ - case 2: /* VMLAL sclar */ - case 6: /* VMLSL scalar */ - case 10: /* VMULL scalar */ - if (rd & 1) { - return 1; - } - tmp2 = neon_get_scalar(size, rm); - /* We need a copy of tmp2 because gen_neon_mull - * deletes it during pass 0. 
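
The VQDMULH helpers called in the scalar loop above compute the doubled high half of a signed product, saturating the one input pair that can overflow; VQRDMULH additionally adds a rounding constant of 1 << 31 before taking the high half. A sketch of the 32-bit case (illustrative; the real helpers latch the sticky QC flag in env rather than through a pointer):

    #include <stdint.h>

    static int32_t qdmulh_s32(int32_t a, int32_t b, int *qc) {
        if (a == INT32_MIN && b == INT32_MIN) {
            *qc = 1;                /* only (MIN, MIN) can overflow */
            return INT32_MAX;
        }
        return (int32_t) (((int64_t) a * b * 2) >> 32);
    }
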
*/ - tmp4 = tcg_temp_new_i32(); - tcg_gen_mov_i32(tmp4, tmp2); - tmp3 = neon_load_reg(rn, 1); - - for (pass = 0; pass < 2; pass++) { - if (pass == 0) { - tmp = neon_load_reg(rn, 0); - } else { - tmp = tmp3; - tmp2 = tmp4; + break; + case 3: /* VQDMLAL scalar */ + case 7: /* VQDMLSL scalar */ + case 11: /* VQDMULL scalar */ + if (u == 1) { + return 1; } - gen_neon_mull(cpu_V0, tmp, tmp2, size, u); - if (op != 11) { - neon_load_reg64(cpu_V1, rd + pass); + /* fall through */ + case 2: /* VMLAL scalar */ + case 6: /* VMLSL scalar */ + case 10: /* VMULL scalar */ + if (rd & 1) { + return 1; } - switch (op) { - case 6: - gen_neon_negl(cpu_V0, size); - /* Fall through */ - case 2: - gen_neon_addl(size); - break; - case 3: case 7: - gen_neon_addl_saturate(cpu_V0, cpu_V0, size); - if (op == 7) { - gen_neon_negl(cpu_V0, size); + tmp2 = neon_get_scalar(size, rm); + /* We need a copy of tmp2 because gen_neon_mull + * deletes it during pass 0. */ + tmp4 = tcg_temp_new_i32(); + tcg_gen_mov_i32(tmp4, tmp2); + tmp3 = neon_load_reg(rn, 1); + + for (pass = 0; pass < 2; pass++) { + if (pass == 0) { + tmp = neon_load_reg(rn, 0); + } else { + tmp = tmp3; + tmp2 = tmp4; } - gen_neon_addl_saturate(cpu_V0, cpu_V1, size); - break; - case 10: - /* no-op */ - break; - case 11: - gen_neon_addl_saturate(cpu_V0, cpu_V0, size); - break; - default: - abort(); + gen_neon_mull(cpu_V0, tmp, tmp2, size, u); + if (op != 11) { + neon_load_reg64(cpu_V1, rd + pass); + } + switch (op) { + case 6: + gen_neon_negl(cpu_V0, size); + /* Fall through */ + case 2: + gen_neon_addl(size); + break; + case 3: + case 7: + gen_neon_addl_saturate(cpu_V0, cpu_V0, size); + if (op == 7) { + gen_neon_negl(cpu_V0, size); + } + gen_neon_addl_saturate(cpu_V0, cpu_V1, size); + break; + case 10: + /* no-op */ + break; + case 11: + gen_neon_addl_saturate(cpu_V0, cpu_V0, size); + break; + default: + abort(); + } + neon_store_reg64(cpu_V0, rd + pass); } - neon_store_reg64(cpu_V0, rd + pass); - } - - break; - default: /* 14 and 15 are RESERVED */ - return 1; + break; + default: /* 14 and 15 are RESERVED */ + return 1; } } } else { /* size == 3 */ @@ -5973,381 +6134,447 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins if ((neon_2rm_sizes[op] & (1 << size)) == 0) { return 1; } - if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) && - q && ((rm | rd) & 1)) { + if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) && q && ((rm | rd) & 1)) { return 1; } switch (op) { - case NEON_2RM_VREV64: - for (pass = 0; pass < (q ? 2 : 1); pass++) { - tmp = neon_load_reg(rm, pass * 2); - tmp2 = neon_load_reg(rm, pass * 2 + 1); - switch (size) { - case 0: tcg_gen_bswap32_i32(tmp, tmp); break; - case 1: gen_swap_half(tmp); break; - case 2: /* no-op */ break; - default: abort(); + case NEON_2RM_VREV64: + for (pass = 0; pass < (q ?
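
gen_neon_mull, used throughout this block, is a widening multiply: two 32-bit lanes produce one 64-bit lane, signed or unsigned according to u. For the size == 2 case it reduces to the following plain-C sketch (name illustrative):

    #include <stdint.h>

    static uint64_t vmull_lane32(uint32_t a, uint32_t b, int is_unsigned) {
        if (is_unsigned)
            return (uint64_t) a * b;
        return (uint64_t) ((int64_t) (int32_t) a * (int32_t) b);
    }
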
2 : 1); pass++) { + tmp = neon_load_reg(rm, pass * 2); + tmp2 = neon_load_reg(rm, pass * 2 + 1); + switch (size) { + case 0: + tcg_gen_bswap32_i32(tmp, tmp); + break; + case 1: + gen_swap_half(tmp); + break; + case 2: /* no-op */ + break; + default: + abort(); + } + neon_store_reg(rd, pass * 2 + 1, tmp); + if (size == 2) { + neon_store_reg(rd, pass * 2, tmp2); + } else { + switch (size) { + case 0: + tcg_gen_bswap32_i32(tmp2, tmp2); + break; + case 1: + gen_swap_half(tmp2); + break; + default: + abort(); + } + neon_store_reg(rd, pass * 2, tmp2); + } } - neon_store_reg(rd, pass * 2 + 1, tmp); + break; + case NEON_2RM_VPADDL: + case NEON_2RM_VPADDL_U: + case NEON_2RM_VPADAL: + case NEON_2RM_VPADAL_U: + for (pass = 0; pass < q + 1; pass++) { + tmp = neon_load_reg(rm, pass * 2); + gen_neon_widen(cpu_V0, tmp, size, op & 1); + tmp = neon_load_reg(rm, pass * 2 + 1); + gen_neon_widen(cpu_V1, tmp, size, op & 1); + switch (size) { + case 0: + gen_helper_neon_paddl_u16(CPU_V001); + break; + case 1: + gen_helper_neon_paddl_u32(CPU_V001); + break; + case 2: + tcg_gen_add_i64(CPU_V001); + break; + default: + abort(); + } + if (op >= NEON_2RM_VPADAL) { + /* Accumulate. */ + neon_load_reg64(cpu_V1, rd + pass); + gen_neon_addl(size); + } + neon_store_reg64(cpu_V0, rd + pass); + } + break; + case NEON_2RM_VTRN: if (size == 2) { - neon_store_reg(rd, pass * 2, tmp2); + int n; + for (n = 0; n < (q ? 4 : 2); n += 2) { + tmp = neon_load_reg(rm, n); + tmp2 = neon_load_reg(rd, n + 1); + neon_store_reg(rm, n, tmp2); + neon_store_reg(rd, n + 1, tmp); + } } else { - switch (size) { - case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break; - case 1: gen_swap_half(tmp2); break; - default: abort(); + goto elementwise; + } + break; + case NEON_2RM_VUZP: + if (gen_neon_unzip(rd, rm, size, q)) { + return 1; + } + break; + case NEON_2RM_VZIP: + if (gen_neon_zip(rd, rm, size, q)) { + return 1; + } + break; + case NEON_2RM_VMOVN: + case NEON_2RM_VQMOVN: + /* also VQMOVUN; op field and mnemonics don't line up */ + if (rm & 1) { + return 1; + } + TCGV_UNUSED(tmp2); + for (pass = 0; pass < 2; pass++) { + neon_load_reg64(cpu_V0, rm + pass); + tmp = tcg_temp_new_i32(); + gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size, tmp, cpu_V0); + if (pass == 0) { + tmp2 = tmp; + } else { + neon_store_reg(rd, 0, tmp2); + neon_store_reg(rd, 1, tmp); } - neon_store_reg(rd, pass * 2, tmp2); } - } - break; - case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U: - case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U: - for (pass = 0; pass < q + 1; pass++) { - tmp = neon_load_reg(rm, pass * 2); - gen_neon_widen(cpu_V0, tmp, size, op & 1); - tmp = neon_load_reg(rm, pass * 2 + 1); - gen_neon_widen(cpu_V1, tmp, size, op & 1); - switch (size) { - case 0: gen_helper_neon_paddl_u16(CPU_V001); break; - case 1: gen_helper_neon_paddl_u32(CPU_V001); break; - case 2: tcg_gen_add_i64(CPU_V001); break; - default: abort(); + break; + case NEON_2RM_VSHLL: + if (q || (rd & 1)) { + return 1; } - if (op >= NEON_2RM_VPADAL) { - /* Accumulate. */ - neon_load_reg64(cpu_V1, rd + pass); - gen_neon_addl(size); + tmp = neon_load_reg(rm, 0); + tmp2 = neon_load_reg(rm, 1); + for (pass = 0; pass < 2; pass++) { + if (pass == 1) + tmp = tmp2; + gen_neon_widen(cpu_V0, tmp, size, 1); + tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size); + neon_store_reg64(cpu_V0, rd + pass); } - neon_store_reg64(cpu_V0, rd + pass); - } - break; - case NEON_2RM_VTRN: - if (size == 2) { - int n; - for (n = 0; n < (q ? 
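
The VREV64 case above reverses element order within each 64-bit doubleword: byte-swap or halfword-swap inside each 32-bit half, then exchange the two halves. A self-contained sketch under those assumptions:

    #include <stdint.h>

    static uint32_t bswap32(uint32_t x) {
        return (x >> 24) | ((x >> 8) & 0xff00u) | ((x << 8) & 0xff0000u) | (x << 24);
    }

    static void vrev64(uint32_t *lo, uint32_t *hi, int size) {
        uint32_t t0 = *lo, t1 = *hi;
        if (size == 0) {            /* 8-bit elements */
            t0 = bswap32(t0);
            t1 = bswap32(t1);
        } else if (size == 1) {     /* 16-bit elements */
            t0 = (t0 >> 16) | (t0 << 16);
            t1 = (t1 >> 16) | (t1 << 16);
        }                           /* size == 2: the words swap as-is */
        *lo = t1;
        *hi = t0;
    }
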
4 : 2); n += 2) { - tmp = neon_load_reg(rm, n); - tmp2 = neon_load_reg(rd, n + 1); - neon_store_reg(rm, n, tmp2); - neon_store_reg(rd, n + 1, tmp); + break; + case NEON_2RM_VCVT_F16_F32: + if (!arm_feature(env, ARM_FEATURE_VFP_FP16) || q || (rm & 1)) { + return 1; } - } else { - goto elementwise; - } - break; - case NEON_2RM_VUZP: - if (gen_neon_unzip(rd, rm, size, q)) { - return 1; - } - break; - case NEON_2RM_VZIP: - if (gen_neon_zip(rd, rm, size, q)) { - return 1; - } - break; - case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN: - /* also VQMOVUN; op field and mnemonics don't line up */ - if (rm & 1) { - return 1; - } - TCGV_UNUSED(tmp2); - for (pass = 0; pass < 2; pass++) { - neon_load_reg64(cpu_V0, rm + pass); tmp = tcg_temp_new_i32(); - gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size, - tmp, cpu_V0); - if (pass == 0) { - tmp2 = tmp; - } else { - neon_store_reg(rd, 0, tmp2); - neon_store_reg(rd, 1, tmp); + tmp2 = tcg_temp_new_i32(); + tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0)); + gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env); + tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1)); + gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env); + tcg_gen_shli_i32(tmp2, tmp2, 16); + tcg_gen_or_i32(tmp2, tmp2, tmp); + tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2)); + gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env); + tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3)); + neon_store_reg(rd, 0, tmp2); + tmp2 = tcg_temp_new_i32(); + gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env); + tcg_gen_shli_i32(tmp2, tmp2, 16); + tcg_gen_or_i32(tmp2, tmp2, tmp); + neon_store_reg(rd, 1, tmp2); + tcg_temp_free_i32(tmp); + break; + case NEON_2RM_VCVT_F32_F16: + if (!arm_feature(env, ARM_FEATURE_VFP_FP16) || q || (rd & 1)) { + return 1; } - } - break; - case NEON_2RM_VSHLL: - if (q || (rd & 1)) { - return 1; - } - tmp = neon_load_reg(rm, 0); - tmp2 = neon_load_reg(rm, 1); - for (pass = 0; pass < 2; pass++) { - if (pass == 1) - tmp = tmp2; - gen_neon_widen(cpu_V0, tmp, size, 1); - tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size); - neon_store_reg64(cpu_V0, rd + pass); - } - break; - case NEON_2RM_VCVT_F16_F32: - if (!arm_feature(env, ARM_FEATURE_VFP_FP16) || - q || (rm & 1)) { - return 1; - } - tmp = tcg_temp_new_i32(); - tmp2 = tcg_temp_new_i32(); - tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0)); - gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env); - tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1)); - gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env); - tcg_gen_shli_i32(tmp2, tmp2, 16); - tcg_gen_or_i32(tmp2, tmp2, tmp); - tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2)); - gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env); - tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3)); - neon_store_reg(rd, 0, tmp2); - tmp2 = tcg_temp_new_i32(); - gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env); - tcg_gen_shli_i32(tmp2, tmp2, 16); - tcg_gen_or_i32(tmp2, tmp2, tmp); - neon_store_reg(rd, 1, tmp2); - tcg_temp_free_i32(tmp); - break; - case NEON_2RM_VCVT_F32_F16: - if (!arm_feature(env, ARM_FEATURE_VFP_FP16) || - q || (rd & 1)) { - return 1; - } - tmp3 = tcg_temp_new_i32(); - tmp = neon_load_reg(rm, 0); - tmp2 = neon_load_reg(rm, 1); - tcg_gen_ext16u_i32(tmp3, tmp); - gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env); - tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0)); - tcg_gen_shri_i32(tmp3, tmp, 16); - gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env); - tcg_gen_st_f32(cpu_F0s, cpu_env, 
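
The VCVT.F16.F32 sequence above converts four single-precision lanes and packs each pair of half-precision results into one 32-bit register with a shift and an or. The packing step in isolation (f32_to_f16() stands in for gen_helper_neon_fcvt_f32_to_f16 and is assumed, not defined here):

    #include <stdint.h>

    extern uint16_t f32_to_f16(float f);   /* assumed conversion helper */

    static uint32_t pack_f16_pair(float even, float odd) {
        /* even-numbered lane in the low half, odd-numbered in the high */
        return (uint32_t) f32_to_f16(even) | ((uint32_t) f32_to_f16(odd) << 16);
    }
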
neon_reg_offset(rd, 1)); - tcg_temp_free_i32(tmp); - tcg_gen_ext16u_i32(tmp3, tmp2); - gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env); - tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2)); - tcg_gen_shri_i32(tmp3, tmp2, 16); - gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env); - tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3)); - tcg_temp_free_i32(tmp2); - tcg_temp_free_i32(tmp3); - break; - default: - elementwise: - for (pass = 0; pass < (q ? 4 : 2); pass++) { - if (neon_2rm_is_float_op(op)) { - tcg_gen_ld_f32(cpu_F0s, cpu_env, - neon_reg_offset(rm, pass)); - TCGV_UNUSED(tmp); - } else { - tmp = neon_load_reg(rm, pass); - } - switch (op) { - case NEON_2RM_VREV32: - switch (size) { - case 0: tcg_gen_bswap32_i32(tmp, tmp); break; - case 1: gen_swap_half(tmp); break; - default: abort(); - } - break; - case NEON_2RM_VREV16: - gen_rev16(tmp); - break; - case NEON_2RM_VCLS: - switch (size) { - case 0: gen_helper_neon_cls_s8(tmp, tmp); break; - case 1: gen_helper_neon_cls_s16(tmp, tmp); break; - case 2: gen_helper_neon_cls_s32(tmp, tmp); break; - default: abort(); - } - break; - case NEON_2RM_VCLZ: - switch (size) { - case 0: gen_helper_neon_clz_u8(tmp, tmp); break; - case 1: gen_helper_neon_clz_u16(tmp, tmp); break; - case 2: gen_helper_clz(tmp, tmp); break; - default: abort(); - } - break; - case NEON_2RM_VCNT: - gen_helper_neon_cnt_u8(tmp, tmp); - break; - case NEON_2RM_VMVN: - tcg_gen_not_i32(tmp, tmp); - break; - case NEON_2RM_VQABS: - switch (size) { - case 0: - gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); - break; - case 1: - gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); - break; - case 2: - gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); - break; - default: abort(); - } - break; - case NEON_2RM_VQNEG: - switch (size) { - case 0: - gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); - break; - case 1: - gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); - break; - case 2: - gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); - break; - default: abort(); - } - break; - case NEON_2RM_VCGT0: case NEON_2RM_VCLE0: - tmp2 = tcg_const_i32(0); - switch(size) { - case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break; - case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break; - default: abort(); - } - tcg_temp_free(tmp2); - if (op == NEON_2RM_VCLE0) { - tcg_gen_not_i32(tmp, tmp); - } - break; - case NEON_2RM_VCGE0: case NEON_2RM_VCLT0: - tmp2 = tcg_const_i32(0); - switch(size) { - case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break; - case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break; - default: abort(); - } - tcg_temp_free(tmp2); - if (op == NEON_2RM_VCLT0) { - tcg_gen_not_i32(tmp, tmp); - } - break; - case NEON_2RM_VCEQ0: - tmp2 = tcg_const_i32(0); - switch(size) { - case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break; - case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break; - default: abort(); + tmp3 = tcg_temp_new_i32(); + tmp = neon_load_reg(rm, 0); + tmp2 = neon_load_reg(rm, 1); + tcg_gen_ext16u_i32(tmp3, tmp); + gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env); + tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0)); + tcg_gen_shri_i32(tmp3, tmp, 16); + gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env); + tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1)); + tcg_temp_free_i32(tmp); + tcg_gen_ext16u_i32(tmp3, tmp2); + gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env); + 
tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2)); + tcg_gen_shri_i32(tmp3, tmp2, 16); + gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env); + tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3)); + tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp3); + break; + default: + elementwise: + for (pass = 0; pass < (q ? 4 : 2); pass++) { + if (neon_2rm_is_float_op(op)) { + tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass)); + TCGV_UNUSED(tmp); + } else { + tmp = neon_load_reg(rm, pass); } - tcg_temp_free(tmp2); - break; - case NEON_2RM_VABS: - switch(size) { - case 0: gen_helper_neon_abs_s8(tmp, tmp); break; - case 1: gen_helper_neon_abs_s16(tmp, tmp); break; - case 2: tcg_gen_abs_i32(tmp, tmp); break; - default: abort(); + switch (op) { + case NEON_2RM_VREV32: + switch (size) { + case 0: + tcg_gen_bswap32_i32(tmp, tmp); + break; + case 1: + gen_swap_half(tmp); + break; + default: + abort(); + } + break; + case NEON_2RM_VREV16: + gen_rev16(tmp); + break; + case NEON_2RM_VCLS: + switch (size) { + case 0: + gen_helper_neon_cls_s8(tmp, tmp); + break; + case 1: + gen_helper_neon_cls_s16(tmp, tmp); + break; + case 2: + gen_helper_neon_cls_s32(tmp, tmp); + break; + default: + abort(); + } + break; + case NEON_2RM_VCLZ: + switch (size) { + case 0: + gen_helper_neon_clz_u8(tmp, tmp); + break; + case 1: + gen_helper_neon_clz_u16(tmp, tmp); + break; + case 2: + gen_helper_clz(tmp, tmp); + break; + default: + abort(); + } + break; + case NEON_2RM_VCNT: + gen_helper_neon_cnt_u8(tmp, tmp); + break; + case NEON_2RM_VMVN: + tcg_gen_not_i32(tmp, tmp); + break; + case NEON_2RM_VQABS: + switch (size) { + case 0: + gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); + break; + case 1: + gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); + break; + case 2: + gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); + break; + default: + abort(); + } + break; + case NEON_2RM_VQNEG: + switch (size) { + case 0: + gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); + break; + case 1: + gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); + break; + case 2: + gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); + break; + default: + abort(); + } + break; + case NEON_2RM_VCGT0: + case NEON_2RM_VCLE0: + tmp2 = tcg_const_i32(0); + switch (size) { + case 0: + gen_helper_neon_cgt_s8(tmp, tmp, tmp2); + break; + case 1: + gen_helper_neon_cgt_s16(tmp, tmp, tmp2); + break; + case 2: + gen_helper_neon_cgt_s32(tmp, tmp, tmp2); + break; + default: + abort(); + } + tcg_temp_free(tmp2); + if (op == NEON_2RM_VCLE0) { + tcg_gen_not_i32(tmp, tmp); + } + break; + case NEON_2RM_VCGE0: + case NEON_2RM_VCLT0: + tmp2 = tcg_const_i32(0); + switch (size) { + case 0: + gen_helper_neon_cge_s8(tmp, tmp, tmp2); + break; + case 1: + gen_helper_neon_cge_s16(tmp, tmp, tmp2); + break; + case 2: + gen_helper_neon_cge_s32(tmp, tmp, tmp2); + break; + default: + abort(); + } + tcg_temp_free(tmp2); + if (op == NEON_2RM_VCLT0) { + tcg_gen_not_i32(tmp, tmp); + } + break; + case NEON_2RM_VCEQ0: + tmp2 = tcg_const_i32(0); + switch (size) { + case 0: + gen_helper_neon_ceq_u8(tmp, tmp, tmp2); + break; + case 1: + gen_helper_neon_ceq_u16(tmp, tmp, tmp2); + break; + case 2: + gen_helper_neon_ceq_u32(tmp, tmp, tmp2); + break; + default: + abort(); + } + tcg_temp_free(tmp2); + break; + case NEON_2RM_VABS: + switch (size) { + case 0: + gen_helper_neon_abs_s8(tmp, tmp); + break; + case 1: + gen_helper_neon_abs_s16(tmp, tmp); + break; + case 2: + tcg_gen_abs_i32(tmp, tmp); + break; + default: + abort(); + } + break; + case NEON_2RM_VNEG: + tmp2 = tcg_const_i32(0); + gen_neon_rsb(size, tmp, 
tmp2); + tcg_temp_free(tmp2); + break; + case NEON_2RM_VCGT0_F: { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + tmp2 = tcg_const_i32(0); + gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus); + tcg_temp_free(tmp2); + tcg_temp_free_ptr(fpstatus); + break; + } + case NEON_2RM_VCGE0_F: { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + tmp2 = tcg_const_i32(0); + gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus); + tcg_temp_free(tmp2); + tcg_temp_free_ptr(fpstatus); + break; + } + case NEON_2RM_VCEQ0_F: { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + tmp2 = tcg_const_i32(0); + gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus); + tcg_temp_free(tmp2); + tcg_temp_free_ptr(fpstatus); + break; + } + case NEON_2RM_VCLE0_F: { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + tmp2 = tcg_const_i32(0); + gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus); + tcg_temp_free(tmp2); + tcg_temp_free_ptr(fpstatus); + break; + } + case NEON_2RM_VCLT0_F: { + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + tmp2 = tcg_const_i32(0); + gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus); + tcg_temp_free(tmp2); + tcg_temp_free_ptr(fpstatus); + break; + } + case NEON_2RM_VABS_F: + gen_vfp_abs(0); + break; + case NEON_2RM_VNEG_F: + gen_vfp_neg(0); + break; + case NEON_2RM_VSWP: + tmp2 = neon_load_reg(rd, pass); + neon_store_reg(rm, pass, tmp2); + break; + case NEON_2RM_VTRN: + tmp2 = neon_load_reg(rd, pass); + switch (size) { + case 0: + gen_neon_trn_u8(tmp, tmp2); + break; + case 1: + gen_neon_trn_u16(tmp, tmp2); + break; + default: + abort(); + } + neon_store_reg(rm, pass, tmp2); + break; + case NEON_2RM_VRECPE: + gen_helper_recpe_u32(tmp, tmp, cpu_env); + break; + case NEON_2RM_VRSQRTE: + gen_helper_rsqrte_u32(tmp, tmp, cpu_env); + break; + case NEON_2RM_VRECPE_F: + gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env); + break; + case NEON_2RM_VRSQRTE_F: + gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env); + break; + case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */ + gen_vfp_sito(0, 1); + break; + case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */ + gen_vfp_uito(0, 1); + break; + case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */ + gen_vfp_tosiz(0, 1); + break; + case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */ + gen_vfp_touiz(0, 1); + break; + default: + /* Reserved op values were caught by the + * neon_2rm_sizes[] check earlier. 
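
A pattern worth noting in the compare-with-zero cases above: NEON compares return an all-ones or all-zeros mask per lane, so the integer VCLE0/VCLT0 simply invert the VCGT0/VCGE0 result. The float variants cannot do that, because a comparison involving NaN must be false both ways, so they swap operands into the cge/cgt helpers instead. The integer trick in plain C:

    #include <stdint.h>

    static uint32_t vcgt0_s32(int32_t a) {
        return a > 0 ? 0xffffffffu : 0;
    }

    static uint32_t vcle0_s32(int32_t a) {
        return ~vcgt0_s32(a);       /* a <= 0 is exactly !(a > 0) */
    }
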
+ */ + abort(); } - break; - case NEON_2RM_VNEG: - tmp2 = tcg_const_i32(0); - gen_neon_rsb(size, tmp, tmp2); - tcg_temp_free(tmp2); - break; - case NEON_2RM_VCGT0_F: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(1); - tmp2 = tcg_const_i32(0); - gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus); - tcg_temp_free(tmp2); - tcg_temp_free_ptr(fpstatus); - break; - } - case NEON_2RM_VCGE0_F: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(1); - tmp2 = tcg_const_i32(0); - gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus); - tcg_temp_free(tmp2); - tcg_temp_free_ptr(fpstatus); - break; - } - case NEON_2RM_VCEQ0_F: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(1); - tmp2 = tcg_const_i32(0); - gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus); - tcg_temp_free(tmp2); - tcg_temp_free_ptr(fpstatus); - break; - } - case NEON_2RM_VCLE0_F: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(1); - tmp2 = tcg_const_i32(0); - gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus); - tcg_temp_free(tmp2); - tcg_temp_free_ptr(fpstatus); - break; - } - case NEON_2RM_VCLT0_F: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(1); - tmp2 = tcg_const_i32(0); - gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus); - tcg_temp_free(tmp2); - tcg_temp_free_ptr(fpstatus); - break; - } - case NEON_2RM_VABS_F: - gen_vfp_abs(0); - break; - case NEON_2RM_VNEG_F: - gen_vfp_neg(0); - break; - case NEON_2RM_VSWP: - tmp2 = neon_load_reg(rd, pass); - neon_store_reg(rm, pass, tmp2); - break; - case NEON_2RM_VTRN: - tmp2 = neon_load_reg(rd, pass); - switch (size) { - case 0: gen_neon_trn_u8(tmp, tmp2); break; - case 1: gen_neon_trn_u16(tmp, tmp2); break; - default: abort(); + if (neon_2rm_is_float_op(op)) { + tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass)); + } else { + neon_store_reg(rd, pass, tmp); } - neon_store_reg(rm, pass, tmp2); - break; - case NEON_2RM_VRECPE: - gen_helper_recpe_u32(tmp, tmp, cpu_env); - break; - case NEON_2RM_VRSQRTE: - gen_helper_rsqrte_u32(tmp, tmp, cpu_env); - break; - case NEON_2RM_VRECPE_F: - gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env); - break; - case NEON_2RM_VRSQRTE_F: - gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env); - break; - case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */ - gen_vfp_sito(0, 1); - break; - case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */ - gen_vfp_uito(0, 1); - break; - case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */ - gen_vfp_tosiz(0, 1); - break; - case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */ - gen_vfp_touiz(0, 1); - break; - default: - /* Reserved op values were caught by the - * neon_2rm_sizes[] check earlier. - */ - abort(); - } - if (neon_2rm_is_float_op(op)) { - tcg_gen_st_f32(cpu_F0s, cpu_env, - neon_reg_offset(rd, pass)); - } else { - neon_store_reg(rd, pass, tmp); } - } - break; + break; } } else if ((insn & (1 << 10)) == 0) { /* VTBL, VTBX. */ @@ -6415,8 +6642,7 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins return 0; } -static int disas_cp14_read(CPUARMState * env, DisasContext *s, uint32_t insn) -{ +static int disas_cp14_read(CPUARMState *env, DisasContext *s, uint32_t insn) { int crn = (insn >> 16) & 0xf; int crm = insn & 0xf; int op1 = (insn >> 21) & 7; @@ -6427,28 +6653,28 @@ static int disas_cp14_read(CPUARMState * env, DisasContext *s, uint32_t insn) /* Minimal set of debug registers, since we don't support debug */ if (op1 == 0 && crn == 0 && op2 == 0) { switch (crm) { - case 0: - /* DBGDIDR: just RAZ. 
In particular this means the - * "debug architecture version" bits will read as - * a reserved value, which should cause Linux to - * not try to use the debug hardware. - */ - tmp = tcg_const_i32(0); - store_reg(s, rt, tmp); - return 0; - case 1: - case 2: - /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we - * don't implement memory mapped debug components - */ - if (ENABLE_ARCH_7) { + case 0: + /* DBGDIDR: just RAZ. In particular this means the + * "debug architecture version" bits will read as + * a reserved value, which should cause Linux to + * not try to use the debug hardware. + */ tmp = tcg_const_i32(0); store_reg(s, rt, tmp); return 0; - } - break; - default: - break; + case 1: + case 2: + /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we + * don't implement memory mapped debug components + */ + if (ENABLE_ARCH_7) { + tmp = tcg_const_i32(0); + store_reg(s, rt, tmp); + return 0; + } + break; + default: + break; } } @@ -6473,8 +6699,7 @@ static int disas_cp14_read(CPUARMState * env, DisasContext *s, uint32_t insn) return 1; } -static int disas_cp14_write(CPUARMState * env, DisasContext *s, uint32_t insn) -{ +static int disas_cp14_write(CPUARMState *env, DisasContext *s, uint32_t insn) { int crn = (insn >> 16) & 0xf; int crm = insn & 0xf; int op1 = (insn >> 21) & 7; @@ -6504,49 +6729,45 @@ static int disas_cp14_write(CPUARMState * env, DisasContext *s, uint32_t insn) return 1; } -static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn) -{ +static int disas_coproc_insn(CPUARMState *env, DisasContext *s, uint32_t insn) { int cpnum; cpnum = (insn >> 8) & 0xf; - if (arm_feature(env, ARM_FEATURE_XSCALE) - && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum))) - return 1; + if (arm_feature(env, ARM_FEATURE_XSCALE) && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum))) + return 1; switch (cpnum) { - case 0: - case 1: - if (arm_feature(env, ARM_FEATURE_IWMMXT)) { - return disas_iwmmxt_insn(env, s, insn); - } else if (arm_feature(env, ARM_FEATURE_XSCALE)) { - return disas_dsp_insn(env, s, insn); - } - return 1; - case 10: - case 11: - return disas_vfp_insn (env, s, insn); - case 14: - /* Coprocessors 7-15 are architecturally reserved by ARM. - Unfortunately Intel decided to ignore this. */ - if (arm_feature(env, ARM_FEATURE_XSCALE)) - goto board; - if (insn & (1 << 20)) - return disas_cp14_read(env, s, insn); - else - return disas_cp14_write(env, s, insn); - case 15: - return disas_cp15_insn (env, s, insn); - default: - board: - /* Unknown coprocessor. See if the board has hooked it. */ - return disas_cp_insn (env, s, insn); + case 0: + case 1: + if (arm_feature(env, ARM_FEATURE_IWMMXT)) { + return disas_iwmmxt_insn(env, s, insn); + } else if (arm_feature(env, ARM_FEATURE_XSCALE)) { + return disas_dsp_insn(env, s, insn); + } + return 1; + case 10: + case 11: + return disas_vfp_insn(env, s, insn); + case 14: + /* Coprocessors 7-15 are architecturally reserved by ARM. + Unfortunately Intel decided to ignore this. */ + if (arm_feature(env, ARM_FEATURE_XSCALE)) + goto board; + if (insn & (1 << 20)) + return disas_cp14_read(env, s, insn); + else + return disas_cp14_write(env, s, insn); + case 15: + return disas_cp15_insn(env, s, insn); + default: + board: + /* Unknown coprocessor. See if the board has hooked it. */ + return disas_cp_insn(env, s, insn); } } - /* Store a 64-bit value to a register pair. Clobbers val. 
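
gen_storeq_reg() below lowers to a truncate for the low register and a 32-bit right shift plus truncate for the high one; the equivalent scalar operation:

    #include <stdint.h>

    static void storeq_pair(uint32_t *rlow, uint32_t *rhigh, uint64_t val) {
        *rlow = (uint32_t) val;           /* trunc_i64_i32 */
        *rhigh = (uint32_t) (val >> 32);  /* shri 32, then trunc */
    }
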
*/ -static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val) -{ +static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val) { TCGv tmp; tmp = tcg_temp_new_i32(); tcg_gen_trunc_i64_i32(tmp, val); @@ -6558,8 +6779,7 @@ static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val) } /* load a 32-bit value from a register and perform a 64-bit accumulate. */ -static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow) -{ +static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow) { TCGv_i64 tmp; TCGv tmp2; @@ -6573,8 +6793,7 @@ static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow) } /* load and add a 64-bit value from a register pair. */ -static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh) -{ +static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh) { TCGv_i64 tmp; TCGv tmpl; TCGv tmph; @@ -6591,8 +6810,7 @@ static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh) } /* Set N and Z flags from a 64-bit value. */ -static void gen_logicq_cc(TCGv_i64 val) -{ +static void gen_logicq_cc(TCGv_i64 val) { TCGv tmp = tcg_temp_new_i32(); gen_helper_logicq_cc(tmp, val); gen_logic_CC(tmp); @@ -6608,24 +6826,22 @@ static void gen_logicq_cc(TCGv_i64 val) In system emulation mode only one CPU will be running at once, so this sequence is effectively atomic. In user emulation mode we throw an exception and handle the atomic operation elsewhere. */ -static void gen_load_exclusive(DisasContext *s, int rt, int rt2, - TCGv addr, int size) -{ +static void gen_load_exclusive(DisasContext *s, int rt, int rt2, TCGv addr, int size) { TCGv tmp; switch (size) { - case 0: - tmp = gen_ld8u(addr, IS_USER(s)); - break; - case 1: - tmp = gen_ld16u(addr, IS_USER(s)); - break; - case 2: - case 3: - tmp = gen_ld32(addr, IS_USER(s)); - break; - default: - abort(); + case 0: + tmp = gen_ld8u(addr, IS_USER(s)); + break; + case 1: + tmp = gen_ld16u(addr, IS_USER(s)); + break; + case 2: + case 3: + tmp = gen_ld32(addr, IS_USER(s)); + break; + default: + abort(); } tcg_gen_mov_i32(cpu_exclusive_val, tmp); store_reg(s, rt, tmp); @@ -6640,24 +6856,18 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, tcg_gen_mov_i32(cpu_exclusive_addr, addr); } -static void gen_clrex(DisasContext *s) -{ +static void gen_clrex(DisasContext *s) { tcg_gen_movi_i32(cpu_exclusive_addr, -1); } #ifdef CONFIG_USER_ONLY -static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, - TCGv addr, int size) -{ +static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, TCGv addr, int size) { tcg_gen_mov_i32(cpu_exclusive_test, addr); - tcg_gen_movi_i32(cpu_exclusive_info, - size | (rd << 4) | (rt << 8) | (rt2 << 12)); + tcg_gen_movi_i32(cpu_exclusive_info, size | (rd << 4) | (rt << 8) | (rt2 << 12)); gen_exception_insn(s, 4, EXCP_STREX); } #else -static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, - TCGv addr, int size) -{ +static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, TCGv addr, int size) { TCGv tmp; int done_label; int fail_label; @@ -6672,18 +6882,18 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, done_label = gen_new_label(); tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label); switch (size) { - case 0: - tmp = gen_ld8u(addr, IS_USER(s)); - break; - case 1: - tmp = gen_ld16u(addr, IS_USER(s)); - break; - case 2: - case 3: - tmp = gen_ld32(addr, IS_USER(s)); - break; - default: - abort(); + case 
0: + tmp = gen_ld8u(addr, IS_USER(s)); + break; + case 1: + tmp = gen_ld16u(addr, IS_USER(s)); + break; + case 2: + case 3: + tmp = gen_ld32(addr, IS_USER(s)); + break; + default: + abort(); } tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label); tcg_temp_free_i32(tmp); @@ -6697,18 +6907,18 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, } tmp = load_reg(s, rt); switch (size) { - case 0: - gen_st8(tmp, addr, IS_USER(s)); - break; - case 1: - gen_st16(tmp, addr, IS_USER(s)); - break; - case 2: - case 3: - gen_st32(tmp, addr, IS_USER(s)); - break; - default: - abort(); + case 0: + gen_st8(tmp, addr, IS_USER(s)); + break; + case 1: + gen_st16(tmp, addr, IS_USER(s)); + break; + case 2: + case 3: + gen_st32(tmp, addr, IS_USER(s)); + break; + default: + abort(); } if (size == 3) { tcg_gen_addi_i32(addr, addr, 4); @@ -6724,8 +6934,7 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, } #endif -static void disas_arm_insn(CPUARMState * env, DisasContext *s) -{ +static void disas_arm_insn(CPUARMState *env, DisasContext *s) { unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh; TCGv tmp; TCGv tmp2; @@ -6740,7 +6949,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s) if (IS_M(env)) goto illegal_op; cond = insn >> 28; - if (cond == 0xf){ + if (cond == 0xf) { /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we * choose to UNDEF. In ARMv5 and above the space is used * for miscellaneous unconditional instructions. @@ -6766,8 +6975,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s) goto illegal_op; return; } - if (((insn & 0x0f30f000) == 0x0510f000) || - ((insn & 0x0f30f010) == 0x0710f000)) { + if (((insn & 0x0f30f000) == 0x0510f000) || ((insn & 0x0f30f010) == 0x0710f000)) { if ((insn & (1 << 22)) == 0) { /* PLDW; v7MP */ if (!arm_feature(env, ARM_FEATURE_V7MP)) { @@ -6778,13 +6986,11 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s) ARCH(5TE); return; } - if (((insn & 0x0f70f000) == 0x0450f000) || - ((insn & 0x0f70f010) == 0x0650f000)) { + if (((insn & 0x0f70f000) == 0x0450f000) || ((insn & 0x0f70f010) == 0x0650f000)) { ARCH(7); return; /* PLI; V7 */ } - if (((insn & 0x0f700000) == 0x04100000) || - ((insn & 0x0f700010) == 0x06100000)) { + if (((insn & 0x0f700000) == 0x04100000) || ((insn & 0x0f700010) == 0x06100000)) { if (!arm_feature(env, ARM_FEATURE_V7MP)) { goto illegal_op; } @@ -6801,18 +7007,18 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s) return; } else if ((insn & 0x0fffff00) == 0x057ff000) { switch ((insn >> 4) & 0xf) { - case 1: /* clrex */ - ARCH(6K); - gen_clrex(s); - return; - case 4: /* dsb */ - case 5: /* dmb */ - case 6: /* isb */ - ARCH(7); - /* We don't emulate caches so these are a no-op. */ - return; - default: - goto illegal_op; + case 1: /* clrex */ + ARCH(6K); + gen_clrex(s); + return; + case 4: /* dsb */ + case 5: /* dmb */ + case 6: /* isb */ + ARCH(7); + /* We don't emulate caches so these are a no-op. 
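
As the comment above notes, STREX is modeled by re-checking both the reserved address and the value the matching LDREX read; with a single virtual CPU the check-then-store sequence is effectively atomic. A single-threaded C model of that policy (illustrative; memory is a word array indexed by byte address / 4):

    #include <stdint.h>

    static uint32_t excl_addr = ~0u;    /* ~0 = no reservation, as after CLREX */
    static uint32_t excl_val;

    /* Returns the STREX status value: 0 on success, 1 on failure. */
    static int strex32(uint32_t *mem, uint32_t addr, uint32_t newval) {
        if (addr != excl_addr || mem[addr / 4] != excl_val)
            return 1;                   /* reservation lost: nothing stored */
        mem[addr / 4] = newval;
        excl_addr = ~0u;                /* reservation is consumed */
        return 0;
    }
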
*/ + return; + default: + goto illegal_op; } } else if ((insn & 0x0e5fffe0) == 0x084d0500) { /* srs */ @@ -6827,11 +7033,20 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s) tcg_temp_free_i32(tmp); i = (insn >> 23) & 3; switch (i) { - case 0: offset = -4; break; /* DA */ - case 1: offset = 0; break; /* IA */ - case 2: offset = -8; break; /* DB */ - case 3: offset = 4; break; /* IB */ - default: abort(); + case 0: + offset = -4; + break; /* DA */ + case 1: + offset = 0; + break; /* IA */ + case 2: + offset = -8; + break; /* DB */ + case 3: + offset = 4; + break; /* IB */ + default: + abort(); } if (offset) tcg_gen_addi_i32(addr, addr, offset); @@ -6843,11 +7058,20 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s) if (insn & (1 << 21)) { /* Base writeback. */ switch (i) { - case 0: offset = -8; break; - case 1: offset = 4; break; - case 2: offset = -4; break; - case 3: offset = 0; break; - default: abort(); + case 0: + offset = -8; + break; + case 1: + offset = 4; + break; + case 2: + offset = -4; + break; + case 3: + offset = 0; + break; + default: + abort(); } if (offset) tcg_gen_addi_i32(addr, addr, offset); @@ -6869,11 +7093,20 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s) addr = load_reg(s, rn); i = (insn >> 23) & 3; switch (i) { - case 0: offset = -4; break; /* DA */ - case 1: offset = 0; break; /* IA */ - case 2: offset = -8; break; /* DB */ - case 3: offset = 4; break; /* IB */ - default: abort(); + case 0: + offset = -4; + break; /* DA */ + case 1: + offset = 0; + break; /* IA */ + case 2: + offset = -8; + break; /* DB */ + case 3: + offset = 4; + break; /* IB */ + default: + abort(); } if (offset) tcg_gen_addi_i32(addr, addr, offset); @@ -6884,11 +7117,20 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s) if (insn & (1 << 21)) { /* Base writeback. 
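
The SRS/RFE blocks around this point transfer exactly two words, so their DA/IA/DB/IB switches collapse to small constant tables: one offset placing the first word relative to the base, one adjusting the base on writeback. Restated as data (a sketch, same constants as the switches):

    #include <stdint.h>

    /* mode: 0 = DA, 1 = IA, 2 = DB, 3 = IB */
    static int32_t first_word_offset(int mode) {
        static const int32_t off[4] = { -4, 0, -8, 4 };
        return off[mode];
    }

    static int32_t writeback_offset(int mode) {
        static const int32_t wb[4] = { -8, 4, -4, 0 };
        return wb[mode];
    }
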
*/ switch (i) { - case 0: offset = -8; break; - case 1: offset = 4; break; - case 2: offset = -4; break; - case 3: offset = 0; break; - default: abort(); + case 0: + offset = -8; + break; + case 1: + offset = 4; + break; + case 2: + offset = -4; + break; + case 3: + offset = 0; + break; + default: + abort(); } if (offset) tcg_gen_addi_i32(addr, addr, offset); @@ -6902,12 +7144,12 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s) /* branch link and change to thumb (blx ) */ int32_t offset; - val = (uint32_t)s->pc; + val = (uint32_t) s->pc; tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, val); store_reg(s, 14, tmp); /* Sign-extend the 24-bit offset */ - offset = (((int32_t)insn) << 8) >> 8; + offset = (((int32_t) insn) << 8) >> 8; /* offset * 4 + bit24 * 2 + (thumb bit) */ val += (offset << 2) | ((insn >> 23) & 2) | 1; /* pipeline offset */ @@ -6994,155 +7236,152 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s) goto illegal_op; } } - } else if ((insn & 0x0f900000) == 0x01000000 - && (insn & 0x00000090) != 0x00000090) { + } else if ((insn & 0x0f900000) == 0x01000000 && (insn & 0x00000090) != 0x00000090) { /* miscellaneous instructions */ op1 = (insn >> 21) & 3; sh = (insn >> 4) & 0xf; rm = insn & 0xf; switch (sh) { - case 0x0: /* move program status register */ - if (op1 & 1) { - /* PSR = reg */ - tmp = load_reg(s, rm); - i = ((op1 & 2) != 0); - if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp)) - goto illegal_op; - } else { - /* reg = PSR */ - rd = (insn >> 12) & 0xf; - if (op1 & 2) { - if (IS_USER(s)) + case 0x0: /* move program status register */ + if (op1 & 1) { + /* PSR = reg */ + tmp = load_reg(s, rm); + i = ((op1 & 2) != 0); + if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp)) goto illegal_op; - tmp = load_cpu_field(spsr); } else { - tmp = tcg_temp_new_i32(); - gen_helper_cpsr_read(tmp); + /* reg = PSR */ + rd = (insn >> 12) & 0xf; + if (op1 & 2) { + if (IS_USER(s)) + goto illegal_op; + tmp = load_cpu_field(spsr); + } else { + tmp = tcg_temp_new_i32(); + gen_helper_cpsr_read(tmp); + } + store_reg(s, rd, tmp); } - store_reg(s, rd, tmp); - } - break; - case 0x1: - if (op1 == 1) { - /* branch/exchange thumb (bx). */ - ARCH(4T); - tmp = load_reg(s, rm); - gen_bx(s, tmp); - } else if (op1 == 3) { - /* clz */ - ARCH(5); - rd = (insn >> 12) & 0xf; - tmp = load_reg(s, rm); - gen_helper_clz(tmp, tmp); - store_reg(s, rd, tmp); - } else { - goto illegal_op; - } - break; - case 0x2: - if (op1 == 1) { - ARCH(5J); /* bxj */ - /* Trivial implementation equivalent to bx. */ + break; + case 0x1: + if (op1 == 1) { + /* branch/exchange thumb (bx). */ + ARCH(4T); + tmp = load_reg(s, rm); + gen_bx(s, tmp); + } else if (op1 == 3) { + /* clz */ + ARCH(5); + rd = (insn >> 12) & 0xf; + tmp = load_reg(s, rm); + gen_helper_clz(tmp, tmp); + store_reg(s, rd, tmp); + } else { + goto illegal_op; + } + break; + case 0x2: + if (op1 == 1) { + ARCH(5J); /* bxj */ + /* Trivial implementation equivalent to bx. 
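
The immediate BLX above computes its target by sign-extending the 24-bit field, scaling it to bytes, folding in the H bit as a halfword step, and setting bit 0 to request Thumb state. As a sketch, with pc assumed to hold the ARM pipeline value (instruction address + 8, i.e. s->pc + 4 at this point in the decoder):

    #include <stdint.h>

    static uint32_t blx_imm_target(uint32_t pc, uint32_t insn) {
        int32_t offset = (int32_t) (insn << 8) >> 8;    /* sign-extend 24 bits */
        return pc + (((uint32_t) offset << 2)           /* word offset */
                     | ((insn >> 23) & 2)               /* H bit: halfword step */
                     | 1u);                             /* bit 0: Thumb state */
    }
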
*/ + tmp = load_reg(s, rm); + gen_bx(s, tmp); + } else { + goto illegal_op; + } + break; + case 0x3: + if (op1 != 1) + goto illegal_op; + + ARCH(5); + /* branch link/exchange thumb (blx) */ tmp = load_reg(s, rm); + tmp2 = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp2, s->pc); + store_reg(s, 14, tmp2); gen_bx(s, tmp); - } else { - goto illegal_op; - } - break; - case 0x3: - if (op1 != 1) - goto illegal_op; - - ARCH(5); - /* branch link/exchange thumb (blx) */ - tmp = load_reg(s, rm); - tmp2 = tcg_temp_new_i32(); - tcg_gen_movi_i32(tmp2, s->pc); - store_reg(s, 14, tmp2); - gen_bx(s, tmp); - break; - case 0x5: /* saturating add/subtract */ - ARCH(5TE); - rd = (insn >> 12) & 0xf; - rn = (insn >> 16) & 0xf; - tmp = load_reg(s, rm); - tmp2 = load_reg(s, rn); - if (op1 & 2) - gen_helper_double_saturate(tmp2, tmp2); - if (op1 & 1) - gen_helper_sub_saturate(tmp, tmp, tmp2); - else - gen_helper_add_saturate(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - store_reg(s, rd, tmp); - break; - case 7: - /* SMC instruction (op1 == 3) - and undefined instructions (op1 == 0 || op1 == 2) - will trap */ - if (op1 != 1) { - goto illegal_op; - } - /* bkpt */ - ARCH(5); - gen_exception_insn(s, 4, EXCP_BKPT); - break; - case 0x8: /* signed multiply */ - case 0xa: - case 0xc: - case 0xe: - ARCH(5TE); - rs = (insn >> 8) & 0xf; - rn = (insn >> 12) & 0xf; - rd = (insn >> 16) & 0xf; - if (op1 == 1) { - /* (32 * 16) >> 16 */ + break; + case 0x5: /* saturating add/subtract */ + ARCH(5TE); + rd = (insn >> 12) & 0xf; + rn = (insn >> 16) & 0xf; tmp = load_reg(s, rm); - tmp2 = load_reg(s, rs); - if (sh & 4) - tcg_gen_sari_i32(tmp2, tmp2, 16); + tmp2 = load_reg(s, rn); + if (op1 & 2) + gen_helper_double_saturate(tmp2, tmp2); + if (op1 & 1) + gen_helper_sub_saturate(tmp, tmp, tmp2); else - gen_sxth(tmp2); - tmp64 = gen_muls_i64_i32(tmp, tmp2); - tcg_gen_shri_i64(tmp64, tmp64, 16); - tmp = tcg_temp_new_i32(); - tcg_gen_trunc_i64_i32(tmp, tmp64); - tcg_temp_free_i64(tmp64); - if ((sh & 2) == 0) { - tmp2 = load_reg(s, rn); - gen_helper_add_setq(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - } - store_reg(s, rd, tmp); - } else { - /* 16 * 16 */ - tmp = load_reg(s, rm); - tmp2 = load_reg(s, rs); - gen_mulxy(tmp, tmp2, sh & 2, sh & 4); + gen_helper_add_saturate(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); - if (op1 == 2) { - tmp64 = tcg_temp_new_i64(); - tcg_gen_ext_i32_i64(tmp64, tmp); - tcg_temp_free_i32(tmp); - gen_addq(s, tmp64, rn, rd); - gen_storeq_reg(s, rn, rd, tmp64); + store_reg(s, rd, tmp); + break; + case 7: + /* SMC instruction (op1 == 3) + and undefined instructions (op1 == 0 || op1 == 2) + will trap */ + if (op1 != 1) { + goto illegal_op; + } + /* bkpt */ + ARCH(5); + gen_exception_insn(s, 4, EXCP_BKPT); + break; + case 0x8: /* signed multiply */ + case 0xa: + case 0xc: + case 0xe: + ARCH(5TE); + rs = (insn >> 8) & 0xf; + rn = (insn >> 12) & 0xf; + rd = (insn >> 16) & 0xf; + if (op1 == 1) { + /* (32 * 16) >> 16 */ + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); + if (sh & 4) + tcg_gen_sari_i32(tmp2, tmp2, 16); + else + gen_sxth(tmp2); + tmp64 = gen_muls_i64_i32(tmp, tmp2); + tcg_gen_shri_i64(tmp64, tmp64, 16); + tmp = tcg_temp_new_i32(); + tcg_gen_trunc_i64_i32(tmp, tmp64); tcg_temp_free_i64(tmp64); - } else { - if (op1 == 0) { + if ((sh & 2) == 0) { tmp2 = load_reg(s, rn); gen_helper_add_setq(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); } store_reg(s, rd, tmp); + } else { + /* 16 * 16 */ + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); + gen_mulxy(tmp, tmp2, sh & 2, sh & 4); + tcg_temp_free_i32(tmp2); + if (op1 == 2) { + 
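
The (32 x 16) >> 16 path just above is the SMULW<y>/SMLAW<y> family: the 48-bit product of a 32-bit operand and a 16-bit operand, keeping the top 32 bits. Scalar sketch:

    #include <stdint.h>

    static int32_t smulw(int32_t a, int16_t b) {
        return (int32_t) (((int64_t) a * b) >> 16);
    }
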
tmp64 = tcg_temp_new_i64(); + tcg_gen_ext_i32_i64(tmp64, tmp); + tcg_temp_free_i32(tmp); + gen_addq(s, tmp64, rn, rd); + gen_storeq_reg(s, rn, rd, tmp64); + tcg_temp_free_i64(tmp64); + } else { + if (op1 == 0) { + tmp2 = load_reg(s, rn); + gen_helper_add_setq(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + store_reg(s, rd, tmp); + } } - } - break; - default: - goto illegal_op; + break; + default: + goto illegal_op; } - } else if (((insn & 0x0e000000) == 0 && - (insn & 0x00000090) != 0x90) || - ((insn & 0x0e000000) == (1 << 25))) { + } else if (((insn & 0x0e000000) == 0 && (insn & 0x00000090) != 0x90) || ((insn & 0x0e000000) == (1 << 25))) { int set_cc, logic_cc, shiftop; op1 = (insn >> 21) & 0xf; @@ -7164,7 +7403,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s) } } else { /* register */ - rm = (insn) & 0xf; + rm = (insn) &0xf; tmp2 = load_reg(s, rm); shiftop = (insn >> 5) & 3; if (!(insn & (1 << 4))) { @@ -7183,140 +7422,140 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s) TCGV_UNUSED(tmp); } rd = (insn >> 12) & 0xf; - switch(op1) { - case 0x00: - tcg_gen_and_i32(tmp, tmp, tmp2); - if (logic_cc) { - gen_logic_CC(tmp); - } - store_reg_bx(env, s, rd, tmp); - break; - case 0x01: - tcg_gen_xor_i32(tmp, tmp, tmp2); - if (logic_cc) { - gen_logic_CC(tmp); - } - store_reg_bx(env, s, rd, tmp); - break; - case 0x02: - if (set_cc && rd == 15) { - /* SUBS r15, ... is used for exception return. */ - if (IS_USER(s)) { - goto illegal_op; + switch (op1) { + case 0x00: + tcg_gen_and_i32(tmp, tmp, tmp2); + if (logic_cc) { + gen_logic_CC(tmp); } - gen_helper_sub_cc(tmp, tmp, tmp2); - gen_exception_return(s, tmp); - } else { - if (set_cc) { + store_reg_bx(env, s, rd, tmp); + break; + case 0x01: + tcg_gen_xor_i32(tmp, tmp, tmp2); + if (logic_cc) { + gen_logic_CC(tmp); + } + store_reg_bx(env, s, rd, tmp); + break; + case 0x02: + if (set_cc && rd == 15) { + /* SUBS r15, ... is used for exception return. 
*/ + if (IS_USER(s)) { + goto illegal_op; + } gen_helper_sub_cc(tmp, tmp, tmp2); + gen_exception_return(s, tmp); } else { - tcg_gen_sub_i32(tmp, tmp, tmp2); + if (set_cc) { + gen_helper_sub_cc(tmp, tmp, tmp2); + } else { + tcg_gen_sub_i32(tmp, tmp, tmp2); + } + store_reg_bx(env, s, rd, tmp); + } + break; + case 0x03: + if (set_cc) { + gen_helper_sub_cc(tmp, tmp2, tmp); + } else { + tcg_gen_sub_i32(tmp, tmp2, tmp); } store_reg_bx(env, s, rd, tmp); - } - break; - case 0x03: - if (set_cc) { - gen_helper_sub_cc(tmp, tmp2, tmp); - } else { - tcg_gen_sub_i32(tmp, tmp2, tmp); - } - store_reg_bx(env, s, rd, tmp); - break; - case 0x04: - if (set_cc) { - gen_helper_add_cc(tmp, tmp, tmp2); - } else { - tcg_gen_add_i32(tmp, tmp, tmp2); - } - store_reg_bx(env, s, rd, tmp); - break; - case 0x05: - if (set_cc) { - gen_helper_adc_cc(tmp, tmp, tmp2); - } else { - gen_add_carry(tmp, tmp, tmp2); - } - store_reg_bx(env, s, rd, tmp); - break; - case 0x06: - if (set_cc) { - gen_helper_sbc_cc(tmp, tmp, tmp2); - } else { - gen_sub_carry(tmp, tmp, tmp2); - } - store_reg_bx(env, s, rd, tmp); - break; - case 0x07: - if (set_cc) { - gen_helper_sbc_cc(tmp, tmp2, tmp); - } else { - gen_sub_carry(tmp, tmp2, tmp); - } - store_reg_bx(env, s, rd, tmp); - break; - case 0x08: - if (set_cc) { - tcg_gen_and_i32(tmp, tmp, tmp2); - gen_logic_CC(tmp); - } - tcg_temp_free_i32(tmp); - break; - case 0x09: - if (set_cc) { - tcg_gen_xor_i32(tmp, tmp, tmp2); - gen_logic_CC(tmp); - } - tcg_temp_free_i32(tmp); - break; - case 0x0a: - if (set_cc) { - gen_helper_sub_cc(tmp, tmp, tmp2); - } - tcg_temp_free_i32(tmp); - break; - case 0x0b: - if (set_cc) { - gen_helper_add_cc(tmp, tmp, tmp2); - } - tcg_temp_free_i32(tmp); - break; - case 0x0c: - tcg_gen_or_i32(tmp, tmp, tmp2); - if (logic_cc) { - gen_logic_CC(tmp); - } - store_reg_bx(env, s, rd, tmp); - break; - case 0x0d: - if (logic_cc && rd == 15) { - /* MOVS r15, ... is used for exception return. */ - if (IS_USER(s)) { - goto illegal_op; + break; + case 0x04: + if (set_cc) { + gen_helper_add_cc(tmp, tmp, tmp2); + } else { + tcg_gen_add_i32(tmp, tmp, tmp2); } - gen_exception_return(s, tmp2); - } else { + store_reg_bx(env, s, rd, tmp); + break; + case 0x05: + if (set_cc) { + gen_helper_adc_cc(tmp, tmp, tmp2); + } else { + gen_add_carry(tmp, tmp, tmp2); + } + store_reg_bx(env, s, rd, tmp); + break; + case 0x06: + if (set_cc) { + gen_helper_sbc_cc(tmp, tmp, tmp2); + } else { + gen_sub_carry(tmp, tmp, tmp2); + } + store_reg_bx(env, s, rd, tmp); + break; + case 0x07: + if (set_cc) { + gen_helper_sbc_cc(tmp, tmp2, tmp); + } else { + gen_sub_carry(tmp, tmp2, tmp); + } + store_reg_bx(env, s, rd, tmp); + break; + case 0x08: + if (set_cc) { + tcg_gen_and_i32(tmp, tmp, tmp2); + gen_logic_CC(tmp); + } + tcg_temp_free_i32(tmp); + break; + case 0x09: + if (set_cc) { + tcg_gen_xor_i32(tmp, tmp, tmp2); + gen_logic_CC(tmp); + } + tcg_temp_free_i32(tmp); + break; + case 0x0a: + if (set_cc) { + gen_helper_sub_cc(tmp, tmp, tmp2); + } + tcg_temp_free_i32(tmp); + break; + case 0x0b: + if (set_cc) { + gen_helper_add_cc(tmp, tmp, tmp2); + } + tcg_temp_free_i32(tmp); + break; + case 0x0c: + tcg_gen_or_i32(tmp, tmp, tmp2); + if (logic_cc) { + gen_logic_CC(tmp); + } + store_reg_bx(env, s, rd, tmp); + break; + case 0x0d: + if (logic_cc && rd == 15) { + /* MOVS r15, ... is used for exception return. 
*/ + if (IS_USER(s)) { + goto illegal_op; + } + gen_exception_return(s, tmp2); + } else { + if (logic_cc) { + gen_logic_CC(tmp2); + } + store_reg_bx(env, s, rd, tmp2); + } + break; + case 0x0e: + tcg_gen_andc_i32(tmp, tmp, tmp2); + if (logic_cc) { + gen_logic_CC(tmp); + } + store_reg_bx(env, s, rd, tmp); + break; + default: + case 0x0f: + tcg_gen_not_i32(tmp2, tmp2); if (logic_cc) { gen_logic_CC(tmp2); } store_reg_bx(env, s, rd, tmp2); - } - break; - case 0x0e: - tcg_gen_andc_i32(tmp, tmp, tmp2); - if (logic_cc) { - gen_logic_CC(tmp); - } - store_reg_bx(env, s, rd, tmp); - break; - default: - case 0x0f: - tcg_gen_not_i32(tmp2, tmp2); - if (logic_cc) { - gen_logic_CC(tmp2); - } - store_reg_bx(env, s, rd, tmp2); - break; + break; } if (op1 != 0x0f && op1 != 0x0d) { tcg_temp_free_i32(tmp2); @@ -7324,551 +7563,580 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s) } else { /* other instructions */ op1 = (insn >> 24) & 0xf; - switch(op1) { - case 0x0: - case 0x1: - /* multiplies, extra load/stores */ - sh = (insn >> 5) & 3; - if (sh == 0) { - if (op1 == 0x0) { - rd = (insn >> 16) & 0xf; - rn = (insn >> 12) & 0xf; - rs = (insn >> 8) & 0xf; - rm = (insn) & 0xf; - op1 = (insn >> 20) & 0xf; - switch (op1) { - case 0: case 1: case 2: case 3: case 6: - /* 32 bit mul */ - tmp = load_reg(s, rs); - tmp2 = load_reg(s, rm); - tcg_gen_mul_i32(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - if (insn & (1 << 22)) { - /* Subtract (mls) */ - ARCH(6T2); - tmp2 = load_reg(s, rn); - tcg_gen_sub_i32(tmp, tmp2, tmp); - tcg_temp_free_i32(tmp2); - } else if (insn & (1 << 21)) { - /* Add */ - tmp2 = load_reg(s, rn); - tcg_gen_add_i32(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - } - if (insn & (1 << 20)) - gen_logic_CC(tmp); - store_reg(s, rd, tmp); - break; - case 4: - /* 64 bit mul double accumulate (UMAAL) */ - ARCH(6); - tmp = load_reg(s, rs); - tmp2 = load_reg(s, rm); - tmp64 = gen_mulu_i64_i32(tmp, tmp2); - gen_addq_lo(s, tmp64, rn); - gen_addq_lo(s, tmp64, rd); - gen_storeq_reg(s, rn, rd, tmp64); - tcg_temp_free_i64(tmp64); - break; - case 8: case 9: case 10: case 11: - case 12: case 13: case 14: case 15: - /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. 
*/ - tmp = load_reg(s, rs); - tmp2 = load_reg(s, rm); - if (insn & (1 << 22)) { - tmp64 = gen_muls_i64_i32(tmp, tmp2); - } else { - tmp64 = gen_mulu_i64_i32(tmp, tmp2); - } - if (insn & (1 << 21)) { /* mult accumulate */ - gen_addq(s, tmp64, rn, rd); - } - if (insn & (1 << 20)) { - gen_logicq_cc(tmp64); - } - gen_storeq_reg(s, rn, rd, tmp64); - tcg_temp_free_i64(tmp64); - break; - default: - goto illegal_op; - } - } else { - rn = (insn >> 16) & 0xf; - rd = (insn >> 12) & 0xf; - if (insn & (1 << 23)) { - /* load/store exclusive */ - op1 = (insn >> 21) & 0x3; - if (op1) - ARCH(6K); - else - ARCH(6); - addr = tcg_temp_local_new_i32(); - load_reg_var(s, addr, rn); - if (insn & (1 << 20)) { - switch (op1) { - case 0: /* ldrex */ - gen_load_exclusive(s, rd, 15, addr, 2); - break; - case 1: /* ldrexd */ - gen_load_exclusive(s, rd, rd + 1, addr, 3); - break; - case 2: /* ldrexb */ - gen_load_exclusive(s, rd, 15, addr, 0); - break; - case 3: /* ldrexh */ - gen_load_exclusive(s, rd, 15, addr, 1); - break; - default: - abort(); - } - } else { - rm = insn & 0xf; - switch (op1) { - case 0: /* strex */ - gen_store_exclusive(s, rd, rm, 15, addr, 2); - break; - case 1: /* strexd */ - gen_store_exclusive(s, rd, rm, rm + 1, addr, 3); + switch (op1) { + case 0x0: + case 0x1: + /* multiplies, extra load/stores */ + sh = (insn >> 5) & 3; + if (sh == 0) { + if (op1 == 0x0) { + rd = (insn >> 16) & 0xf; + rn = (insn >> 12) & 0xf; + rs = (insn >> 8) & 0xf; + rm = (insn) &0xf; + op1 = (insn >> 20) & 0xf; + switch (op1) { + case 0: + case 1: + case 2: + case 3: + case 6: + /* 32 bit mul */ + tmp = load_reg(s, rs); + tmp2 = load_reg(s, rm); + tcg_gen_mul_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + if (insn & (1 << 22)) { + /* Subtract (mls) */ + ARCH(6T2); + tmp2 = load_reg(s, rn); + tcg_gen_sub_i32(tmp, tmp2, tmp); + tcg_temp_free_i32(tmp2); + } else if (insn & (1 << 21)) { + /* Add */ + tmp2 = load_reg(s, rn); + tcg_gen_add_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + if (insn & (1 << 20)) + gen_logic_CC(tmp); + store_reg(s, rd, tmp); break; - case 2: /* strexb */ - gen_store_exclusive(s, rd, rm, 15, addr, 0); + case 4: + /* 64 bit mul double accumulate (UMAAL) */ + ARCH(6); + tmp = load_reg(s, rs); + tmp2 = load_reg(s, rm); + tmp64 = gen_mulu_i64_i32(tmp, tmp2); + gen_addq_lo(s, tmp64, rn); + gen_addq_lo(s, tmp64, rd); + gen_storeq_reg(s, rn, rd, tmp64); + tcg_temp_free_i64(tmp64); break; - case 3: /* strexh */ - gen_store_exclusive(s, rd, rm, 15, addr, 1); + case 8: + case 9: + case 10: + case 11: + case 12: + case 13: + case 14: + case 15: + /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */ + tmp = load_reg(s, rs); + tmp2 = load_reg(s, rm); + if (insn & (1 << 22)) { + tmp64 = gen_muls_i64_i32(tmp, tmp2); + } else { + tmp64 = gen_mulu_i64_i32(tmp, tmp2); + } + if (insn & (1 << 21)) { /* mult accumulate */ + gen_addq(s, tmp64, rn, rd); + } + if (insn & (1 << 20)) { + gen_logicq_cc(tmp64); + } + gen_storeq_reg(s, rn, rd, tmp64); + tcg_temp_free_i64(tmp64); break; default: - abort(); - } - } - tcg_temp_free(addr); - } else { - /* SWP instruction */ - rm = (insn) & 0xf; - - /* ??? This is not really atomic. However we know - we never have multiple CPUs running in parallel, - so it is good enough. 
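[Editor's note] For the long-multiply forms handled above (UMULL/UMLAL/SMULL/SMLAL), the accumulate step that gen_addq() and gen_storeq_reg() implement over a register pair is, architecturally, a 64-bit add into RdHi:RdLo. A sketch of the unsigned variant (names illustrative; the signed forms cast the operands through (int64_t)(int32_t) first):

#include <stdint.h>

static void umlal(uint32_t *rdhi, uint32_t *rdlo, uint32_t rs, uint32_t rm) {
    uint64_t acc = ((uint64_t) *rdhi << 32) | *rdlo;
    acc += (uint64_t) rs * rm;        /* UMULL simply omits the accumulate */
    *rdlo = (uint32_t) acc;
    *rdhi = (uint32_t) (acc >> 32);
}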
*/ - addr = load_reg(s, rn); - tmp = load_reg(s, rm); - if (insn & (1 << 22)) { - tmp2 = gen_ld8u(addr, IS_USER(s)); - gen_st8(tmp, addr, IS_USER(s)); - } else { - tmp2 = gen_ld32(addr, IS_USER(s)); - gen_st32(tmp, addr, IS_USER(s)); - } - tcg_temp_free_i32(addr); - store_reg(s, rd, tmp2); - } - } - } else { - int address_offset; - int load; - /* Misc load/store */ - rn = (insn >> 16) & 0xf; - rd = (insn >> 12) & 0xf; - addr = load_reg(s, rn); - if (insn & (1 << 24)) - gen_add_datah_offset(s, insn, 0, addr); - address_offset = 0; - if (insn & (1 << 20)) { - /* load */ - switch(sh) { - case 1: - tmp = gen_ld16u(addr, IS_USER(s)); - break; - case 2: - tmp = gen_ld8s(addr, IS_USER(s)); - break; - default: - case 3: - tmp = gen_ld16s(addr, IS_USER(s)); - break; - } - load = 1; - } else if (sh & 2) { - ARCH(5TE); - /* doubleword */ - if (sh & 1) { - /* store */ - tmp = load_reg(s, rd); - gen_st32(tmp, addr, IS_USER(s)); - tcg_gen_addi_i32(addr, addr, 4); - tmp = load_reg(s, rd + 1); - gen_st32(tmp, addr, IS_USER(s)); - load = 0; - } else { - /* load */ - tmp = gen_ld32(addr, IS_USER(s)); - store_reg(s, rd, tmp); - tcg_gen_addi_i32(addr, addr, 4); - tmp = gen_ld32(addr, IS_USER(s)); - rd++; - load = 1; - } - address_offset = -4; - } else { - /* store */ - tmp = load_reg(s, rd); - gen_st16(tmp, addr, IS_USER(s)); - load = 0; - } - /* Perform base writeback before the loaded value to - ensure correct behavior with overlapping index registers. - ldrd with base writeback is is undefined if the - destination and index registers overlap. */ - if (!(insn & (1 << 24))) { - gen_add_datah_offset(s, insn, address_offset, addr); - store_reg(s, rn, addr); - } else if (insn & (1 << 21)) { - if (address_offset) - tcg_gen_addi_i32(addr, addr, address_offset); - store_reg(s, rn, addr); - } else { - tcg_temp_free_i32(addr); - } - if (load) { - /* Complete the load. */ - store_reg(s, rd, tmp); - } - } - break; - case 0x4: - case 0x5: - goto do_ldst; - case 0x6: - case 0x7: - if (insn & (1 << 4)) { - ARCH(6); - /* Armv6 Media instructions. */ - rm = insn & 0xf; - rn = (insn >> 16) & 0xf; - rd = (insn >> 12) & 0xf; - rs = (insn >> 8) & 0xf; - switch ((insn >> 23) & 3) { - case 0: /* Parallel add/subtract. */ - op1 = (insn >> 20) & 7; - tmp = load_reg(s, rn); - tmp2 = load_reg(s, rm); - sh = (insn >> 5) & 7; - if ((op1 & 3) == 0 || sh == 5 || sh == 6) - goto illegal_op; - gen_arm_parallel_addsub(op1, sh, tmp, tmp2); - tcg_temp_free_i32(tmp2); - store_reg(s, rd, tmp); - break; - case 1: - if ((insn & 0x00700020) == 0) { - /* Halfword pack. 
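[Editor's note] The SWP sequence above compiles down to an ordinary load followed by a store, which, as the original comment notes, is acceptable only because a single virtual CPU runs at a time. Its run-time effect, sketched with hypothetical mem_read32()/mem_write32() accessors (not libcpu APIs):

#include <stdint.h>

uint32_t mem_read32(uint32_t addr);             /* hypothetical */
void mem_write32(uint32_t addr, uint32_t val);  /* hypothetical */

static uint32_t swp_word(uint32_t addr, uint32_t rm_val) {
    uint32_t old = mem_read32(addr); /* tmp2 = gen_ld32(addr, ...) */
    mem_write32(addr, rm_val);       /* gen_st32(tmp, addr, ...)   */
    return old;                      /* written back to rd          */
}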
*/ - tmp = load_reg(s, rn); - tmp2 = load_reg(s, rm); - shift = (insn >> 7) & 0x1f; - if (insn & (1 << 6)) { - /* pkhtb */ - if (shift == 0) - shift = 31; - tcg_gen_sari_i32(tmp2, tmp2, shift); - tcg_gen_andi_i32(tmp, tmp, 0xffff0000); - tcg_gen_ext16u_i32(tmp2, tmp2); - } else { - /* pkhbt */ - if (shift) - tcg_gen_shli_i32(tmp2, tmp2, shift); - tcg_gen_ext16u_i32(tmp, tmp); - tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000); - } - tcg_gen_or_i32(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - store_reg(s, rd, tmp); - } else if ((insn & 0x00200020) == 0x00200000) { - /* [us]sat */ - tmp = load_reg(s, rm); - shift = (insn >> 7) & 0x1f; - if (insn & (1 << 6)) { - if (shift == 0) - shift = 31; - tcg_gen_sari_i32(tmp, tmp, shift); - } else { - tcg_gen_shli_i32(tmp, tmp, shift); - } - sh = (insn >> 16) & 0x1f; - tmp2 = tcg_const_i32(sh); - if (insn & (1 << 22)) - gen_helper_usat(tmp, tmp, tmp2); - else - gen_helper_ssat(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - store_reg(s, rd, tmp); - } else if ((insn & 0x00300fe0) == 0x00200f20) { - /* [us]sat16 */ - tmp = load_reg(s, rm); - sh = (insn >> 16) & 0x1f; - tmp2 = tcg_const_i32(sh); - if (insn & (1 << 22)) - gen_helper_usat16(tmp, tmp, tmp2); - else - gen_helper_ssat16(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - store_reg(s, rd, tmp); - } else if ((insn & 0x00700fe0) == 0x00000fa0) { - /* Select bytes. */ - tmp = load_reg(s, rn); - tmp2 = load_reg(s, rm); - tmp3 = tcg_temp_new_i32(); - tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE)); - gen_helper_sel_flags(tmp, tmp3, tmp, tmp2); - tcg_temp_free_i32(tmp3); - tcg_temp_free_i32(tmp2); - store_reg(s, rd, tmp); - } else if ((insn & 0x000003e0) == 0x00000060) { - tmp = load_reg(s, rm); - shift = (insn >> 10) & 3; - /* ??? In many cases it's not necessary to do a - rotate, a shift is sufficient. */ - if (shift != 0) - tcg_gen_rotri_i32(tmp, tmp, shift * 8); - op1 = (insn >> 20) & 7; - switch (op1) { - case 0: gen_sxtb16(tmp); break; - case 2: gen_sxtb(tmp); break; - case 3: gen_sxth(tmp); break; - case 4: gen_uxtb16(tmp); break; - case 6: gen_uxtb(tmp); break; - case 7: gen_uxth(tmp); break; - default: goto illegal_op; - } - if (rn != 15) { - tmp2 = load_reg(s, rn); - if ((op1 & 3) == 0) { - gen_add16(tmp, tmp2); - } else { - tcg_gen_add_i32(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - } - } - store_reg(s, rd, tmp); - } else if ((insn & 0x003f0f60) == 0x003f0f20) { - /* rev */ - tmp = load_reg(s, rm); - if (insn & (1 << 22)) { - if (insn & (1 << 7)) { - gen_revsh(tmp); - } else { - ARCH(6T2); - gen_helper_rbit(tmp, tmp); - } - } else { - if (insn & (1 << 7)) - gen_rev16(tmp); - else - tcg_gen_bswap32_i32(tmp, tmp); - } - store_reg(s, rd, tmp); - } else { - goto illegal_op; - } - break; - case 2: /* Multiplies (Type 3). */ - switch ((insn >> 20) & 0x7) { - case 5: - if (((insn >> 6) ^ (insn >> 7)) & 1) { - /* op2 not 00x or 11x : UNDEF */ - goto illegal_op; - } - /* Signed multiply most significant [accumulate]. 
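[Editor's note] The [us]sat cases above clamp through gen_helper_ssat()/gen_helper_usat(), which also set the Q flag on saturation. The clamping itself, with Q handling omitted (function name illustrative):

#include <stdint.h>

static int32_t ssat(int32_t x, unsigned bits) { /* bits in 1..32 */
    int32_t max = (int32_t) ((1u << (bits - 1)) - 1);
    int32_t min = -max - 1;
    if (x > max) return max;
    if (x < min) return min;
    return x;
}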
- (SMMUL, SMMLA, SMMLS) */ - tmp = load_reg(s, rm); - tmp2 = load_reg(s, rs); - tmp64 = gen_muls_i64_i32(tmp, tmp2); - - if (rd != 15) { - tmp = load_reg(s, rd); - if (insn & (1 << 6)) { - tmp64 = gen_subq_msw(tmp64, tmp); - } else { - tmp64 = gen_addq_msw(tmp64, tmp); - } - } - if (insn & (1 << 5)) { - tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u); - } - tcg_gen_shri_i64(tmp64, tmp64, 32); - tmp = tcg_temp_new_i32(); - tcg_gen_trunc_i64_i32(tmp, tmp64); - tcg_temp_free_i64(tmp64); - store_reg(s, rn, tmp); - break; - case 0: - case 4: - /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */ - if (insn & (1 << 7)) { - goto illegal_op; - } - tmp = load_reg(s, rm); - tmp2 = load_reg(s, rs); - if (insn & (1 << 5)) - gen_swap_half(tmp2); - gen_smul_dual(tmp, tmp2); - if (insn & (1 << 6)) { - /* This subtraction cannot overflow. */ - tcg_gen_sub_i32(tmp, tmp, tmp2); - } else { - /* This addition cannot overflow 32 bits; - * however it may overflow considered as a signed - * operation, in which case we must set the Q flag. - */ - gen_helper_add_setq(tmp, tmp, tmp2); - } - tcg_temp_free_i32(tmp2); - if (insn & (1 << 22)) { - /* smlald, smlsld */ - tmp64 = tcg_temp_new_i64(); - tcg_gen_ext_i32_i64(tmp64, tmp); - tcg_temp_free_i32(tmp); - gen_addq(s, tmp64, rd, rn); - gen_storeq_reg(s, rd, rn, tmp64); - tcg_temp_free_i64(tmp64); - } else { - /* smuad, smusd, smlad, smlsd */ - if (rd != 15) - { - tmp2 = load_reg(s, rd); - gen_helper_add_setq(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - } - store_reg(s, rn, tmp); - } - break; - case 1: - case 3: - /* SDIV, UDIV */ - if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) { - goto illegal_op; - } - if (((insn >> 5) & 7) || (rd != 15)) { - goto illegal_op; + goto illegal_op; } - tmp = load_reg(s, rm); - tmp2 = load_reg(s, rs); - if (insn & (1 << 21)) { - gen_helper_udiv(tmp, tmp, tmp2); + } else { + rn = (insn >> 16) & 0xf; + rd = (insn >> 12) & 0xf; + if (insn & (1 << 23)) { + /* load/store exclusive */ + op1 = (insn >> 21) & 0x3; + if (op1) + ARCH(6K); + else + ARCH(6); + addr = tcg_temp_local_new_i32(); + load_reg_var(s, addr, rn); + if (insn & (1 << 20)) { + switch (op1) { + case 0: /* ldrex */ + gen_load_exclusive(s, rd, 15, addr, 2); + break; + case 1: /* ldrexd */ + gen_load_exclusive(s, rd, rd + 1, addr, 3); + break; + case 2: /* ldrexb */ + gen_load_exclusive(s, rd, 15, addr, 0); + break; + case 3: /* ldrexh */ + gen_load_exclusive(s, rd, 15, addr, 1); + break; + default: + abort(); + } + } else { + rm = insn & 0xf; + switch (op1) { + case 0: /* strex */ + gen_store_exclusive(s, rd, rm, 15, addr, 2); + break; + case 1: /* strexd */ + gen_store_exclusive(s, rd, rm, rm + 1, addr, 3); + break; + case 2: /* strexb */ + gen_store_exclusive(s, rd, rm, 15, addr, 0); + break; + case 3: /* strexh */ + gen_store_exclusive(s, rd, rm, 15, addr, 1); + break; + default: + abort(); + } + } + tcg_temp_free(addr); } else { - gen_helper_sdiv(tmp, tmp, tmp2); + /* SWP instruction */ + rm = (insn) &0xf; + + /* ??? This is not really atomic. However we know + we never have multiple CPUs running in parallel, + so it is good enough. 
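[Editor's note] The tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u) above is the optional rounding step of SMMUL/SMMLA/SMMLS: adding half an LSB of the final result before dropping the low 32 bits rounds to nearest instead of truncating. For the plain SMMUL case:

#include <stdint.h>

static int32_t smmul_round(int32_t a, int32_t b) {
    int64_t p = (int64_t) a * b;
    p += 0x80000000LL;          /* half of bit 32, i.e. round to nearest */
    return (int32_t) (p >> 32);
}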
*/ + addr = load_reg(s, rn); + tmp = load_reg(s, rm); + if (insn & (1 << 22)) { + tmp2 = gen_ld8u(addr, IS_USER(s)); + gen_st8(tmp, addr, IS_USER(s)); + } else { + tmp2 = gen_ld32(addr, IS_USER(s)); + gen_st32(tmp, addr, IS_USER(s)); + } + tcg_temp_free_i32(addr); + store_reg(s, rd, tmp2); } - tcg_temp_free_i32(tmp2); - store_reg(s, rn, tmp); - break; - default: - goto illegal_op; } - break; - case 3: - op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7); - switch (op1) { - case 0: /* Unsigned sum of absolute differences. */ - ARCH(6); - tmp = load_reg(s, rm); - tmp2 = load_reg(s, rs); - gen_helper_usad8(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - if (rd != 15) { - tmp2 = load_reg(s, rd); - tcg_gen_add_i32(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); + } else { + int address_offset; + int load; + /* Misc load/store */ + rn = (insn >> 16) & 0xf; + rd = (insn >> 12) & 0xf; + addr = load_reg(s, rn); + if (insn & (1 << 24)) + gen_add_datah_offset(s, insn, 0, addr); + address_offset = 0; + if (insn & (1 << 20)) { + /* load */ + switch (sh) { + case 1: + tmp = gen_ld16u(addr, IS_USER(s)); + break; + case 2: + tmp = gen_ld8s(addr, IS_USER(s)); + break; + default: + case 3: + tmp = gen_ld16s(addr, IS_USER(s)); + break; } - store_reg(s, rn, tmp); - break; - case 0x20: case 0x24: case 0x28: case 0x2c: - /* Bitfield insert/clear. */ - ARCH(6T2); - shift = (insn >> 7) & 0x1f; - i = (insn >> 16) & 0x1f; - i = i + 1 - shift; - if (rm == 15) { - tmp = tcg_temp_new_i32(); - tcg_gen_movi_i32(tmp, 0); + load = 1; + } else if (sh & 2) { + ARCH(5TE); + /* doubleword */ + if (sh & 1) { + /* store */ + tmp = load_reg(s, rd); + gen_st32(tmp, addr, IS_USER(s)); + tcg_gen_addi_i32(addr, addr, 4); + tmp = load_reg(s, rd + 1); + gen_st32(tmp, addr, IS_USER(s)); + load = 0; } else { - tmp = load_reg(s, rm); - } - if (i != 32) { - tmp2 = load_reg(s, rd); - gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1); - tcg_temp_free_i32(tmp2); + /* load */ + tmp = gen_ld32(addr, IS_USER(s)); + store_reg(s, rd, tmp); + tcg_gen_addi_i32(addr, addr, 4); + tmp = gen_ld32(addr, IS_USER(s)); + rd++; + load = 1; } + address_offset = -4; + } else { + /* store */ + tmp = load_reg(s, rd); + gen_st16(tmp, addr, IS_USER(s)); + load = 0; + } + /* Perform base writeback before the loaded value to + ensure correct behavior with overlapping index registers. + ldrd with base writeback is is undefined if the + destination and index registers overlap. */ + if (!(insn & (1 << 24))) { + gen_add_datah_offset(s, insn, address_offset, addr); + store_reg(s, rn, addr); + } else if (insn & (1 << 21)) { + if (address_offset) + tcg_gen_addi_i32(addr, addr, address_offset); + store_reg(s, rn, addr); + } else { + tcg_temp_free_i32(addr); + } + if (load) { + /* Complete the load. */ store_reg(s, rd, tmp); - break; - case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */ - case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */ - ARCH(6T2); - tmp = load_reg(s, rm); - shift = (insn >> 7) & 0x1f; - i = ((insn >> 16) & 0x1f) + 1; - if (shift + i > 32) - goto illegal_op; - if (i < 32) { - if (op1 & 0x20) { - gen_ubfx(tmp, shift, (1u << i) - 1); + } + } + break; + case 0x4: + case 0x5: + goto do_ldst; + case 0x6: + case 0x7: + if (insn & (1 << 4)) { + ARCH(6); + /* Armv6 Media instructions. */ + rm = insn & 0xf; + rn = (insn >> 16) & 0xf; + rd = (insn >> 12) & 0xf; + rs = (insn >> 8) & 0xf; + switch ((insn >> 23) & 3) { + case 0: /* Parallel add/subtract. 
*/ + op1 = (insn >> 20) & 7; + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + sh = (insn >> 5) & 7; + if ((op1 & 3) == 0 || sh == 5 || sh == 6) + goto illegal_op; + gen_arm_parallel_addsub(op1, sh, tmp, tmp2); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + break; + case 1: + if ((insn & 0x00700020) == 0) { + /* Halfword pack. */ + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + shift = (insn >> 7) & 0x1f; + if (insn & (1 << 6)) { + /* pkhtb */ + if (shift == 0) + shift = 31; + tcg_gen_sari_i32(tmp2, tmp2, shift); + tcg_gen_andi_i32(tmp, tmp, 0xffff0000); + tcg_gen_ext16u_i32(tmp2, tmp2); + } else { + /* pkhbt */ + if (shift) + tcg_gen_shli_i32(tmp2, tmp2, shift); + tcg_gen_ext16u_i32(tmp, tmp); + tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000); + } + tcg_gen_or_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + } else if ((insn & 0x00200020) == 0x00200000) { + /* [us]sat */ + tmp = load_reg(s, rm); + shift = (insn >> 7) & 0x1f; + if (insn & (1 << 6)) { + if (shift == 0) + shift = 31; + tcg_gen_sari_i32(tmp, tmp, shift); + } else { + tcg_gen_shli_i32(tmp, tmp, shift); + } + sh = (insn >> 16) & 0x1f; + tmp2 = tcg_const_i32(sh); + if (insn & (1 << 22)) + gen_helper_usat(tmp, tmp, tmp2); + else + gen_helper_ssat(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + } else if ((insn & 0x00300fe0) == 0x00200f20) { + /* [us]sat16 */ + tmp = load_reg(s, rm); + sh = (insn >> 16) & 0x1f; + tmp2 = tcg_const_i32(sh); + if (insn & (1 << 22)) + gen_helper_usat16(tmp, tmp, tmp2); + else + gen_helper_ssat16(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + } else if ((insn & 0x00700fe0) == 0x00000fa0) { + /* Select bytes. */ + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + tmp3 = tcg_temp_new_i32(); + tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE)); + gen_helper_sel_flags(tmp, tmp3, tmp, tmp2); + tcg_temp_free_i32(tmp3); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + } else if ((insn & 0x000003e0) == 0x00000060) { + tmp = load_reg(s, rm); + shift = (insn >> 10) & 3; + /* ??? In many cases it's not necessary to do a + rotate, a shift is sufficient. */ + if (shift != 0) + tcg_gen_rotri_i32(tmp, tmp, shift * 8); + op1 = (insn >> 20) & 7; + switch (op1) { + case 0: + gen_sxtb16(tmp); + break; + case 2: + gen_sxtb(tmp); + break; + case 3: + gen_sxth(tmp); + break; + case 4: + gen_uxtb16(tmp); + break; + case 6: + gen_uxtb(tmp); + break; + case 7: + gen_uxth(tmp); + break; + default: + goto illegal_op; + } + if (rn != 15) { + tmp2 = load_reg(s, rn); + if ((op1 & 3) == 0) { + gen_add16(tmp, tmp2); + } else { + tcg_gen_add_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + } + store_reg(s, rd, tmp); + } else if ((insn & 0x003f0f60) == 0x003f0f20) { + /* rev */ + tmp = load_reg(s, rm); + if (insn & (1 << 22)) { + if (insn & (1 << 7)) { + gen_revsh(tmp); + } else { + ARCH(6T2); + gen_helper_rbit(tmp, tmp); + } + } else { + if (insn & (1 << 7)) + gen_rev16(tmp); + else + tcg_gen_bswap32_i32(tmp, tmp); + } + store_reg(s, rd, tmp); } else { - gen_sbfx(tmp, shift, i); + goto illegal_op; } - } - store_reg(s, rd, tmp); - break; - default: - goto illegal_op; + break; + case 2: /* Multiplies (Type 3). */ + switch ((insn >> 20) & 0x7) { + case 5: + if (((insn >> 6) ^ (insn >> 7)) & 1) { + /* op2 not 00x or 11x : UNDEF */ + goto illegal_op; + } + /* Signed multiply most significant [accumulate]. 
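[Editor's note] The extend group above first rotates by shift * 8 and then sign- or zero-extends, because the A32 SXTB/UXTB/SXTH/UXTH encodings take a byte-granular rotation of the source. A plain-C model of one variant (names illustrative):

#include <stdint.h>

static uint32_t ror32(uint32_t x, unsigned r) {
    r &= 31;
    return r ? (x >> r) | (x << (32 - r)) : x;
}

static int32_t sxtb_ror(uint32_t x, unsigned rot) { /* rot in 0..3 */
    return (int8_t) ror32(x, rot * 8);
}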
+ (SMMUL, SMMLA, SMMLS) */ + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); + tmp64 = gen_muls_i64_i32(tmp, tmp2); + + if (rd != 15) { + tmp = load_reg(s, rd); + if (insn & (1 << 6)) { + tmp64 = gen_subq_msw(tmp64, tmp); + } else { + tmp64 = gen_addq_msw(tmp64, tmp); + } + } + if (insn & (1 << 5)) { + tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u); + } + tcg_gen_shri_i64(tmp64, tmp64, 32); + tmp = tcg_temp_new_i32(); + tcg_gen_trunc_i64_i32(tmp, tmp64); + tcg_temp_free_i64(tmp64); + store_reg(s, rn, tmp); + break; + case 0: + case 4: + /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */ + if (insn & (1 << 7)) { + goto illegal_op; + } + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); + if (insn & (1 << 5)) + gen_swap_half(tmp2); + gen_smul_dual(tmp, tmp2); + if (insn & (1 << 6)) { + /* This subtraction cannot overflow. */ + tcg_gen_sub_i32(tmp, tmp, tmp2); + } else { + /* This addition cannot overflow 32 bits; + * however it may overflow considered as a signed + * operation, in which case we must set the Q flag. + */ + gen_helper_add_setq(tmp, tmp, tmp2); + } + tcg_temp_free_i32(tmp2); + if (insn & (1 << 22)) { + /* smlald, smlsld */ + tmp64 = tcg_temp_new_i64(); + tcg_gen_ext_i32_i64(tmp64, tmp); + tcg_temp_free_i32(tmp); + gen_addq(s, tmp64, rd, rn); + gen_storeq_reg(s, rd, rn, tmp64); + tcg_temp_free_i64(tmp64); + } else { + /* smuad, smusd, smlad, smlsd */ + if (rd != 15) { + tmp2 = load_reg(s, rd); + gen_helper_add_setq(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + store_reg(s, rn, tmp); + } + break; + case 1: + case 3: + /* SDIV, UDIV */ + if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) { + goto illegal_op; + } + if (((insn >> 5) & 7) || (rd != 15)) { + goto illegal_op; + } + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); + if (insn & (1 << 21)) { + gen_helper_udiv(tmp, tmp, tmp2); + } else { + gen_helper_sdiv(tmp, tmp, tmp2); + } + tcg_temp_free_i32(tmp2); + store_reg(s, rn, tmp); + break; + default: + goto illegal_op; + } + break; + case 3: + op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7); + switch (op1) { + case 0: /* Unsigned sum of absolute differences. */ + ARCH(6); + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); + gen_helper_usad8(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + if (rd != 15) { + tmp2 = load_reg(s, rd); + tcg_gen_add_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + store_reg(s, rn, tmp); + break; + case 0x20: + case 0x24: + case 0x28: + case 0x2c: + /* Bitfield insert/clear. 
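[Editor's note] gen_helper_usad8() used above computes the unsigned sum of absolute differences over the four byte lanes, optionally accumulated into a register by the following add. The lane arithmetic:

#include <stdint.h>

static uint32_t usad8(uint32_t a, uint32_t b) {
    uint32_t sum = 0;
    for (int i = 0; i < 4; i++) {
        int32_t d = (int32_t) ((a >> (8 * i)) & 0xff) -
                    (int32_t) ((b >> (8 * i)) & 0xff);
        sum += (d < 0) ? -d : d;
    }
    return sum;
}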
*/ + ARCH(6T2); + shift = (insn >> 7) & 0x1f; + i = (insn >> 16) & 0x1f; + i = i + 1 - shift; + if (rm == 15) { + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, 0); + } else { + tmp = load_reg(s, rm); + } + if (i != 32) { + tmp2 = load_reg(s, rd); + gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1); + tcg_temp_free_i32(tmp2); + } + store_reg(s, rd, tmp); + break; + case 0x12: + case 0x16: + case 0x1a: + case 0x1e: /* sbfx */ + case 0x32: + case 0x36: + case 0x3a: + case 0x3e: /* ubfx */ + ARCH(6T2); + tmp = load_reg(s, rm); + shift = (insn >> 7) & 0x1f; + i = ((insn >> 16) & 0x1f) + 1; + if (shift + i > 32) + goto illegal_op; + if (i < 32) { + if (op1 & 0x20) { + gen_ubfx(tmp, shift, (1u << i) - 1); + } else { + gen_sbfx(tmp, shift, i); + } + } + store_reg(s, rd, tmp); + break; + default: + goto illegal_op; + } + break; } break; } - break; - } - do_ldst: - /* Check for undefined extension instructions - * per the ARM Bible IE: - * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx - */ - sh = (0xf << 20) | (0xf << 4); - if (op1 == 0x7 && ((insn & sh) == sh)) - { - goto illegal_op; - } - /* load/store byte/word */ - rn = (insn >> 16) & 0xf; - rd = (insn >> 12) & 0xf; - tmp2 = load_reg(s, rn); - i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000); - if (insn & (1 << 24)) - gen_add_data_offset(s, insn, tmp2); - if (insn & (1 << 20)) { - /* load */ - if (insn & (1 << 22)) { - tmp = gen_ld8u(tmp2, i); + do_ldst: + /* Check for undefined extension instructions + * per the ARM Bible IE: + * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx + */ + sh = (0xf << 20) | (0xf << 4); + if (op1 == 0x7 && ((insn & sh) == sh)) { + goto illegal_op; + } + /* load/store byte/word */ + rn = (insn >> 16) & 0xf; + rd = (insn >> 12) & 0xf; + tmp2 = load_reg(s, rn); + i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000); + if (insn & (1 << 24)) + gen_add_data_offset(s, insn, tmp2); + if (insn & (1 << 20)) { + /* load */ + if (insn & (1 << 22)) { + tmp = gen_ld8u(tmp2, i); + } else { + tmp = gen_ld32(tmp2, i); + } } else { - tmp = gen_ld32(tmp2, i); + /* store */ + tmp = load_reg(s, rd); + if (insn & (1 << 22)) + gen_st8(tmp, tmp2, i); + else + gen_st32(tmp, tmp2, i); } - } else { - /* store */ - tmp = load_reg(s, rd); - if (insn & (1 << 22)) - gen_st8(tmp, tmp2, i); - else - gen_st32(tmp, tmp2, i); - } - if (!(insn & (1 << 24))) { - gen_add_data_offset(s, insn, tmp2); - store_reg(s, rn, tmp2); - } else if (insn & (1 << 21)) { - store_reg(s, rn, tmp2); - } else { - tcg_temp_free_i32(tmp2); - } - if (insn & (1 << 20)) { - /* Complete the load. */ - store_reg_from_load(env, s, rd, tmp); - } - break; - case 0x08: - case 0x09: - { + if (!(insn & (1 << 24))) { + gen_add_data_offset(s, insn, tmp2); + store_reg(s, rn, tmp2); + } else if (insn & (1 << 21)) { + store_reg(s, rn, tmp2); + } else { + tcg_temp_free_i32(tmp2); + } + if (insn & (1 << 20)) { + /* Complete the load. 
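[Editor's note] The bitfield cases above all reduce to the same mask arithmetic: width = msb + 1 - lsb (the i = i + 1 - shift line) and mask = (1u << width) - 1, with width == 32 special-cased since a 32-bit shift would be undefined. Written out (names illustrative):

#include <stdint.h>

static uint32_t ubfx32(uint32_t x, unsigned lsb, unsigned width) {
    uint32_t mask = (width == 32) ? 0xffffffffu : (1u << width) - 1;
    return (x >> lsb) & mask;
}

static uint32_t bfi32(uint32_t base, uint32_t val, unsigned lsb, unsigned width) {
    uint32_t mask = ((width == 32) ? 0xffffffffu : (1u << width) - 1) << lsb;
    return (base & ~mask) | ((val << lsb) & mask);
}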
*/ + store_reg_from_load(env, s, rd, tmp); + } + break; + case 0x08: + case 0x09: { int j, n, user, loaded_base; TCGv loaded_var; /* load/store multiple words */ @@ -7888,7 +8156,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s) loaded_base = 0; TCGV_UNUSED(loaded_var); n = 0; - for(i=0;i<16;i++) { + for (i = 0; i < 16; i++) { if (insn & (1 << i)) n++; } @@ -7907,11 +8175,11 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s) } else { /* post decrement */ if (n != 1) - tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); + tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); } } j = 0; - for(i=0;i<16;i++) { + for (i = 0; i < 16; i++) { if (insn & (1 << i)) { if (insn & (1 << 20)) { /* load */ @@ -7931,7 +8199,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s) /* store */ if (i == 15) { /* special case: r15 = PC + 8 */ - val = (long)s->pc + 4; + val = (long) s->pc + 4; tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, val); } else if (user) { @@ -7983,49 +8251,44 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s) tcg_temp_free_i32(tmp); s->is_jmp = DISAS_UPDATE; } - } - break; - case 0xa: - case 0xb: - { + } break; + case 0xa: + case 0xb: { int32_t offset; /* branch (and link) */ - val = (int32_t)s->pc; + val = (int32_t) s->pc; if (insn & (1 << 24)) { tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, val); store_reg(s, 14, tmp); } - offset = (((int32_t)insn << 8) >> 8); + offset = (((int32_t) insn << 8) >> 8); val += (offset << 2) + 4; gen_jmp(s, val); - } - break; - case 0xc: - case 0xd: - case 0xe: - /* Coprocessor. */ - if (disas_coproc_insn(env, s, insn)) - goto illegal_op; - break; - case 0xf: - /* swi */ - gen_set_pc_im(s->pc); - s->is_jmp = DISAS_SWI; - break; - default: - illegal_op: - gen_exception_insn(s, 4, EXCP_UDEF); - break; + } break; + case 0xc: + case 0xd: + case 0xe: + /* Coprocessor. */ + if (disas_coproc_insn(env, s, insn)) + goto illegal_op; + break; + case 0xf: + /* swi */ + gen_set_pc_im(s->pc); + s->is_jmp = DISAS_SWI; + break; + default: + illegal_op: + gen_exception_insn(s, 4, EXCP_UDEF); + break; } } } /* Return true if this is a Thumb-2 logical op. */ -static int -thumb2_logic_op(int op) -{ +static int thumb2_logic_op(int op) { return (op < 8); } @@ -8035,65 +8298,63 @@ thumb2_logic_op(int op) to the high bit of T1. Returns zero if the opcode is valid. 
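[Editor's note] The branch (and link) decode above sign-extends the 24-bit immediate, scales it by 4, and adds it to PC+8; val starts as s->pc, which already points 4 bytes past the instruction, hence the extra + 4 in the code. Equivalently:

#include <stdint.h>

static uint32_t a32_branch_target(uint32_t insn, uint32_t insn_addr) {
    int32_t offset = ((int32_t) (insn << 8)) >> 8; /* sign-extend imm24 */
    return insn_addr + 8 + offset * 4;             /* (offset << 2) above */
}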
*/ -static int -gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1) -{ +static int gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1) { int logic_cc; logic_cc = 0; switch (op) { - case 0: /* and */ - tcg_gen_and_i32(t0, t0, t1); - logic_cc = conds; - break; - case 1: /* bic */ - tcg_gen_andc_i32(t0, t0, t1); - logic_cc = conds; - break; - case 2: /* orr */ - tcg_gen_or_i32(t0, t0, t1); - logic_cc = conds; - break; - case 3: /* orn */ - tcg_gen_orc_i32(t0, t0, t1); - logic_cc = conds; - break; - case 4: /* eor */ - tcg_gen_xor_i32(t0, t0, t1); - logic_cc = conds; - break; - case 8: /* add */ - if (conds) - gen_helper_add_cc(t0, t0, t1); - else - tcg_gen_add_i32(t0, t0, t1); - break; - case 10: /* adc */ - if (conds) - gen_helper_adc_cc(t0, t0, t1); - else - gen_adc(t0, t1); - break; - case 11: /* sbc */ - if (conds) - gen_helper_sbc_cc(t0, t0, t1); - else - gen_sub_carry(t0, t0, t1); - break; - case 13: /* sub */ - if (conds) - gen_helper_sub_cc(t0, t0, t1); - else - tcg_gen_sub_i32(t0, t0, t1); - break; - case 14: /* rsb */ - if (conds) - gen_helper_sub_cc(t0, t1, t0); - else - tcg_gen_sub_i32(t0, t1, t0); - break; - default: /* 5, 6, 7, 9, 12, 15. */ - return 1; + case 0: /* and */ + tcg_gen_and_i32(t0, t0, t1); + logic_cc = conds; + break; + case 1: /* bic */ + tcg_gen_andc_i32(t0, t0, t1); + logic_cc = conds; + break; + case 2: /* orr */ + tcg_gen_or_i32(t0, t0, t1); + logic_cc = conds; + break; + case 3: /* orn */ + tcg_gen_orc_i32(t0, t0, t1); + logic_cc = conds; + break; + case 4: /* eor */ + tcg_gen_xor_i32(t0, t0, t1); + logic_cc = conds; + break; + case 8: /* add */ + if (conds) + gen_helper_add_cc(t0, t0, t1); + else + tcg_gen_add_i32(t0, t0, t1); + break; + case 10: /* adc */ + if (conds) + gen_helper_adc_cc(t0, t0, t1); + else + gen_adc(t0, t1); + break; + case 11: /* sbc */ + if (conds) + gen_helper_sbc_cc(t0, t0, t1); + else + gen_sub_carry(t0, t0, t1); + break; + case 13: /* sub */ + if (conds) + gen_helper_sub_cc(t0, t0, t1); + else + tcg_gen_sub_i32(t0, t0, t1); + break; + case 14: /* rsb */ + if (conds) + gen_helper_sub_cc(t0, t1, t0); + else + tcg_gen_sub_i32(t0, t1, t0); + break; + default: /* 5, 6, 7, 9, 12, 15. */ + return 1; } if (logic_cc) { gen_logic_CC(t0); @@ -8105,8 +8366,7 @@ gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCG /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction is not legal. */ -static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1) -{ +static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1) { uint32_t insn, imm, shift, offset; uint32_t rd, rn, rm, rs; TCGv tmp; @@ -8119,8 +8379,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw int conds; int logic_cc; - if (!(arm_feature(env, ARM_FEATURE_THUMB2) - || arm_feature (env, ARM_FEATURE_M))) { + if (!(arm_feature(env, ARM_FEATURE_THUMB2) || arm_feature(env, ARM_FEATURE_M))) { /* Thumb-1 cores may need to treat bl and blx as a pair of 16-bit instructions to get correct prefetch abort behavior. */ insn = insn_hw1; @@ -8154,7 +8413,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw /* Instruction spans a page boundary. Implement it as two 16-bit instructions in case the second half causes an prefetch abort. 
*/ - offset = ((int32_t)insn << 21) >> 9; + offset = ((int32_t) insn << 21) >> 9; tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset); return 0; } @@ -8163,7 +8422,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw insn = arm_lduw_code(s->pc, s->bswap_code); s->pc += 2; - insn |= (uint32_t)insn_hw1 << 16; + insn |= (uint32_t) insn_hw1 << 16; if ((insn & 0xf800e800) != 0xf000e800) { ARCH(6T2); @@ -8174,1010 +8433,1043 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw rd = (insn >> 8) & 0xf; rm = insn & 0xf; switch ((insn >> 25) & 0xf) { - case 0: case 1: case 2: case 3: - /* 16-bit instructions. Should never happen. */ - abort(); - case 4: - if (insn & (1 << 22)) { - /* Other load/store, table branch. */ - if (insn & 0x01200000) { - /* Load/store doubleword. */ - if (rn == 15) { - addr = tcg_temp_new_i32(); - tcg_gen_movi_i32(addr, s->pc & ~3); - } else { - addr = load_reg(s, rn); - } - offset = (insn & 0xff) * 4; - if ((insn & (1 << 23)) == 0) - offset = -offset; - if (insn & (1 << 24)) { - tcg_gen_addi_i32(addr, addr, offset); - offset = 0; - } - if (insn & (1 << 20)) { - /* ldrd */ - tmp = gen_ld32(addr, IS_USER(s)); - store_reg(s, rs, tmp); - tcg_gen_addi_i32(addr, addr, 4); - tmp = gen_ld32(addr, IS_USER(s)); - store_reg(s, rd, tmp); - } else { - /* strd */ - tmp = load_reg(s, rs); - gen_st32(tmp, addr, IS_USER(s)); - tcg_gen_addi_i32(addr, addr, 4); - tmp = load_reg(s, rd); - gen_st32(tmp, addr, IS_USER(s)); - } - if (insn & (1 << 21)) { - /* Base writeback. */ - if (rn == 15) - goto illegal_op; - tcg_gen_addi_i32(addr, addr, offset - 4); - store_reg(s, rn, addr); - } else { - tcg_temp_free_i32(addr); - } - } else if ((insn & (1 << 23)) == 0) { - /* Load/store exclusive word. */ - addr = tcg_temp_local_new(); - load_reg_var(s, addr, rn); - tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2); - if (insn & (1 << 20)) { - gen_load_exclusive(s, rs, 15, addr, 2); - } else { - gen_store_exclusive(s, rd, rs, 15, addr, 2); - } - tcg_temp_free(addr); - } else if ((insn & (1 << 6)) == 0) { - /* Table Branch. */ - if (rn == 15) { - addr = tcg_temp_new_i32(); - tcg_gen_movi_i32(addr, s->pc); - } else { - addr = load_reg(s, rn); - } - tmp = load_reg(s, rm); - tcg_gen_add_i32(addr, addr, tmp); - if (insn & (1 << 4)) { - /* tbh */ - tcg_gen_add_i32(addr, addr, tmp); - tcg_temp_free_i32(tmp); - tmp = gen_ld16u(addr, IS_USER(s)); - } else { /* tbb */ - tcg_temp_free_i32(tmp); - tmp = gen_ld8u(addr, IS_USER(s)); - } - tcg_temp_free_i32(addr); - tcg_gen_shli_i32(tmp, tmp, 1); - tcg_gen_addi_i32(tmp, tmp, s->pc); - store_reg(s, 15, tmp); - } else { - /* Load/store exclusive byte/halfword/doubleword. */ - ARCH(7); - op = (insn >> 4) & 0x3; - if (op == 2) { - goto illegal_op; - } - addr = tcg_temp_local_new(); - load_reg_var(s, addr, rn); - if (insn & (1 << 20)) { - gen_load_exclusive(s, rs, rd, addr, op); - } else { - gen_store_exclusive(s, rm, rs, rd, addr, op); - } - tcg_temp_free(addr); - } - } else { - /* Load/store multiple, RFE, SRS. */ - if (((insn >> 23) & 1) == ((insn >> 24) & 1)) { - /* Not available in user mode. */ - if (IS_USER(s)) - goto illegal_op; - if (insn & (1 << 20)) { - /* rfe */ - addr = load_reg(s, rn); - if ((insn & (1 << 24)) == 0) - tcg_gen_addi_i32(addr, addr, -8); - /* Load PC into tmp and CPSR into tmp2. */ - tmp = gen_ld32(addr, 0); - tcg_gen_addi_i32(addr, addr, 4); - tmp2 = gen_ld32(addr, 0); + case 0: + case 1: + case 2: + case 3: + /* 16-bit instructions. Should never happen. 
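[Editor's note] The (insn & 0xf800e800) != 0xf000e800 test above singles out the original Thumb-1 bl/blx halfword pair; any other 32-bit encoding requires Thumb-2 (hence the ARCH(6T2) check). The general architectural rule for whether a halfword begins a 32-bit Thumb instruction is (a reference predicate, not code from this file):

#include <stdint.h>

static int thumb_halfword_is_32bit_prefix(uint16_t hw) {
    /* 0b11101..., 0b11110..., 0b11111... start 32-bit encodings;
       0b11100... is the 16-bit unconditional branch */
    return (hw & 0xe000) == 0xe000 && (hw & 0x1800) != 0;
}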
*/ + abort(); + case 4: + if (insn & (1 << 22)) { + /* Other load/store, table branch. */ + if (insn & 0x01200000) { + /* Load/store doubleword. */ + if (rn == 15) { + addr = tcg_temp_new_i32(); + tcg_gen_movi_i32(addr, s->pc & ~3); + } else { + addr = load_reg(s, rn); + } + offset = (insn & 0xff) * 4; + if ((insn & (1 << 23)) == 0) + offset = -offset; + if (insn & (1 << 24)) { + tcg_gen_addi_i32(addr, addr, offset); + offset = 0; + } + if (insn & (1 << 20)) { + /* ldrd */ + tmp = gen_ld32(addr, IS_USER(s)); + store_reg(s, rs, tmp); + tcg_gen_addi_i32(addr, addr, 4); + tmp = gen_ld32(addr, IS_USER(s)); + store_reg(s, rd, tmp); + } else { + /* strd */ + tmp = load_reg(s, rs); + gen_st32(tmp, addr, IS_USER(s)); + tcg_gen_addi_i32(addr, addr, 4); + tmp = load_reg(s, rd); + gen_st32(tmp, addr, IS_USER(s)); + } if (insn & (1 << 21)) { /* Base writeback. */ - if (insn & (1 << 24)) { - tcg_gen_addi_i32(addr, addr, 4); - } else { - tcg_gen_addi_i32(addr, addr, -4); - } + if (rn == 15) + goto illegal_op; + tcg_gen_addi_i32(addr, addr, offset - 4); store_reg(s, rn, addr); } else { tcg_temp_free_i32(addr); } - gen_rfe(s, tmp, tmp2); - } else { - /* srs */ - op = (insn & 0x1f); - addr = tcg_temp_new_i32(); - tmp = tcg_const_i32(op); - gen_helper_get_r13_banked(addr, cpu_env, tmp); - tcg_temp_free_i32(tmp); - if ((insn & (1 << 24)) == 0) { - tcg_gen_addi_i32(addr, addr, -8); + } else if ((insn & (1 << 23)) == 0) { + /* Load/store exclusive word. */ + addr = tcg_temp_local_new(); + load_reg_var(s, addr, rn); + tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2); + if (insn & (1 << 20)) { + gen_load_exclusive(s, rs, 15, addr, 2); + } else { + gen_store_exclusive(s, rd, rs, 15, addr, 2); } - tmp = load_reg(s, 14); - gen_st32(tmp, addr, 0); - tcg_gen_addi_i32(addr, addr, 4); - tmp = tcg_temp_new_i32(); - gen_helper_cpsr_read(tmp); - gen_st32(tmp, addr, 0); - if (insn & (1 << 21)) { - if ((insn & (1 << 24)) == 0) { - tcg_gen_addi_i32(addr, addr, -4); - } else { - tcg_gen_addi_i32(addr, addr, 4); - } - tmp = tcg_const_i32(op); - gen_helper_set_r13_banked(cpu_env, tmp, addr); + tcg_temp_free(addr); + } else if ((insn & (1 << 6)) == 0) { + /* Table Branch. */ + if (rn == 15) { + addr = tcg_temp_new_i32(); + tcg_gen_movi_i32(addr, s->pc); + } else { + addr = load_reg(s, rn); + } + tmp = load_reg(s, rm); + tcg_gen_add_i32(addr, addr, tmp); + if (insn & (1 << 4)) { + /* tbh */ + tcg_gen_add_i32(addr, addr, tmp); + tcg_temp_free_i32(tmp); + tmp = gen_ld16u(addr, IS_USER(s)); + } else { /* tbb */ tcg_temp_free_i32(tmp); + tmp = gen_ld8u(addr, IS_USER(s)); + } + tcg_temp_free_i32(addr); + tcg_gen_shli_i32(tmp, tmp, 1); + tcg_gen_addi_i32(tmp, tmp, s->pc); + store_reg(s, 15, tmp); + } else { + /* Load/store exclusive byte/halfword/doubleword. */ + ARCH(7); + op = (insn >> 4) & 0x3; + if (op == 2) { + goto illegal_op; + } + addr = tcg_temp_local_new(); + load_reg_var(s, addr, rn); + if (insn & (1 << 20)) { + gen_load_exclusive(s, rs, rd, addr, op); } else { - tcg_temp_free_i32(addr); + gen_store_exclusive(s, rm, rs, rd, addr, op); } + tcg_temp_free(addr); } } else { - int i, loaded_base = 0; - TCGv loaded_var; - /* Load/store multiple. */ - addr = load_reg(s, rn); - offset = 0; - for (i = 0; i < 16; i++) { - if (insn & (1 << i)) - offset += 4; - } - if (insn & (1 << 24)) { - tcg_gen_addi_i32(addr, addr, -offset); - } - - TCGV_UNUSED(loaded_var); - for (i = 0; i < 16; i++) { - if ((insn & (1 << i)) == 0) - continue; + /* Load/store multiple, RFE, SRS. 
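[Editor's note] The table-branch block above (TBB/TBH) loads a byte or halfword offset from a table based at rn and indexed by rm, then branches to PC plus twice that offset. Sketched with hypothetical mem_read8()/mem_read16() accessors, where pc is the Thumb-state PC (instruction address + 4, i.e. s->pc here):

#include <stdint.h>

uint8_t mem_read8(uint32_t addr);    /* hypothetical */
uint16_t mem_read16(uint32_t addr);  /* hypothetical */

static uint32_t table_branch_target(uint32_t pc, uint32_t rn_val,
                                    uint32_t rm_val, int is_tbh) {
    uint32_t off = is_tbh ? mem_read16(rn_val + 2 * rm_val) /* tbh */
                          : mem_read8(rn_val + rm_val);     /* tbb */
    return pc + 2 * off;
}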
*/ + if (((insn >> 23) & 1) == ((insn >> 24) & 1)) { + /* Not available in user mode. */ + if (IS_USER(s)) + goto illegal_op; if (insn & (1 << 20)) { - /* Load. */ - tmp = gen_ld32(addr, IS_USER(s)); - if (i == 15) { - gen_bx(s, tmp); - } else if (i == rn) { - loaded_var = tmp; - loaded_base = 1; + /* rfe */ + addr = load_reg(s, rn); + if ((insn & (1 << 24)) == 0) + tcg_gen_addi_i32(addr, addr, -8); + /* Load PC into tmp and CPSR into tmp2. */ + tmp = gen_ld32(addr, 0); + tcg_gen_addi_i32(addr, addr, 4); + tmp2 = gen_ld32(addr, 0); + if (insn & (1 << 21)) { + /* Base writeback. */ + if (insn & (1 << 24)) { + tcg_gen_addi_i32(addr, addr, 4); + } else { + tcg_gen_addi_i32(addr, addr, -4); + } + store_reg(s, rn, addr); } else { - store_reg(s, i, tmp); + tcg_temp_free_i32(addr); } + gen_rfe(s, tmp, tmp2); } else { - /* Store. */ - tmp = load_reg(s, i); - gen_st32(tmp, addr, IS_USER(s)); + /* srs */ + op = (insn & 0x1f); + addr = tcg_temp_new_i32(); + tmp = tcg_const_i32(op); + gen_helper_get_r13_banked(addr, cpu_env, tmp); + tcg_temp_free_i32(tmp); + if ((insn & (1 << 24)) == 0) { + tcg_gen_addi_i32(addr, addr, -8); + } + tmp = load_reg(s, 14); + gen_st32(tmp, addr, 0); + tcg_gen_addi_i32(addr, addr, 4); + tmp = tcg_temp_new_i32(); + gen_helper_cpsr_read(tmp); + gen_st32(tmp, addr, 0); + if (insn & (1 << 21)) { + if ((insn & (1 << 24)) == 0) { + tcg_gen_addi_i32(addr, addr, -4); + } else { + tcg_gen_addi_i32(addr, addr, 4); + } + tmp = tcg_const_i32(op); + gen_helper_set_r13_banked(cpu_env, tmp, addr); + tcg_temp_free_i32(tmp); + } else { + tcg_temp_free_i32(addr); + } + } + } else { + int i, loaded_base = 0; + TCGv loaded_var; + /* Load/store multiple. */ + addr = load_reg(s, rn); + offset = 0; + for (i = 0; i < 16; i++) { + if (insn & (1 << i)) + offset += 4; } - tcg_gen_addi_i32(addr, addr, 4); - } - if (loaded_base) { - store_reg(s, rn, loaded_var); - } - if (insn & (1 << 21)) { - /* Base register writeback. */ if (insn & (1 << 24)) { tcg_gen_addi_i32(addr, addr, -offset); } - /* Fault if writeback register is in register list. */ - if (insn & (1 << rn)) - goto illegal_op; - store_reg(s, rn, addr); - } else { - tcg_temp_free_i32(addr); - } - } - } - break; - case 5: - - op = (insn >> 21) & 0xf; - if (op == 6) { - /* Halfword pack. */ - tmp = load_reg(s, rn); - tmp2 = load_reg(s, rm); - shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3); - if (insn & (1 << 5)) { - /* pkhtb */ - if (shift == 0) - shift = 31; - tcg_gen_sari_i32(tmp2, tmp2, shift); - tcg_gen_andi_i32(tmp, tmp, 0xffff0000); - tcg_gen_ext16u_i32(tmp2, tmp2); - } else { - /* pkhbt */ - if (shift) - tcg_gen_shli_i32(tmp2, tmp2, shift); - tcg_gen_ext16u_i32(tmp, tmp); - tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000); - } - tcg_gen_or_i32(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - store_reg(s, rd, tmp); - } else { - /* Data processing register constant shift. */ - if (rn == 15) { - tmp = tcg_temp_new_i32(); - tcg_gen_movi_i32(tmp, 0); - } else { - tmp = load_reg(s, rn); - } - tmp2 = load_reg(s, rm); - shiftop = (insn >> 4) & 3; - shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c); - conds = (insn & (1 << 20)) != 0; - logic_cc = (conds && thumb2_logic_op(op)); - gen_arm_shift_im(tmp2, shiftop, shift, logic_cc); - if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2)) - goto illegal_op; - tcg_temp_free_i32(tmp2); - if (rd != 15) { - store_reg(s, rd, tmp); - } else { - tcg_temp_free_i32(tmp); - } - } - break; - case 13: /* Misc data processing. 
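[Editor's note] The load/store-multiple paths above size the transfer by counting set bits in the register list, 4 bytes per register, which also drives the base writeback amount. The count loop, factored out:

#include <stdint.h>

static unsigned ldm_stm_bytes(uint16_t reglist) {
    unsigned n = 0;
    for (int i = 0; i < 16; i++)
        n += (reglist >> i) & 1;  /* one word per listed register */
    return 4 * n;
}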
*/ - op = ((insn >> 22) & 6) | ((insn >> 7) & 1); - if (op < 4 && (insn & 0xf000) != 0xf000) - goto illegal_op; - switch (op) { - case 0: /* Register controlled shift. */ - tmp = load_reg(s, rn); - tmp2 = load_reg(s, rm); - if ((insn & 0x70) != 0) - goto illegal_op; - op = (insn >> 21) & 3; - logic_cc = (insn & (1 << 20)) != 0; - gen_arm_shift_reg(tmp, op, tmp2, logic_cc); - if (logic_cc) - gen_logic_CC(tmp); - store_reg_bx(env, s, rd, tmp); - break; - case 1: /* Sign/zero extend. */ - tmp = load_reg(s, rm); - shift = (insn >> 4) & 3; - /* ??? In many cases it's not necessary to do a - rotate, a shift is sufficient. */ - if (shift != 0) - tcg_gen_rotri_i32(tmp, tmp, shift * 8); - op = (insn >> 20) & 7; - switch (op) { - case 0: gen_sxth(tmp); break; - case 1: gen_uxth(tmp); break; - case 2: gen_sxtb16(tmp); break; - case 3: gen_uxtb16(tmp); break; - case 4: gen_sxtb(tmp); break; - case 5: gen_uxtb(tmp); break; - default: goto illegal_op; - } - if (rn != 15) { - tmp2 = load_reg(s, rn); - if ((op >> 1) == 1) { - gen_add16(tmp, tmp2); - } else { - tcg_gen_add_i32(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); + TCGV_UNUSED(loaded_var); + for (i = 0; i < 16; i++) { + if ((insn & (1 << i)) == 0) + continue; + if (insn & (1 << 20)) { + /* Load. */ + tmp = gen_ld32(addr, IS_USER(s)); + if (i == 15) { + gen_bx(s, tmp); + } else if (i == rn) { + loaded_var = tmp; + loaded_base = 1; + } else { + store_reg(s, i, tmp); + } + } else { + /* Store. */ + tmp = load_reg(s, i); + gen_st32(tmp, addr, IS_USER(s)); + } + tcg_gen_addi_i32(addr, addr, 4); + } + if (loaded_base) { + store_reg(s, rn, loaded_var); + } + if (insn & (1 << 21)) { + /* Base register writeback. */ + if (insn & (1 << 24)) { + tcg_gen_addi_i32(addr, addr, -offset); + } + /* Fault if writeback register is in register list. */ + if (insn & (1 << rn)) + goto illegal_op; + store_reg(s, rn, addr); + } else { + tcg_temp_free_i32(addr); + } } } - store_reg(s, rd, tmp); - break; - case 2: /* SIMD add/subtract. */ - op = (insn >> 20) & 7; - shift = (insn >> 4) & 7; - if ((op & 3) == 3 || (shift & 3) == 3) - goto illegal_op; - tmp = load_reg(s, rn); - tmp2 = load_reg(s, rm); - gen_thumb2_parallel_addsub(op, shift, tmp, tmp2); - tcg_temp_free_i32(tmp2); - store_reg(s, rd, tmp); break; - case 3: /* Other data processing. */ - op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7); - if (op < 4) { - /* Saturating add/subtract. */ + case 5: + + op = (insn >> 21) & 0xf; + if (op == 6) { + /* Halfword pack. */ tmp = load_reg(s, rn); tmp2 = load_reg(s, rm); - if (op & 1) - gen_helper_double_saturate(tmp, tmp); - if (op & 2) - gen_helper_sub_saturate(tmp, tmp2, tmp); - else - gen_helper_add_saturate(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - } else { - tmp = load_reg(s, rn); - switch (op) { - case 0x0a: /* rbit */ - gen_helper_rbit(tmp, tmp); - break; - case 0x08: /* rev */ - tcg_gen_bswap32_i32(tmp, tmp); - break; - case 0x09: /* rev16 */ - gen_rev16(tmp); - break; - case 0x0b: /* revsh */ - gen_revsh(tmp); - break; - case 0x10: /* sel */ - tmp2 = load_reg(s, rm); - tmp3 = tcg_temp_new_i32(); - tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE)); - gen_helper_sel_flags(tmp, tmp3, tmp, tmp2); - tcg_temp_free_i32(tmp3); - tcg_temp_free_i32(tmp2); - break; - case 0x18: /* clz */ - gen_helper_clz(tmp, tmp); - break; - default: - goto illegal_op; - } - } - store_reg(s, rd, tmp); - break; - case 4: case 5: /* 32-bit multiply. Sum of absolute differences. 
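[Editor's note] The byte-reverse family dispatched above differs only in granularity: rev swaps all four bytes (tcg_gen_bswap32_i32), rev16 swaps within each halfword, and revsh swaps the low halfword's bytes and sign-extends. Models of the latter two:

#include <stdint.h>

static uint32_t rev16(uint32_t x) {
    return ((x & 0x00ff00ffu) << 8) | ((x & 0xff00ff00u) >> 8);
}

static int32_t revsh(uint32_t x) {
    return (int16_t) (((x & 0xff) << 8) | ((x >> 8) & 0xff));
}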
*/ - op = (insn >> 4) & 0xf; - tmp = load_reg(s, rn); - tmp2 = load_reg(s, rm); - switch ((insn >> 20) & 7) { - case 0: /* 32 x 32 -> 32 */ - tcg_gen_mul_i32(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - if (rs != 15) { - tmp2 = load_reg(s, rs); - if (op) - tcg_gen_sub_i32(tmp, tmp2, tmp); - else - tcg_gen_add_i32(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - } - break; - case 1: /* 16 x 16 -> 32 */ - gen_mulxy(tmp, tmp2, op & 2, op & 1); - tcg_temp_free_i32(tmp2); - if (rs != 15) { - tmp2 = load_reg(s, rs); - gen_helper_add_setq(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - } - break; - case 2: /* Dual multiply add. */ - case 4: /* Dual multiply subtract. */ - if (op) - gen_swap_half(tmp2); - gen_smul_dual(tmp, tmp2); - if (insn & (1 << 22)) { - /* This subtraction cannot overflow. */ - tcg_gen_sub_i32(tmp, tmp, tmp2); + shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3); + if (insn & (1 << 5)) { + /* pkhtb */ + if (shift == 0) + shift = 31; + tcg_gen_sari_i32(tmp2, tmp2, shift); + tcg_gen_andi_i32(tmp, tmp, 0xffff0000); + tcg_gen_ext16u_i32(tmp2, tmp2); } else { - /* This addition cannot overflow 32 bits; - * however it may overflow considered as a signed - * operation, in which case we must set the Q flag. - */ - gen_helper_add_setq(tmp, tmp, tmp2); - } - tcg_temp_free_i32(tmp2); - if (rs != 15) - { - tmp2 = load_reg(s, rs); - gen_helper_add_setq(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - } - break; - case 3: /* 32 * 16 -> 32msb */ - if (op) - tcg_gen_sari_i32(tmp2, tmp2, 16); - else - gen_sxth(tmp2); - tmp64 = gen_muls_i64_i32(tmp, tmp2); - tcg_gen_shri_i64(tmp64, tmp64, 16); - tmp = tcg_temp_new_i32(); - tcg_gen_trunc_i64_i32(tmp, tmp64); - tcg_temp_free_i64(tmp64); - if (rs != 15) - { - tmp2 = load_reg(s, rs); - gen_helper_add_setq(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - } - break; - case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */ - tmp64 = gen_muls_i64_i32(tmp, tmp2); - if (rs != 15) { - tmp = load_reg(s, rs); - if (insn & (1 << 20)) { - tmp64 = gen_addq_msw(tmp64, tmp); - } else { - tmp64 = gen_subq_msw(tmp64, tmp); - } - } - if (insn & (1 << 4)) { - tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u); - } - tcg_gen_shri_i64(tmp64, tmp64, 32); - tmp = tcg_temp_new_i32(); - tcg_gen_trunc_i64_i32(tmp, tmp64); - tcg_temp_free_i64(tmp64); - break; - case 7: /* Unsigned sum of absolute differences. */ - gen_helper_usad8(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - if (rs != 15) { - tmp2 = load_reg(s, rs); - tcg_gen_add_i32(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - } - break; - } - store_reg(s, rd, tmp); - break; - case 6: case 7: /* 64-bit multiply, Divide. */ - op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70); - tmp = load_reg(s, rn); - tmp2 = load_reg(s, rm); - if ((op & 0x50) == 0x10) { - /* sdiv, udiv */ - if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) { - goto illegal_op; + /* pkhbt */ + if (shift) + tcg_gen_shli_i32(tmp2, tmp2, shift); + tcg_gen_ext16u_i32(tmp, tmp); + tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000); } - if (op & 0x20) - gen_helper_udiv(tmp, tmp, tmp2); - else - gen_helper_sdiv(tmp, tmp, tmp2); + tcg_gen_or_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); - } else if ((op & 0xe) == 0xc) { - /* Dual multiply accumulate long. */ - if (op & 1) - gen_swap_half(tmp2); - gen_smul_dual(tmp, tmp2); - if (op & 0x10) { - tcg_gen_sub_i32(tmp, tmp, tmp2); + } else { + /* Data processing register constant shift. 
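[Editor's note] gen_mulxy(tmp, tmp2, op & 2, op & 1) in the 16 x 16 -> 32 case above selects the top or bottom halfword of each operand before a signed multiply (SMULBB/SMULBT/SMULTB/SMULTT). Equivalently:

#include <stdint.h>

static int32_t smulxy(uint32_t a, uint32_t b, int x, int y) {
    int32_t ah = x ? (int16_t) (a >> 16) : (int16_t) a;
    int32_t bh = y ? (int16_t) (b >> 16) : (int16_t) b;
    return ah * bh;
}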
*/ + if (rn == 15) { + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, 0); } else { - tcg_gen_add_i32(tmp, tmp, tmp2); + tmp = load_reg(s, rn); } + tmp2 = load_reg(s, rm); + + shiftop = (insn >> 4) & 3; + shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c); + conds = (insn & (1 << 20)) != 0; + logic_cc = (conds && thumb2_logic_op(op)); + gen_arm_shift_im(tmp2, shiftop, shift, logic_cc); + if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2)) + goto illegal_op; tcg_temp_free_i32(tmp2); - /* BUGFIX */ - tmp64 = tcg_temp_new_i64(); - tcg_gen_ext_i32_i64(tmp64, tmp); - tcg_temp_free_i32(tmp); - gen_addq(s, tmp64, rs, rd); - gen_storeq_reg(s, rs, rd, tmp64); - tcg_temp_free_i64(tmp64); - } else { - if (op & 0x20) { - /* Unsigned 64-bit multiply */ - tmp64 = gen_mulu_i64_i32(tmp, tmp2); + if (rd != 15) { + store_reg(s, rd, tmp); } else { - if (op & 8) { - /* smlalxy */ - gen_mulxy(tmp, tmp2, op & 2, op & 1); - tcg_temp_free_i32(tmp2); - tmp64 = tcg_temp_new_i64(); - tcg_gen_ext_i32_i64(tmp64, tmp); - tcg_temp_free_i32(tmp); - } else { - /* Signed 64-bit multiply */ - tmp64 = gen_muls_i64_i32(tmp, tmp2); - } - } - if (op & 4) { - /* umaal */ - gen_addq_lo(s, tmp64, rs); - gen_addq_lo(s, tmp64, rd); - } else if (op & 0x40) { - /* 64-bit accumulate. */ - gen_addq(s, tmp64, rs, rd); + tcg_temp_free_i32(tmp); } - gen_storeq_reg(s, rs, rd, tmp64); - tcg_temp_free_i64(tmp64); } break; - } - break; - case 6: case 7: case 14: case 15: - /* Coprocessor. */ - if (((insn >> 24) & 3) == 3) { - /* Translate into the equivalent ARM encoding. */ - insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28); - if (disas_neon_data_insn(env, s, insn)) + case 13: /* Misc data processing. */ + op = ((insn >> 22) & 6) | ((insn >> 7) & 1); + if (op < 4 && (insn & 0xf000) != 0xf000) goto illegal_op; - } else { - if (insn & (1 << 28)) - goto illegal_op; - if (disas_coproc_insn (env, s, insn)) - goto illegal_op; - } - break; - case 8: case 9: case 10: case 11: - if (insn & (1 << 15)) { - /* Branches, misc control. */ - if (insn & 0x5000) { - /* Unconditional branch. */ - /* signextend(hw1[10:0]) -> offset[:12]. */ - offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff; - /* hw1[10:0] -> offset[11:1]. */ - offset |= (insn & 0x7ff) << 1; - /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22] - offset[24:22] already have the same value because of the - sign extension above. */ - offset ^= ((~insn) & (1 << 13)) << 10; - offset ^= ((~insn) & (1 << 11)) << 11; - - if (insn & (1 << 14)) { - /* Branch and link. */ - tcg_gen_movi_i32(cpu_R[14], s->pc | 1); - } - - offset += s->pc; - if (insn & (1 << 12)) { - /* b/bl */ - gen_jmp(s, offset); - } else { - /* blx */ - offset &= ~(uint32_t)2; - /* thumb2 bx, no need to check */ - gen_bx_im(s, offset); - } - } else if (((insn >> 23) & 7) == 7) { - /* Misc control */ - if (insn & (1 << 13)) - goto illegal_op; - - if (insn & (1 << 26)) { - /* Secure monitor call (v6Z) */ - goto illegal_op; /* not implemented. */ - } else { + switch (op) { + case 0: /* Register controlled shift. */ + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + if ((insn & 0x70) != 0) + goto illegal_op; + op = (insn >> 21) & 3; + logic_cc = (insn & (1 << 20)) != 0; + gen_arm_shift_reg(tmp, op, tmp2, logic_cc); + if (logic_cc) + gen_logic_CC(tmp); + store_reg_bx(env, s, rd, tmp); + break; + case 1: /* Sign/zero extend. */ + tmp = load_reg(s, rm); + shift = (insn >> 4) & 3; + /* ??? In many cases it's not necessary to do a + rotate, a shift is sufficient. 
*/ + if (shift != 0) + tcg_gen_rotri_i32(tmp, tmp, shift * 8); op = (insn >> 20) & 7; switch (op) { - case 0: /* msr cpsr. */ - if (IS_M(env)) { - tmp = load_reg(s, rn); - addr = tcg_const_i32(insn & 0xff); - gen_helper_v7m_msr(cpu_env, addr, tmp); - tcg_temp_free_i32(addr); - tcg_temp_free_i32(tmp); - gen_lookup_tb(s); + case 0: + gen_sxth(tmp); break; - } - /* fall through */ - case 1: /* msr spsr. */ - if (IS_M(env)) + case 1: + gen_uxth(tmp); + break; + case 2: + gen_sxtb16(tmp); + break; + case 3: + gen_uxtb16(tmp); + break; + case 4: + gen_sxtb(tmp); + break; + case 5: + gen_uxtb(tmp); + break; + default: goto illegal_op; + } + if (rn != 15) { + tmp2 = load_reg(s, rn); + if ((op >> 1) == 1) { + gen_add16(tmp, tmp2); + } else { + tcg_gen_add_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + } + store_reg(s, rd, tmp); + break; + case 2: /* SIMD add/subtract. */ + op = (insn >> 20) & 7; + shift = (insn >> 4) & 7; + if ((op & 3) == 3 || (shift & 3) == 3) + goto illegal_op; + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + gen_thumb2_parallel_addsub(op, shift, tmp, tmp2); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + break; + case 3: /* Other data processing. */ + op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7); + if (op < 4) { + /* Saturating add/subtract. */ tmp = load_reg(s, rn); - if (gen_set_psr(s, - msr_mask(env, s, (insn >> 8) & 0xf, op == 1), - op == 1, tmp)) - goto illegal_op; - break; - case 2: /* cps, nop-hint. */ - if (((insn >> 8) & 7) == 0) { - gen_nop_hint(s, insn & 0xff); + tmp2 = load_reg(s, rm); + if (op & 1) + gen_helper_double_saturate(tmp, tmp); + if (op & 2) + gen_helper_sub_saturate(tmp, tmp2, tmp); + else + gen_helper_add_saturate(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } else { + tmp = load_reg(s, rn); + switch (op) { + case 0x0a: /* rbit */ + gen_helper_rbit(tmp, tmp); + break; + case 0x08: /* rev */ + tcg_gen_bswap32_i32(tmp, tmp); + break; + case 0x09: /* rev16 */ + gen_rev16(tmp); + break; + case 0x0b: /* revsh */ + gen_revsh(tmp); + break; + case 0x10: /* sel */ + tmp2 = load_reg(s, rm); + tmp3 = tcg_temp_new_i32(); + tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE)); + gen_helper_sel_flags(tmp, tmp3, tmp, tmp2); + tcg_temp_free_i32(tmp3); + tcg_temp_free_i32(tmp2); + break; + case 0x18: /* clz */ + gen_helper_clz(tmp, tmp); + break; + default: + goto illegal_op; } - /* Implemented as NOP in user mode. */ - if (IS_USER(s)) + } + store_reg(s, rd, tmp); + break; + case 4: + case 5: /* 32-bit multiply. Sum of absolute differences. */ + op = (insn >> 4) & 0xf; + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + switch ((insn >> 20) & 7) { + case 0: /* 32 x 32 -> 32 */ + tcg_gen_mul_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + if (rs != 15) { + tmp2 = load_reg(s, rs); + if (op) + tcg_gen_sub_i32(tmp, tmp2, tmp); + else + tcg_gen_add_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } break; - offset = 0; - imm = 0; - if (insn & (1 << 10)) { - if (insn & (1 << 7)) - offset |= CPSR_A; - if (insn & (1 << 6)) - offset |= CPSR_I; - if (insn & (1 << 5)) - offset |= CPSR_F; - if (insn & (1 << 9)) - imm = CPSR_A | CPSR_I | CPSR_F; + case 1: /* 16 x 16 -> 32 */ + gen_mulxy(tmp, tmp2, op & 2, op & 1); + tcg_temp_free_i32(tmp2); + if (rs != 15) { + tmp2 = load_reg(s, rs); + gen_helper_add_setq(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + break; + case 2: /* Dual multiply add. */ + case 4: /* Dual multiply subtract. 
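[Editor's note] The saturating add/subtract group above (QADD/QSUB/QDADD/QDSUB) clamps a signed result into the 32-bit range; gen_helper_double_saturate() doubles the operand under the same clamp, and all of these helpers set the Q flag on saturation (omitted in this sketch):

#include <stdint.h>

static int32_t qadd32(int32_t a, int32_t b) {
    int64_t r = (int64_t) a + b;   /* widen so the sum cannot wrap */
    if (r > INT32_MAX) return INT32_MAX;
    if (r < INT32_MIN) return INT32_MIN;
    return (int32_t) r;
}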
*/ + if (op) + gen_swap_half(tmp2); + gen_smul_dual(tmp, tmp2); + if (insn & (1 << 22)) { + /* This subtraction cannot overflow. */ + tcg_gen_sub_i32(tmp, tmp, tmp2); + } else { + /* This addition cannot overflow 32 bits; + * however it may overflow considered as a signed + * operation, in which case we must set the Q flag. + */ + gen_helper_add_setq(tmp, tmp, tmp2); + } + tcg_temp_free_i32(tmp2); + if (rs != 15) { + tmp2 = load_reg(s, rs); + gen_helper_add_setq(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + break; + case 3: /* 32 * 16 -> 32msb */ + if (op) + tcg_gen_sari_i32(tmp2, tmp2, 16); + else + gen_sxth(tmp2); + tmp64 = gen_muls_i64_i32(tmp, tmp2); + tcg_gen_shri_i64(tmp64, tmp64, 16); + tmp = tcg_temp_new_i32(); + tcg_gen_trunc_i64_i32(tmp, tmp64); + tcg_temp_free_i64(tmp64); + if (rs != 15) { + tmp2 = load_reg(s, rs); + gen_helper_add_setq(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + break; + case 5: + case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */ + tmp64 = gen_muls_i64_i32(tmp, tmp2); + if (rs != 15) { + tmp = load_reg(s, rs); + if (insn & (1 << 20)) { + tmp64 = gen_addq_msw(tmp64, tmp); + } else { + tmp64 = gen_subq_msw(tmp64, tmp); + } + } + if (insn & (1 << 4)) { + tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u); + } + tcg_gen_shri_i64(tmp64, tmp64, 32); + tmp = tcg_temp_new_i32(); + tcg_gen_trunc_i64_i32(tmp, tmp64); + tcg_temp_free_i64(tmp64); + break; + case 7: /* Unsigned sum of absolute differences. */ + gen_helper_usad8(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + if (rs != 15) { + tmp2 = load_reg(s, rs); + tcg_gen_add_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + } + break; + } + store_reg(s, rd, tmp); + break; + case 6: + case 7: /* 64-bit multiply, Divide. */ + op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70); + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + if ((op & 0x50) == 0x10) { + /* sdiv, udiv */ + if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) { + goto illegal_op; + } + if (op & 0x20) + gen_helper_udiv(tmp, tmp, tmp2); + else + gen_helper_sdiv(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + } else if ((op & 0xe) == 0xc) { + /* Dual multiply accumulate long. */ + if (op & 1) + gen_swap_half(tmp2); + gen_smul_dual(tmp, tmp2); + if (op & 0x10) { + tcg_gen_sub_i32(tmp, tmp, tmp2); + } else { + tcg_gen_add_i32(tmp, tmp, tmp2); } - if (insn & (1 << 8)) { - offset |= 0x1f; - imm |= (insn & 0x1f); + tcg_temp_free_i32(tmp2); + /* BUGFIX */ + tmp64 = tcg_temp_new_i64(); + tcg_gen_ext_i32_i64(tmp64, tmp); + tcg_temp_free_i32(tmp); + gen_addq(s, tmp64, rs, rd); + gen_storeq_reg(s, rs, rd, tmp64); + tcg_temp_free_i64(tmp64); + } else { + if (op & 0x20) { + /* Unsigned 64-bit multiply */ + tmp64 = gen_mulu_i64_i32(tmp, tmp2); + } else { + if (op & 8) { + /* smlalxy */ + gen_mulxy(tmp, tmp2, op & 2, op & 1); + tcg_temp_free_i32(tmp2); + tmp64 = tcg_temp_new_i64(); + tcg_gen_ext_i32_i64(tmp64, tmp); + tcg_temp_free_i32(tmp); + } else { + /* Signed 64-bit multiply */ + tmp64 = gen_muls_i64_i32(tmp, tmp2); + } } - if (offset) { - gen_set_psr_im(s, offset, 0, imm); + if (op & 4) { + /* umaal */ + gen_addq_lo(s, tmp64, rs); + gen_addq_lo(s, tmp64, rd); + } else if (op & 0x40) { + /* 64-bit accumulate. */ + gen_addq(s, tmp64, rs, rd); } - break; - case 3: /* Special control operations. */ - ARCH(7); - op = (insn >> 4) & 0xf; + gen_storeq_reg(s, rs, rd, tmp64); + tcg_temp_free_i64(tmp64); + } + break; + } + break; + case 6: + case 7: + case 14: + case 15: + /* Coprocessor. 
*/ + if (((insn >> 24) & 3) == 3) { + /* Translate into the equivalent ARM encoding. */ + insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28); + if (disas_neon_data_insn(env, s, insn)) + goto illegal_op; + } else { + if (insn & (1 << 28)) + goto illegal_op; + if (disas_coproc_insn(env, s, insn)) + goto illegal_op; + } + break; + case 8: + case 9: + case 10: + case 11: + if (insn & (1 << 15)) { + /* Branches, misc control. */ + if (insn & 0x5000) { + /* Unconditional branch. */ + /* signextend(hw1[10:0]) -> offset[:12]. */ + offset = ((int32_t) insn << 5) >> 9 & ~(int32_t) 0xfff; + /* hw1[10:0] -> offset[11:1]. */ + offset |= (insn & 0x7ff) << 1; + /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22] + offset[24:22] already have the same value because of the + sign extension above. */ + offset ^= ((~insn) & (1 << 13)) << 10; + offset ^= ((~insn) & (1 << 11)) << 11; + + if (insn & (1 << 14)) { + /* Branch and link. */ + tcg_gen_movi_i32(cpu_R[14], s->pc | 1); + } + + offset += s->pc; + if (insn & (1 << 12)) { + /* b/bl */ + gen_jmp(s, offset); + } else { + /* blx */ + offset &= ~(uint32_t) 2; + /* thumb2 bx, no need to check */ + gen_bx_im(s, offset); + } + } else if (((insn >> 23) & 7) == 7) { + /* Misc control */ + if (insn & (1 << 13)) + goto illegal_op; + + if (insn & (1 << 26)) { + /* Secure monitor call (v6Z) */ + goto illegal_op; /* not implemented. */ + } else { + op = (insn >> 20) & 7; switch (op) { - case 2: /* clrex */ - gen_clrex(s); - break; - case 4: /* dsb */ - case 5: /* dmb */ - case 6: /* isb */ - /* These execute as NOPs. */ - break; - default: - goto illegal_op; + case 0: /* msr cpsr. */ + if (IS_M(env)) { + tmp = load_reg(s, rn); + addr = tcg_const_i32(insn & 0xff); + gen_helper_v7m_msr(cpu_env, addr, tmp); + tcg_temp_free_i32(addr); + tcg_temp_free_i32(tmp); + gen_lookup_tb(s); + break; + } + /* fall through */ + case 1: /* msr spsr. */ + if (IS_M(env)) + goto illegal_op; + tmp = load_reg(s, rn); + if (gen_set_psr(s, msr_mask(env, s, (insn >> 8) & 0xf, op == 1), op == 1, tmp)) + goto illegal_op; + break; + case 2: /* cps, nop-hint. */ + if (((insn >> 8) & 7) == 0) { + gen_nop_hint(s, insn & 0xff); + } + /* Implemented as NOP in user mode. */ + if (IS_USER(s)) + break; + offset = 0; + imm = 0; + if (insn & (1 << 10)) { + if (insn & (1 << 7)) + offset |= CPSR_A; + if (insn & (1 << 6)) + offset |= CPSR_I; + if (insn & (1 << 5)) + offset |= CPSR_F; + if (insn & (1 << 9)) + imm = CPSR_A | CPSR_I | CPSR_F; + } + if (insn & (1 << 8)) { + offset |= 0x1f; + imm |= (insn & 0x1f); + } + if (offset) { + gen_set_psr_im(s, offset, 0, imm); + } + break; + case 3: /* Special control operations. */ + ARCH(7); + op = (insn >> 4) & 0xf; + switch (op) { + case 2: /* clrex */ + gen_clrex(s); + break; + case 4: /* dsb */ + case 5: /* dmb */ + case 6: /* isb */ + /* These execute as NOPs. */ + break; + default: + goto illegal_op; + } + break; + case 4: /* bxj */ + /* Trivial implementation equivalent to bx. */ + tmp = load_reg(s, rn); + gen_bx(s, tmp); + break; + case 5: /* Exception return. */ + if (IS_USER(s)) { + goto illegal_op; + } + if (rn != 14 || rd != 15) { + goto illegal_op; + } + tmp = load_reg(s, rn); + tcg_gen_subi_i32(tmp, tmp, insn & 0xff); + gen_exception_return(s, tmp); + break; + case 6: /* mrs cpsr. */ + tmp = tcg_temp_new_i32(); + if (IS_M(env)) { + addr = tcg_const_i32(insn & 0xff); + gen_helper_v7m_mrs(tmp, cpu_env, addr); + tcg_temp_free_i32(addr); + } else { + gen_helper_cpsr_read(tmp); + } + store_reg(s, rd, tmp); + break; + case 7: /* mrs spsr. 
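The unconditional-branch decode earlier in this hunk rebuilds the 25-bit offset from S:I1:I2:imm10:imm11, where I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S). Because the arithmetic shift already sign-extended S over bits 23:22, XORing with the inverted J bits yields I1/I2 in place, as the in-line comments note. The same computation as a standalone sketch (illustrative only):

static int32_t t32_branch_offset(uint32_t insn) /* hw1:hw2 combined */
{
    int32_t off = (((int32_t) insn << 5) >> 9) & ~(int32_t) 0xfff; /* S, imm10 */
    off |= (insn & 0x7ff) << 1;         /* imm11 -> off[11:1] */
    off ^= ((~insn) & (1 << 13)) << 10; /* I1 = ~(J1 ^ S) -> off[23] */
    off ^= ((~insn) & (1 << 11)) << 11; /* I2 = ~(J2 ^ S) -> off[22] */
    return off;
}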
*/ + /* Not accessible in user mode. */ + if (IS_USER(s) || IS_M(env)) + goto illegal_op; + tmp = load_cpu_field(spsr); + store_reg(s, rd, tmp); + break; } - break; - case 4: /* bxj */ - /* Trivial implementation equivalent to bx. */ - tmp = load_reg(s, rn); - gen_bx(s, tmp); - break; - case 5: /* Exception return. */ - if (IS_USER(s)) { + } + } else { + /* Conditional branch. */ + op = (insn >> 22) & 0xf; + /* Generate a conditional jump to next instruction. */ + s->condlabel = gen_new_label(); + gen_test_cc(op ^ 1, s->condlabel); + s->condjmp = 1; + + /* offset[11:1] = insn[10:0] */ + offset = (insn & 0x7ff) << 1; + /* offset[17:12] = insn[21:16]. */ + offset |= (insn & 0x003f0000) >> 4; + /* offset[31:20] = insn[26]. */ + offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11; + /* offset[18] = insn[13]. */ + offset |= (insn & (1 << 13)) << 5; + /* offset[19] = insn[11]. */ + offset |= (insn & (1 << 11)) << 8; + + /* jump to the offset */ + gen_jmp(s, s->pc + offset); + } + } else { + /* Data processing immediate. */ + if (insn & (1 << 25)) { + if (insn & (1 << 24)) { + if (insn & (1 << 20)) goto illegal_op; + /* Bitfield/Saturate. */ + op = (insn >> 21) & 7; + imm = insn & 0x1f; + shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c); + if (rn == 15) { + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, 0); + } else { + tmp = load_reg(s, rn); } - if (rn != 14 || rd != 15) { - goto illegal_op; + switch (op) { + case 2: /* Signed bitfield extract. */ + imm++; + if (shift + imm > 32) + goto illegal_op; + if (imm < 32) + gen_sbfx(tmp, shift, imm); + break; + case 6: /* Unsigned bitfield extract. */ + imm++; + if (shift + imm > 32) + goto illegal_op; + if (imm < 32) + gen_ubfx(tmp, shift, (1u << imm) - 1); + break; + case 3: /* Bitfield insert/clear. */ + if (imm < shift) + goto illegal_op; + imm = imm + 1 - shift; + if (imm != 32) { + tmp2 = load_reg(s, rd); + gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1); + tcg_temp_free_i32(tmp2); + } + break; + case 7: + goto illegal_op; + default: /* Saturate. */ + if (shift) { + if (op & 1) + tcg_gen_sari_i32(tmp, tmp, shift); + else + tcg_gen_shli_i32(tmp, tmp, shift); + } + tmp2 = tcg_const_i32(imm); + if (op & 4) { + /* Unsigned. */ + if ((op & 1) && shift == 0) + gen_helper_usat16(tmp, tmp, tmp2); + else + gen_helper_usat(tmp, tmp, tmp2); + } else { + /* Signed. */ + if ((op & 1) && shift == 0) + gen_helper_ssat16(tmp, tmp, tmp2); + else + gen_helper_ssat(tmp, tmp, tmp2); + } + tcg_temp_free_i32(tmp2); + break; } - tmp = load_reg(s, rn); - tcg_gen_subi_i32(tmp, tmp, insn & 0xff); - gen_exception_return(s, tmp); - break; - case 6: /* mrs cpsr. */ - tmp = tcg_temp_new_i32(); - if (IS_M(env)) { - addr = tcg_const_i32(insn & 0xff); - gen_helper_v7m_mrs(tmp, cpu_env, addr); - tcg_temp_free_i32(addr); + store_reg(s, rd, tmp); + } else { + imm = ((insn & 0x04000000) >> 15) | ((insn & 0x7000) >> 4) | (insn & 0xff); + if (insn & (1 << 22)) { + /* 16-bit immediate. */ + imm |= (insn >> 4) & 0xf000; + if (insn & (1 << 23)) { + /* movt */ + tmp = load_reg(s, rd); + tcg_gen_ext16u_i32(tmp, tmp); + tcg_gen_ori_i32(tmp, tmp, imm << 16); + } else { + /* movw */ + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, imm); + } } else { - gen_helper_cpsr_read(tmp); + /* Add/sub 12-bit immediate. 
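The bitfield cases above (SBFX/UBFX/BFI) reject any encoding where lsb + width would run past bit 31 and special-case the full 32-bit width, where no masking is needed. A scalar model of the two extract forms, assuming 1 <= width <= 31 (sketch only):

static uint32_t ubfx_model(uint32_t val, unsigned lsb, unsigned width)
{
    return (val >> lsb) & ((1u << width) - 1);
}

static int32_t sbfx_model(uint32_t val, unsigned lsb, unsigned width)
{
    /* move the field to the top, then arithmetic-shift it back down */
    return ((int32_t) (val << (32 - lsb - width))) >> (32 - width);
}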
*/ + if (rn == 15) { + offset = s->pc & ~(uint32_t) 3; + if (insn & (1 << 23)) + offset -= imm; + else + offset += imm; + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, offset); + } else { + tmp = load_reg(s, rn); + if (insn & (1 << 23)) + tcg_gen_subi_i32(tmp, tmp, imm); + else + tcg_gen_addi_i32(tmp, tmp, imm); + } } store_reg(s, rd, tmp); - break; - case 7: /* mrs spsr. */ - /* Not accessible in user mode. */ - if (IS_USER(s) || IS_M(env)) - goto illegal_op; - tmp = load_cpu_field(spsr); - store_reg(s, rd, tmp); - break; } - } - } else { - /* Conditional branch. */ - op = (insn >> 22) & 0xf; - /* Generate a conditional jump to next instruction. */ - s->condlabel = gen_new_label(); - gen_test_cc(op ^ 1, s->condlabel); - s->condjmp = 1; - - /* offset[11:1] = insn[10:0] */ - offset = (insn & 0x7ff) << 1; - /* offset[17:12] = insn[21:16]. */ - offset |= (insn & 0x003f0000) >> 4; - /* offset[31:20] = insn[26]. */ - offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11; - /* offset[18] = insn[13]. */ - offset |= (insn & (1 << 13)) << 5; - /* offset[19] = insn[11]. */ - offset |= (insn & (1 << 11)) << 8; - - /* jump to the offset */ - gen_jmp(s, s->pc + offset); - } - } else { - /* Data processing immediate. */ - if (insn & (1 << 25)) { - if (insn & (1 << 24)) { - if (insn & (1 << 20)) - goto illegal_op; - /* Bitfield/Saturate. */ - op = (insn >> 21) & 7; - imm = insn & 0x1f; - shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c); + } else { + int shifter_out = 0; + /* modified 12-bit immediate. */ + shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12); + imm = (insn & 0xff); + switch (shift) { + case 0: /* XY */ + /* Nothing to do. */ + break; + case 1: /* 00XY00XY */ + imm |= imm << 16; + break; + case 2: /* XY00XY00 */ + imm |= imm << 16; + imm <<= 8; + break; + case 3: /* XYXYXYXY */ + imm |= imm << 16; + imm |= imm << 8; + break; + default: /* Rotated constant. */ + shift = (shift << 1) | (imm >> 7); + imm |= 0x80; + imm = imm << (32 - shift); + shifter_out = 1; + break; + } + tmp2 = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp2, imm); + rn = (insn >> 16) & 0xf; if (rn == 15) { tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, 0); } else { tmp = load_reg(s, rn); } - switch (op) { - case 2: /* Signed bitfield extract. */ - imm++; - if (shift + imm > 32) - goto illegal_op; - if (imm < 32) - gen_sbfx(tmp, shift, imm); - break; - case 6: /* Unsigned bitfield extract. */ - imm++; - if (shift + imm > 32) - goto illegal_op; - if (imm < 32) - gen_ubfx(tmp, shift, (1u << imm) - 1); - break; - case 3: /* Bitfield insert/clear. */ - if (imm < shift) - goto illegal_op; - imm = imm + 1 - shift; - if (imm != 32) { - tmp2 = load_reg(s, rd); - gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1); - tcg_temp_free_i32(tmp2); - } - break; - case 7: + op = (insn >> 21) & 0xf; + if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0, shifter_out, tmp, tmp2)) goto illegal_op; - default: /* Saturate. */ - if (shift) { - if (op & 1) - tcg_gen_sari_i32(tmp, tmp, shift); - else - tcg_gen_shli_i32(tmp, tmp, shift); - } - tmp2 = tcg_const_i32(imm); - if (op & 4) { - /* Unsigned. */ - if ((op & 1) && shift == 0) - gen_helper_usat16(tmp, tmp, tmp2); - else - gen_helper_usat(tmp, tmp, tmp2); - } else { - /* Signed. 
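The modified 12-bit immediate decoded above is Thumb-2's ThumbExpandImm: i:imm3 picks a replication pattern for imm8, and values of 8 or more turn i:imm3:imm8[7] into a 5-bit rotation applied to imm8 | 0x80. A self-contained model, omitting the shifter_out carry bookkeeping (illustrative sketch):

static uint32_t thumb_expand_imm(uint32_t imm12) /* i:imm3:imm8 */
{
    uint32_t imm8 = imm12 & 0xff;
    uint32_t rot;

    switch (imm12 >> 8) {
    case 0: return imm8;                       /* 000000XY */
    case 1: return imm8 | (imm8 << 16);        /* 00XY00XY */
    case 2: return (imm8 << 8) | (imm8 << 24); /* XY00XY00 */
    case 3: return imm8 * 0x01010101u;         /* XYXYXYXY */
    default:
        rot = imm12 >> 7;                      /* 5-bit rotation, 8..31 */
        imm8 = (imm12 & 0x7f) | 0x80;
        return (imm8 >> rot) | (imm8 << (32 - rot));
    }
}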
*/ - if ((op & 1) && shift == 0) - gen_helper_ssat16(tmp, tmp, tmp2); - else - gen_helper_ssat(tmp, tmp, tmp2); - } - tcg_temp_free_i32(tmp2); - break; - } - store_reg(s, rd, tmp); - } else { - imm = ((insn & 0x04000000) >> 15) - | ((insn & 0x7000) >> 4) | (insn & 0xff); - if (insn & (1 << 22)) { - /* 16-bit immediate. */ - imm |= (insn >> 4) & 0xf000; - if (insn & (1 << 23)) { - /* movt */ - tmp = load_reg(s, rd); - tcg_gen_ext16u_i32(tmp, tmp); - tcg_gen_ori_i32(tmp, tmp, imm << 16); - } else { - /* movw */ - tmp = tcg_temp_new_i32(); - tcg_gen_movi_i32(tmp, imm); - } + tcg_temp_free_i32(tmp2); + rd = (insn >> 8) & 0xf; + if (rd != 15) { + store_reg(s, rd, tmp); } else { - /* Add/sub 12-bit immediate. */ - if (rn == 15) { - offset = s->pc & ~(uint32_t)3; - if (insn & (1 << 23)) - offset -= imm; - else - offset += imm; - tmp = tcg_temp_new_i32(); - tcg_gen_movi_i32(tmp, offset); - } else { - tmp = load_reg(s, rn); - if (insn & (1 << 23)) - tcg_gen_subi_i32(tmp, tmp, imm); - else - tcg_gen_addi_i32(tmp, tmp, imm); - } + tcg_temp_free_i32(tmp); } - store_reg(s, rd, tmp); - } - } else { - int shifter_out = 0; - /* modified 12-bit immediate. */ - shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12); - imm = (insn & 0xff); - switch (shift) { - case 0: /* XY */ - /* Nothing to do. */ - break; - case 1: /* 00XY00XY */ - imm |= imm << 16; - break; - case 2: /* XY00XY00 */ - imm |= imm << 16; - imm <<= 8; - break; - case 3: /* XYXYXYXY */ - imm |= imm << 16; - imm |= imm << 8; - break; - default: /* Rotated constant. */ - shift = (shift << 1) | (imm >> 7); - imm |= 0x80; - imm = imm << (32 - shift); - shifter_out = 1; - break; - } - tmp2 = tcg_temp_new_i32(); - tcg_gen_movi_i32(tmp2, imm); - rn = (insn >> 16) & 0xf; - if (rn == 15) { - tmp = tcg_temp_new_i32(); - tcg_gen_movi_i32(tmp, 0); - } else { - tmp = load_reg(s, rn); - } - op = (insn >> 21) & 0xf; - if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0, - shifter_out, tmp, tmp2)) - goto illegal_op; - tcg_temp_free_i32(tmp2); - rd = (insn >> 8) & 0xf; - if (rd != 15) { - store_reg(s, rd, tmp); - } else { - tcg_temp_free_i32(tmp); } } - } - break; - case 12: /* Load/store single data item. */ - { - int postinc = 0; - int writeback = 0; - int user; - if ((insn & 0x01100000) == 0x01000000) { - if (disas_neon_ls_insn(env, s, insn)) - goto illegal_op; break; - } - op = ((insn >> 21) & 3) | ((insn >> 22) & 4); - if (rs == 15) { - if (!(insn & (1 << 20))) { - goto illegal_op; + case 12: /* Load/store single data item. */ + { + int postinc = 0; + int writeback = 0; + int user; + if ((insn & 0x01100000) == 0x01000000) { + if (disas_neon_ls_insn(env, s, insn)) + goto illegal_op; + break; } - if (op != 2) { - /* Byte or halfword load space with dest == r15 : memory hints. - * Catch them early so we don't emit pointless addressing code. - * This space is a mix of: - * PLD/PLDW/PLI, which we implement as NOPs (note that unlike - * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP - * cores) - * unallocated hints, which must be treated as NOPs - * UNPREDICTABLE space, which we NOP or UNDEF depending on - * which is easiest for the decoding logic - * Some space which must UNDEF - */ - int op1 = (insn >> 23) & 3; - int op2 = (insn >> 6) & 0x3f; - if (op & 2) { + op = ((insn >> 21) & 3) | ((insn >> 22) & 4); + if (rs == 15) { + if (!(insn & (1 << 20))) { goto illegal_op; } - if (rn == 15) { - /* UNPREDICTABLE, unallocated hint or - * PLD/PLDW/PLI (literal) + if (op != 2) { + /* Byte or halfword load space with dest == r15 : memory hints. 
+ * Catch them early so we don't emit pointless addressing code. + * This space is a mix of: + * PLD/PLDW/PLI, which we implement as NOPs (note that unlike + * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP + * cores) + * unallocated hints, which must be treated as NOPs + * UNPREDICTABLE space, which we NOP or UNDEF depending on + * which is easiest for the decoding logic + * Some space which must UNDEF */ - return 0; - } - if (op1 & 1) { - return 0; /* PLD/PLDW/PLI or unallocated hint */ + int op1 = (insn >> 23) & 3; + int op2 = (insn >> 6) & 0x3f; + if (op & 2) { + goto illegal_op; + } + if (rn == 15) { + /* UNPREDICTABLE, unallocated hint or + * PLD/PLDW/PLI (literal) + */ + return 0; + } + if (op1 & 1) { + return 0; /* PLD/PLDW/PLI or unallocated hint */ + } + if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) { + return 0; /* PLD/PLDW/PLI or unallocated hint */ + } + /* UNDEF space, or an UNPREDICTABLE */ + return 1; } - if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) { - return 0; /* PLD/PLDW/PLI or unallocated hint */ + } + user = IS_USER(s); + if (rn == 15) { + addr = tcg_temp_new_i32(); + /* PC relative. */ + /* s->pc has already been incremented by 4. */ + imm = s->pc & 0xfffffffc; + if (insn & (1 << 23)) + imm += insn & 0xfff; + else + imm -= insn & 0xfff; + tcg_gen_movi_i32(addr, imm); + } else { + addr = load_reg(s, rn); + if (insn & (1 << 23)) { + /* Positive offset. */ + imm = insn & 0xfff; + tcg_gen_addi_i32(addr, addr, imm); + } else { + imm = insn & 0xff; + switch ((insn >> 8) & 0xf) { + case 0x0: /* Shifted Register. */ + shift = (insn >> 4) & 0xf; + if (shift > 3) { + tcg_temp_free_i32(addr); + goto illegal_op; + } + tmp = load_reg(s, rm); + if (shift) + tcg_gen_shli_i32(tmp, tmp, shift); + tcg_gen_add_i32(addr, addr, tmp); + tcg_temp_free_i32(tmp); + break; + case 0xc: /* Negative offset. */ + tcg_gen_addi_i32(addr, addr, -imm); + break; + case 0xe: /* User privilege. */ + tcg_gen_addi_i32(addr, addr, imm); + user = 1; + break; + case 0x9: /* Post-decrement. */ + imm = -imm; + /* Fall through. */ + case 0xb: /* Post-increment. */ + postinc = 1; + writeback = 1; + break; + case 0xd: /* Pre-decrement. */ + imm = -imm; + /* Fall through. */ + case 0xf: /* Pre-increment. */ + tcg_gen_addi_i32(addr, addr, imm); + writeback = 1; + break; + default: + tcg_temp_free_i32(addr); + goto illegal_op; + } } - /* UNDEF space, or an UNPREDICTABLE */ - return 1; } - } - user = IS_USER(s); - if (rn == 15) { - addr = tcg_temp_new_i32(); - /* PC relative. */ - /* s->pc has already been incremented by 4. */ - imm = s->pc & 0xfffffffc; - if (insn & (1 << 23)) - imm += insn & 0xfff; - else - imm -= insn & 0xfff; - tcg_gen_movi_i32(addr, imm); - } else { - addr = load_reg(s, rn); - if (insn & (1 << 23)) { - /* Positive offset. */ - imm = insn & 0xfff; - tcg_gen_addi_i32(addr, addr, imm); + if (insn & (1 << 20)) { + /* Load. */ + switch (op) { + case 0: + tmp = gen_ld8u(addr, user); + break; + case 4: + tmp = gen_ld8s(addr, user); + break; + case 1: + tmp = gen_ld16u(addr, user); + break; + case 5: + tmp = gen_ld16s(addr, user); + break; + case 2: + tmp = gen_ld32(addr, user); + break; + default: + tcg_temp_free_i32(addr); + goto illegal_op; + } + if (rs == 15) { + gen_bx(s, tmp); + } else { + store_reg(s, rs, tmp); + } } else { - imm = insn & 0xff; - switch ((insn >> 8) & 0xf) { - case 0x0: /* Shifted Register. */ - shift = (insn >> 4) & 0xf; - if (shift > 3) { + /* Store. 
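For the forms above with bit 23 clear, insn[11:8] selects the addressing mode; summarizing the decode just shown:

/*  insn[11:8]   form              address used         writeback
 *     0x0       register offset   [Rn + Rm << shift]   none
 *     0x9       post-decrement    [Rn]                 Rn -= imm8
 *     0xb       post-increment    [Rn]                 Rn += imm8
 *     0xc       negative offset   [Rn - imm8]          none
 *     0xd       pre-decrement     [Rn - imm8]          Rn -= imm8
 *     0xe       user privilege    [Rn + imm8]          none (user-mode access)
 *     0xf       pre-increment     [Rn + imm8]          Rn += imm8
 */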
*/ + tmp = load_reg(s, rs); + switch (op) { + case 0: + gen_st8(tmp, addr, user); + break; + case 1: + gen_st16(tmp, addr, user); + break; + case 2: + gen_st32(tmp, addr, user); + break; + default: tcg_temp_free_i32(addr); goto illegal_op; - } - tmp = load_reg(s, rm); - if (shift) - tcg_gen_shli_i32(tmp, tmp, shift); - tcg_gen_add_i32(addr, addr, tmp); - tcg_temp_free_i32(tmp); - break; - case 0xc: /* Negative offset. */ - tcg_gen_addi_i32(addr, addr, -imm); - break; - case 0xe: /* User privilege. */ - tcg_gen_addi_i32(addr, addr, imm); - user = 1; - break; - case 0x9: /* Post-decrement. */ - imm = -imm; - /* Fall through. */ - case 0xb: /* Post-increment. */ - postinc = 1; - writeback = 1; - break; - case 0xd: /* Pre-decrement. */ - imm = -imm; - /* Fall through. */ - case 0xf: /* Pre-increment. */ - tcg_gen_addi_i32(addr, addr, imm); - writeback = 1; - break; - default: - tcg_temp_free_i32(addr); - goto illegal_op; } } - } - if (insn & (1 << 20)) { - /* Load. */ - switch (op) { - case 0: tmp = gen_ld8u(addr, user); break; - case 4: tmp = gen_ld8s(addr, user); break; - case 1: tmp = gen_ld16u(addr, user); break; - case 5: tmp = gen_ld16s(addr, user); break; - case 2: tmp = gen_ld32(addr, user); break; - default: - tcg_temp_free_i32(addr); - goto illegal_op; - } - if (rs == 15) { - gen_bx(s, tmp); + if (postinc) + tcg_gen_addi_i32(addr, addr, imm); + if (writeback) { + store_reg(s, rn, addr); } else { - store_reg(s, rs, tmp); - } - } else { - /* Store. */ - tmp = load_reg(s, rs); - switch (op) { - case 0: gen_st8(tmp, addr, user); break; - case 1: gen_st16(tmp, addr, user); break; - case 2: gen_st32(tmp, addr, user); break; - default: tcg_temp_free_i32(addr); - goto illegal_op; } - } - if (postinc) - tcg_gen_addi_i32(addr, addr, imm); - if (writeback) { - store_reg(s, rn, addr); - } else { - tcg_temp_free_i32(addr); - } - } - break; - default: - goto illegal_op; + } break; + default: + goto illegal_op; } return 0; illegal_op: return 1; } -static void disas_thumb_insn(CPUARMState *env, DisasContext *s) -{ +static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { uint32_t val, insn, op, rm, rn, rd, shift, cond; int32_t offset; - uint32_t k, count; //only used for counting the number of reglist when poping with pc + uint32_t k, count; // only used for counting the number of reglist when poping with pc int i; TCGv tmp; TCGv tmp2; @@ -9185,10 +9477,10 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) if (s->condexec_mask) { cond = s->condexec_cond; - if (cond != 0x0e) { /* Skip conditional when condition is AL. */ - s->condlabel = gen_new_label(); - gen_test_cc(cond ^ 1, s->condlabel); - s->condjmp = 1; + if (cond != 0x0e) { /* Skip conditional when condition is AL. 
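A note on the conditional-skip idiom used here and for conditional branches: ARM condition codes come in complementary pairs differing only in bit 0, so cond ^ 1 is the negation of cond. gen_test_cc therefore branches to condlabel exactly when the guarding condition fails, and the generated code has the shape (pseudo-code):

    if (!cond) goto condlabel; /* condition failed: skip the body */
    /* ... translated instruction body ... */
condlabel: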
*/ + s->condlabel = gen_new_label(); + gen_test_cc(cond ^ 1, s->condlabel); + s->condjmp = 1; } } @@ -9196,694 +9488,716 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) s->pc += 2; switch (insn >> 12) { - case 0: case 1: + case 0: + case 1: - rd = insn & 7; - op = (insn >> 11) & 3; - if (op == 3) { - /* add/subtract */ - rn = (insn >> 3) & 7; - tmp = load_reg(s, rn); - if (insn & (1 << 10)) { - /* immediate */ - tmp2 = tcg_temp_new_i32(); - tcg_gen_movi_i32(tmp2, (insn >> 6) & 7); - } else { - /* reg */ - rm = (insn >> 6) & 7; - tmp2 = load_reg(s, rm); - } - if (insn & (1 << 9)) { - if (s->condexec_mask) - tcg_gen_sub_i32(tmp, tmp, tmp2); - else - gen_helper_sub_cc(tmp, tmp, tmp2); - } else { - if (s->condexec_mask) - tcg_gen_add_i32(tmp, tmp, tmp2); - else - gen_helper_add_cc(tmp, tmp, tmp2); - } - tcg_temp_free_i32(tmp2); - store_reg(s, rd, tmp); - } else { - /* shift immediate */ - rm = (insn >> 3) & 7; - shift = (insn >> 6) & 0x1f; - tmp = load_reg(s, rm); - gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0); - if (!s->condexec_mask) - gen_logic_CC(tmp); - store_reg(s, rd, tmp); - } - break; - case 2: case 3: - /* arithmetic large immediate */ - op = (insn >> 11) & 3; - rd = (insn >> 8) & 0x7; - if (op == 0) { /* mov */ - tmp = tcg_temp_new_i32(); - tcg_gen_movi_i32(tmp, insn & 0xff); - if (!s->condexec_mask) - gen_logic_CC(tmp); - store_reg(s, rd, tmp); - } else { - tmp = load_reg(s, rd); - tmp2 = tcg_temp_new_i32(); - tcg_gen_movi_i32(tmp2, insn & 0xff); - switch (op) { - case 1: /* cmp */ - gen_helper_sub_cc(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(tmp2); - break; - case 2: /* add */ - if (s->condexec_mask) - tcg_gen_add_i32(tmp, tmp, tmp2); - else - gen_helper_add_cc(tmp, tmp, tmp2); + rd = insn & 7; + op = (insn >> 11) & 3; + if (op == 3) { + /* add/subtract */ + rn = (insn >> 3) & 7; + tmp = load_reg(s, rn); + if (insn & (1 << 10)) { + /* immediate */ + tmp2 = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp2, (insn >> 6) & 7); + } else { + /* reg */ + rm = (insn >> 6) & 7; + tmp2 = load_reg(s, rm); + } + if (insn & (1 << 9)) { + if (s->condexec_mask) + tcg_gen_sub_i32(tmp, tmp, tmp2); + else + gen_helper_sub_cc(tmp, tmp, tmp2); + } else { + if (s->condexec_mask) + tcg_gen_add_i32(tmp, tmp, tmp2); + else + gen_helper_add_cc(tmp, tmp, tmp2); + } tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); - break; - case 3: /* sub */ - if (s->condexec_mask) - tcg_gen_sub_i32(tmp, tmp, tmp2); - else - gen_helper_sub_cc(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); + } else { + /* shift immediate */ + rm = (insn >> 3) & 7; + shift = (insn >> 6) & 0x1f; + tmp = load_reg(s, rm); + gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0); + if (!s->condexec_mask) + gen_logic_CC(tmp); store_reg(s, rd, tmp); - break; } - } - break; - case 4: - if (insn & (1 << 11)) { - rd = (insn >> 8) & 7; - /* load pc-relative. Bit 1 of PC is ignored. 
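Literal loads read the word-aligned pipeline PC: the address of the instruction plus 4, with bit 1 forced to zero (bit 0 is always clear in Thumb state). The address computation mirrored by the surrounding lines, as a sketch:

static uint32_t t16_ldr_literal_addr(uint32_t insn_addr, uint32_t imm8)
{
    /* s->pc already points past the insn, so the PC reads as insn_addr + 4 */
    return ((insn_addr + 4) & ~(uint32_t) 2) + imm8 * 4;
}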
*/ - val = s->pc + 2 + ((insn & 0xff) * 4); - val &= ~(uint32_t)2; - addr = tcg_temp_new_i32(); - tcg_gen_movi_i32(addr, val); - tmp = gen_ld32(addr, IS_USER(s)); - tcg_temp_free_i32(addr); - store_reg(s, rd, tmp); break; - } - if (insn & (1 << 10)) { - /* data processing extended or blx */ - rd = (insn & 7) | ((insn >> 4) & 8); - rm = (insn >> 3) & 0xf; - op = (insn >> 8) & 3; - switch (op) { - case 0: /* add */ - tmp = load_reg(s, rd); - tmp2 = load_reg(s, rm); - tcg_gen_add_i32(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); + case 2: + case 3: + /* arithmetic large immediate */ + op = (insn >> 11) & 3; + rd = (insn >> 8) & 0x7; + if (op == 0) { /* mov */ + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, insn & 0xff); + if (!s->condexec_mask) + gen_logic_CC(tmp); store_reg(s, rd, tmp); - break; - case 1: /* cmp */ + } else { tmp = load_reg(s, rd); - tmp2 = load_reg(s, rm); - gen_helper_sub_cc(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - tcg_temp_free_i32(tmp); - break; - case 2: /* mov/cpy */ - tmp = load_reg(s, rm); + tmp2 = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp2, insn & 0xff); + switch (op) { + case 1: /* cmp */ + gen_helper_sub_cc(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp); + tcg_temp_free_i32(tmp2); + break; + case 2: /* add */ + if (s->condexec_mask) + tcg_gen_add_i32(tmp, tmp, tmp2); + else + gen_helper_add_cc(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + break; + case 3: /* sub */ + if (s->condexec_mask) + tcg_gen_sub_i32(tmp, tmp, tmp2); + else + gen_helper_sub_cc(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + break; + } + } + break; + case 4: + if (insn & (1 << 11)) { + rd = (insn >> 8) & 7; + /* load pc-relative. Bit 1 of PC is ignored. */ + val = s->pc + 2 + ((insn & 0xff) * 4); + val &= ~(uint32_t) 2; + addr = tcg_temp_new_i32(); + tcg_gen_movi_i32(addr, val); + tmp = gen_ld32(addr, IS_USER(s)); + tcg_temp_free_i32(addr); store_reg(s, rd, tmp); break; - case 3:/* branch [and link] exchange thumb register */ - tmp = load_reg(s, rm); - if (insn & (1 << 7)) { - ARCH(5); - val = (uint32_t)s->pc | 1; - tmp2 = tcg_temp_new_i32(); - tcg_gen_movi_i32(tmp2, val); - store_reg(s, 14, tmp2); - gen_bx(s, tmp); - }else{ - gen_bx(s,tmp); - if(env->v7m.exception != 0&&IS_M(env)&&env->regs[14]>0xf0000000){ - // printf("interrupt pc=0x%x\n", env->regs[15]); - // printf("interrupt lr=0x%x\n", env->regs[14]); - gen_exception(EXCP_EXCEPTION_EXIT); - s->is_jmp=DISAS_UPDATE; - } + } + if (insn & (1 << 10)) { + /* data processing extended or blx */ + rd = (insn & 7) | ((insn >> 4) & 8); + rm = (insn >> 3) & 0xf; + op = (insn >> 8) & 3; + switch (op) { + case 0: /* add */ + tmp = load_reg(s, rd); + tmp2 = load_reg(s, rm); + tcg_gen_add_i32(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + store_reg(s, rd, tmp); + break; + case 1: /* cmp */ + tmp = load_reg(s, rd); + tmp2 = load_reg(s, rm); + gen_helper_sub_cc(tmp, tmp, tmp2); + tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp); + break; + case 2: /* mov/cpy */ + tmp = load_reg(s, rm); + store_reg(s, rd, tmp); + break; + case 3: /* branch [and link] exchange thumb register */ + tmp = load_reg(s, rm); + if (insn & (1 << 7)) { + ARCH(5); + val = (uint32_t) s->pc | 1; + tmp2 = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp2, val); + store_reg(s, 14, tmp2); + gen_bx(s, tmp); + } else { + gen_bx(s, tmp); + if (env->v7m.exception != 0 && IS_M(env) && env->regs[14] > 0xf0000000) { + // printf("interrupt pc=0x%x\n", env->regs[15]); + // printf("interrupt lr=0x%x\n", env->regs[14]); + 
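// Context for this hack: on v7-M exception entry the core loads LR with an
// EXC_RETURN magic value (0xFFFFFFF1 return to handler, 0xFFFFFFF9 thread
// mode on MSP, 0xFFFFFFFD thread mode on PSP), so a BX LR to such an
// address is an exception return rather than an ordinary branch. The
// env->regs[14] > 0xf0000000 test is a loose translation-time heuristic
// for those values, not an architecturally exact check.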
gen_exception(EXCP_EXCEPTION_EXIT); + s->is_jmp = DISAS_UPDATE; + } + } + break; } break; } - break; - } - - /* data processing register */ - rd = insn & 7; - rm = (insn >> 3) & 7; - op = (insn >> 6) & 0xf; - if (op == 2 || op == 3 || op == 4 || op == 7) { - /* the shift/rotate ops want the operands backwards */ - val = rm; - rm = rd; - rd = val; - val = 1; - } else { - val = 0; - } - - if (op == 9) { /* neg */ - tmp = tcg_temp_new_i32(); - tcg_gen_movi_i32(tmp, 0); - } else if (op != 0xf) { /* mvn doesn't read its first operand */ - tmp = load_reg(s, rd); - } else { - TCGV_UNUSED(tmp); - } - tmp2 = load_reg(s, rm); - switch (op) { - case 0x0: /* and */ - tcg_gen_and_i32(tmp, tmp, tmp2); - if (!s->condexec_mask) - gen_logic_CC(tmp); - break; - case 0x1: /* eor */ - tcg_gen_xor_i32(tmp, tmp, tmp2); - if (!s->condexec_mask) - gen_logic_CC(tmp); - break; - case 0x2: /* lsl */ - if (s->condexec_mask) { - gen_helper_shl(tmp2, tmp2, tmp); + /* data processing register */ + rd = insn & 7; + rm = (insn >> 3) & 7; + op = (insn >> 6) & 0xf; + if (op == 2 || op == 3 || op == 4 || op == 7) { + /* the shift/rotate ops want the operands backwards */ + val = rm; + rm = rd; + rd = val; + val = 1; } else { - gen_helper_shl_cc(tmp2, tmp2, tmp); - gen_logic_CC(tmp2); + val = 0; } - break; - case 0x3: /* lsr */ - if (s->condexec_mask) { - gen_helper_shr(tmp2, tmp2, tmp); + + if (op == 9) { /* neg */ + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, 0); + } else if (op != 0xf) { /* mvn doesn't read its first operand */ + tmp = load_reg(s, rd); } else { - gen_helper_shr_cc(tmp2, tmp2, tmp); - gen_logic_CC(tmp2); + TCGV_UNUSED(tmp); } - break; - case 0x4: /* asr */ - if (s->condexec_mask) { - gen_helper_sar(tmp2, tmp2, tmp); + + tmp2 = load_reg(s, rm); + switch (op) { + case 0x0: /* and */ + tcg_gen_and_i32(tmp, tmp, tmp2); + if (!s->condexec_mask) + gen_logic_CC(tmp); + break; + case 0x1: /* eor */ + tcg_gen_xor_i32(tmp, tmp, tmp2); + if (!s->condexec_mask) + gen_logic_CC(tmp); + break; + case 0x2: /* lsl */ + if (s->condexec_mask) { + gen_helper_shl(tmp2, tmp2, tmp); + } else { + gen_helper_shl_cc(tmp2, tmp2, tmp); + gen_logic_CC(tmp2); + } + break; + case 0x3: /* lsr */ + if (s->condexec_mask) { + gen_helper_shr(tmp2, tmp2, tmp); + } else { + gen_helper_shr_cc(tmp2, tmp2, tmp); + gen_logic_CC(tmp2); + } + break; + case 0x4: /* asr */ + if (s->condexec_mask) { + gen_helper_sar(tmp2, tmp2, tmp); + } else { + gen_helper_sar_cc(tmp2, tmp2, tmp); + gen_logic_CC(tmp2); + } + break; + case 0x5: /* adc */ + if (s->condexec_mask) + gen_adc(tmp, tmp2); + else + gen_helper_adc_cc(tmp, tmp, tmp2); + break; + case 0x6: /* sbc */ + if (s->condexec_mask) + gen_sub_carry(tmp, tmp, tmp2); + else + gen_helper_sbc_cc(tmp, tmp, tmp2); + break; + case 0x7: /* ror */ + if (s->condexec_mask) { + tcg_gen_andi_i32(tmp, tmp, 0x1f); + tcg_gen_rotr_i32(tmp2, tmp2, tmp); + } else { + gen_helper_ror_cc(tmp2, tmp2, tmp); + gen_logic_CC(tmp2); + } + break; + case 0x8: /* tst */ + tcg_gen_and_i32(tmp, tmp, tmp2); + gen_logic_CC(tmp); + rd = 16; + break; + case 0x9: /* neg */ + if (s->condexec_mask) + tcg_gen_neg_i32(tmp, tmp2); + else + gen_helper_sub_cc(tmp, tmp, tmp2); + break; + case 0xa: /* cmp */ + gen_helper_sub_cc(tmp, tmp, tmp2); + rd = 16; + break; + case 0xb: /* cmn */ + gen_helper_add_cc(tmp, tmp, tmp2); + rd = 16; + break; + case 0xc: /* orr */ + tcg_gen_or_i32(tmp, tmp, tmp2); + if (!s->condexec_mask) + gen_logic_CC(tmp); + break; + case 0xd: /* mul */ + tcg_gen_mul_i32(tmp, tmp, tmp2); + if (!s->condexec_mask) + 
gen_logic_CC(tmp); + break; + case 0xe: /* bic */ + tcg_gen_andc_i32(tmp, tmp, tmp2); + if (!s->condexec_mask) + gen_logic_CC(tmp); + break; + case 0xf: /* mvn */ + tcg_gen_not_i32(tmp2, tmp2); + if (!s->condexec_mask) + gen_logic_CC(tmp2); + val = 1; + rm = rd; + break; + } + if (rd != 16) { + if (val) { + store_reg(s, rm, tmp2); + if (op != 0xf) + tcg_temp_free_i32(tmp); + } else { + store_reg(s, rd, tmp); + tcg_temp_free_i32(tmp2); + } } else { - gen_helper_sar_cc(tmp2, tmp2, tmp); - gen_logic_CC(tmp2); + tcg_temp_free_i32(tmp); + tcg_temp_free_i32(tmp2); } break; - case 0x5: /* adc */ - if (s->condexec_mask) - gen_adc(tmp, tmp2); - else - gen_helper_adc_cc(tmp, tmp, tmp2); - break; - case 0x6: /* sbc */ - if (s->condexec_mask) - gen_sub_carry(tmp, tmp, tmp2); - else - gen_helper_sbc_cc(tmp, tmp, tmp2); + + case 5: + /* load/store register offset. */ + rd = insn & 7; + rn = (insn >> 3) & 7; + rm = (insn >> 6) & 7; + op = (insn >> 9) & 7; + addr = load_reg(s, rn); + tmp = load_reg(s, rm); + tcg_gen_add_i32(addr, addr, tmp); + tcg_temp_free_i32(tmp); + + if (op < 3) /* store */ + tmp = load_reg(s, rd); + + switch (op) { + case 0: /* str */ + gen_st32(tmp, addr, IS_USER(s)); + break; + case 1: /* strh */ + gen_st16(tmp, addr, IS_USER(s)); + break; + case 2: /* strb */ + gen_st8(tmp, addr, IS_USER(s)); + break; + case 3: /* ldrsb */ + tmp = gen_ld8s(addr, IS_USER(s)); + break; + case 4: /* ldr */ + tmp = gen_ld32(addr, IS_USER(s)); + break; + case 5: /* ldrh */ + tmp = gen_ld16u(addr, IS_USER(s)); + break; + case 6: /* ldrb */ + tmp = gen_ld8u(addr, IS_USER(s)); + break; + case 7: /* ldrsh */ + tmp = gen_ld16s(addr, IS_USER(s)); + break; + } + if (op >= 3) /* load */ + store_reg(s, rd, tmp); + tcg_temp_free_i32(addr); break; - case 0x7: /* ror */ - if (s->condexec_mask) { - tcg_gen_andi_i32(tmp, tmp, 0x1f); - tcg_gen_rotr_i32(tmp2, tmp2, tmp); + + case 6: + /* load/store word immediate offset */ + rd = insn & 7; + rn = (insn >> 3) & 7; + addr = load_reg(s, rn); + val = (insn >> 4) & 0x7c; + tcg_gen_addi_i32(addr, addr, val); + + if (insn & (1 << 11)) { + /* load */ + tmp = gen_ld32(addr, IS_USER(s)); + store_reg(s, rd, tmp); } else { - gen_helper_ror_cc(tmp2, tmp2, tmp); - gen_logic_CC(tmp2); + /* store */ + tmp = load_reg(s, rd); + gen_st32(tmp, addr, IS_USER(s)); } + tcg_temp_free_i32(addr); break; - case 0x8: /* tst */ - tcg_gen_and_i32(tmp, tmp, tmp2); - gen_logic_CC(tmp); - rd = 16; - break; - case 0x9: /* neg */ - if (s->condexec_mask) - tcg_gen_neg_i32(tmp, tmp2); - else - gen_helper_sub_cc(tmp, tmp, tmp2); - break; - case 0xa: /* cmp */ - gen_helper_sub_cc(tmp, tmp, tmp2); - rd = 16; - break; - case 0xb: /* cmn */ - gen_helper_add_cc(tmp, tmp, tmp2); - rd = 16; - break; - case 0xc: /* orr */ - tcg_gen_or_i32(tmp, tmp, tmp2); - if (!s->condexec_mask) - gen_logic_CC(tmp); - break; - case 0xd: /* mul */ - tcg_gen_mul_i32(tmp, tmp, tmp2); - if (!s->condexec_mask) - gen_logic_CC(tmp); - break; - case 0xe: /* bic */ - tcg_gen_andc_i32(tmp, tmp, tmp2); - if (!s->condexec_mask) - gen_logic_CC(tmp); - break; - case 0xf: /* mvn */ - tcg_gen_not_i32(tmp2, tmp2); - if (!s->condexec_mask) - gen_logic_CC(tmp2); - val = 1; - rm = rd; - break; - } - if (rd != 16) { - if (val) { - store_reg(s, rm, tmp2); - if (op != 0xf) - tcg_temp_free_i32(tmp); - } else { + + case 7: + /* load/store byte immediate offset */ + rd = insn & 7; + rn = (insn >> 3) & 7; + addr = load_reg(s, rn); + val = (insn >> 6) & 0x1f; + tcg_gen_addi_i32(addr, addr, val); + + if (insn & (1 << 11)) { + /* load */ + tmp = 
gen_ld8u(addr, IS_USER(s)); store_reg(s, rd, tmp); - tcg_temp_free_i32(tmp2); + } else { + /* store */ + tmp = load_reg(s, rd); + gen_st8(tmp, addr, IS_USER(s)); } - } else { - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(tmp2); - } - break; - - case 5: - /* load/store register offset. */ - rd = insn & 7; - rn = (insn >> 3) & 7; - rm = (insn >> 6) & 7; - op = (insn >> 9) & 7; - addr = load_reg(s, rn); - tmp = load_reg(s, rm); - tcg_gen_add_i32(addr, addr, tmp); - tcg_temp_free_i32(tmp); + tcg_temp_free_i32(addr); + break; - if (op < 3) /* store */ - tmp = load_reg(s, rd); + case 8: + /* load/store halfword immediate offset */ + rd = insn & 7; + rn = (insn >> 3) & 7; + addr = load_reg(s, rn); + val = (insn >> 5) & 0x3e; + tcg_gen_addi_i32(addr, addr, val); - switch (op) { - case 0: /* str */ - gen_st32(tmp, addr, IS_USER(s)); - break; - case 1: /* strh */ - gen_st16(tmp, addr, IS_USER(s)); - break; - case 2: /* strb */ - gen_st8(tmp, addr, IS_USER(s)); - break; - case 3: /* ldrsb */ - tmp = gen_ld8s(addr, IS_USER(s)); - break; - case 4: /* ldr */ - tmp = gen_ld32(addr, IS_USER(s)); - break; - case 5: /* ldrh */ - tmp = gen_ld16u(addr, IS_USER(s)); - break; - case 6: /* ldrb */ - tmp = gen_ld8u(addr, IS_USER(s)); - break; - case 7: /* ldrsh */ - tmp = gen_ld16s(addr, IS_USER(s)); + if (insn & (1 << 11)) { + /* load */ + tmp = gen_ld16u(addr, IS_USER(s)); + store_reg(s, rd, tmp); + } else { + /* store */ + tmp = load_reg(s, rd); + gen_st16(tmp, addr, IS_USER(s)); + } + tcg_temp_free_i32(addr); break; - } - if (op >= 3) /* load */ - store_reg(s, rd, tmp); - tcg_temp_free_i32(addr); - break; - - case 6: - /* load/store word immediate offset */ - rd = insn & 7; - rn = (insn >> 3) & 7; - addr = load_reg(s, rn); - val = (insn >> 4) & 0x7c; - tcg_gen_addi_i32(addr, addr, val); - if (insn & (1 << 11)) { - /* load */ - tmp = gen_ld32(addr, IS_USER(s)); - store_reg(s, rd, tmp); - } else { - /* store */ - tmp = load_reg(s, rd); - gen_st32(tmp, addr, IS_USER(s)); - } - tcg_temp_free_i32(addr); - break; + case 9: + /* load/store from stack */ + rd = (insn >> 8) & 7; + addr = load_reg(s, 13); + val = (insn & 0xff) * 4; + tcg_gen_addi_i32(addr, addr, val); - case 7: - /* load/store byte immediate offset */ - rd = insn & 7; - rn = (insn >> 3) & 7; - addr = load_reg(s, rn); - val = (insn >> 6) & 0x1f; - tcg_gen_addi_i32(addr, addr, val); + if (insn & (1 << 11)) { + /* load */ + tmp = gen_ld32(addr, IS_USER(s)); + store_reg(s, rd, tmp); + } else { + /* store */ + tmp = load_reg(s, rd); + gen_st32(tmp, addr, IS_USER(s)); + } + tcg_temp_free_i32(addr); + break; - if (insn & (1 << 11)) { - /* load */ - tmp = gen_ld8u(addr, IS_USER(s)); + case 10: + /* add to high reg */ + rd = (insn >> 8) & 7; + if (insn & (1 << 11)) { + /* SP */ + tmp = load_reg(s, 13); + } else { + /* PC. bit 1 is ignored. 
*/ + tmp = tcg_temp_new_i32(); + tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t) 2); + } + val = (insn & 0xff) * 4; + tcg_gen_addi_i32(tmp, tmp, val); store_reg(s, rd, tmp); - } else { - /* store */ - tmp = load_reg(s, rd); - gen_st8(tmp, addr, IS_USER(s)); - } - tcg_temp_free_i32(addr); - break; + break; - case 8: - /* load/store halfword immediate offset */ - rd = insn & 7; - rn = (insn >> 3) & 7; - addr = load_reg(s, rn); - val = (insn >> 5) & 0x3e; - tcg_gen_addi_i32(addr, addr, val); + case 11: + /* misc */ + op = (insn >> 8) & 0xf; + switch (op) { + case 0: + /* adjust stack pointer */ + tmp = load_reg(s, 13); + val = (insn & 0x7f) * 4; + if (insn & (1 << 7)) + val = -(int32_t) val; + tcg_gen_addi_i32(tmp, tmp, val); + store_reg(s, 13, tmp); + break; - if (insn & (1 << 11)) { - /* load */ - tmp = gen_ld16u(addr, IS_USER(s)); - store_reg(s, rd, tmp); - } else { - /* store */ - tmp = load_reg(s, rd); - gen_st16(tmp, addr, IS_USER(s)); - } - tcg_temp_free_i32(addr); - break; + case 2: /* sign/zero extend. */ + ARCH(6); + rd = insn & 7; + rm = (insn >> 3) & 7; + tmp = load_reg(s, rm); + switch ((insn >> 6) & 3) { + case 0: + gen_sxth(tmp); + break; + case 1: + gen_sxtb(tmp); + break; + case 2: + gen_uxth(tmp); + break; + case 3: + gen_uxtb(tmp); + break; + } + store_reg(s, rd, tmp); + break; + case 4: + case 5: + case 0xc: + case 0xd: + /* push/pop */ + addr = load_reg(s, 13); + if (insn & (1 << 8)) + offset = 4; + else + offset = 0; + for (i = 0; i < 8; i++) { + if (insn & (1 << i)) + offset += 4; + } + if ((insn & (1 << 11)) == 0) { + tcg_gen_addi_i32(addr, addr, -offset); + } + for (i = 0; i < 8; i++) { + if (insn & (1 << i)) { + if (insn & (1 << 11)) { + /* pop */ + tmp = gen_ld32(addr, IS_USER(s)); + store_reg(s, i, tmp); + } else { + /* push */ + tmp = load_reg(s, i); + gen_st32(tmp, addr, IS_USER(s)); + } + /* advance to the next address. */ + tcg_gen_addi_i32(addr, addr, 4); + } + } + TCGV_UNUSED(tmp); + if (insn & (1 << 8)) { + if (insn & (1 << 11)) { + /* pop pc */ + tmp = gen_ld32(addr, IS_USER(s)); + /* don't set the pc until the rest of the instruction + has completed */ + } else { + /* push lr */ + tmp = load_reg(s, 14); + gen_st32(tmp, addr, IS_USER(s)); + } + tcg_gen_addi_i32(addr, addr, 4); + } + if ((insn & (1 << 11)) == 0) { + tcg_gen_addi_i32(addr, addr, -offset); + } + /* write back the new stack pointer */ + store_reg(s, 13, addr); + /* set the new PC value */ + if ((insn & 0x0900) == 0x0900) { + store_reg_from_load(env, s, 15, tmp); + // To find how many other regs pop with pc + if (env->v7m.exception != 0 && IS_M(env)) { + count = 0; + for (k = 0; k < 8; k++) { + if ((insn & (1 << k)) != 0) + count++; + } + val = ldl_phys(env->regs[13] + count * 4); + // if pop pc is EXC_RETURN invode interrupt exit. 
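// The popped PC sits `count` words above the current SP because the
// lower-numbered registers in the list are popped first; ldl_phys peeks at
// that stack slot at translation time, and any value above 0xffff0000 is
// treated as an EXC_RETURN, i.e. the pop is an interrupt exit.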
+ if (val > 0xffff0000) { + gen_exception(EXCP_EXCEPTION_EXIT); + s->is_jmp = DISAS_UPDATE; + } + } + } + break; - case 9: - /* load/store from stack */ - rd = (insn >> 8) & 7; - addr = load_reg(s, 13); - val = (insn & 0xff) * 4; - tcg_gen_addi_i32(addr, addr, val); + case 1: + case 3: + case 9: + case 11: /* czb */ + rm = insn & 7; + tmp = load_reg(s, rm); + s->condlabel = gen_new_label(); + s->condjmp = 1; + if (insn & (1 << 11)) + tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel); + else + tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel); + tcg_temp_free_i32(tmp); + offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3; + val = (uint32_t) s->pc + 2; + val += offset; + gen_jmp(s, val); + break; - if (insn & (1 << 11)) { - /* load */ - tmp = gen_ld32(addr, IS_USER(s)); - store_reg(s, rd, tmp); - } else { - /* store */ - tmp = load_reg(s, rd); - gen_st32(tmp, addr, IS_USER(s)); - } - tcg_temp_free_i32(addr); - break; + case 15: /* IT, nop-hint. */ + if ((insn & 0xf) == 0) { + gen_nop_hint(s, (insn >> 4) & 0xf); + break; + } + /* If Then. */ + s->condexec_cond = (insn >> 4) & 0xe; + s->condexec_mask = insn & 0x1f; + /* No actual code generated for this insn, just setup state. */ + break; - case 10: - /* add to high reg */ - rd = (insn >> 8) & 7; - if (insn & (1 << 11)) { - /* SP */ - tmp = load_reg(s, 13); - } else { - /* PC. bit 1 is ignored. */ - tmp = tcg_temp_new_i32(); - tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2); - } - val = (insn & 0xff) * 4; - tcg_gen_addi_i32(tmp, tmp, val); - store_reg(s, rd, tmp); - break; + case 0xe: /* bkpt */ + ARCH(5); + gen_exception_insn(s, 2, EXCP_BKPT); + break; - case 11: - /* misc */ - op = (insn >> 8) & 0xf; - switch (op) { - case 0: - /* adjust stack pointer */ - tmp = load_reg(s, 13); - val = (insn & 0x7f) * 4; - if (insn & (1 << 7)) - val = -(int32_t)val; - tcg_gen_addi_i32(tmp, tmp, val); - store_reg(s, 13, tmp); - break; + case 0xa: /* rev */ + ARCH(6); + rn = (insn >> 3) & 0x7; + rd = insn & 0x7; + tmp = load_reg(s, rn); + switch ((insn >> 6) & 3) { + case 0: + tcg_gen_bswap32_i32(tmp, tmp); + break; + case 1: + gen_rev16(tmp); + break; + case 3: + gen_revsh(tmp); + break; + default: + goto illegal_op; + } + store_reg(s, rd, tmp); + break; - case 2: /* sign/zero extend. */ - ARCH(6); - rd = insn & 7; - rm = (insn >> 3) & 7; - tmp = load_reg(s, rm); - switch ((insn >> 6) & 3) { - case 0: gen_sxth(tmp); break; - case 1: gen_sxtb(tmp); break; - case 2: gen_uxth(tmp); break; - case 3: gen_uxtb(tmp); break; + case 6: + switch ((insn >> 5) & 7) { + case 2: + /* setend */ + ARCH(6); + if (((insn >> 3) & 1) != s->bswap_code) { + /* Dynamic endianness switching not implemented. 
*/ + goto illegal_op; + } + break; + case 3: + /* cps */ + ARCH(6); + if (IS_USER(s)) { + break; + } + if (IS_M(env)) { + tmp = tcg_const_i32((insn & (1 << 4)) != 0); + /* FAULTMASK */ + if (insn & 1) { + addr = tcg_const_i32(19); + gen_helper_v7m_msr(cpu_env, addr, tmp); + tcg_temp_free_i32(addr); + } + /* PRIMASK */ + if (insn & 2) { + addr = tcg_const_i32(16); + gen_helper_v7m_msr(cpu_env, addr, tmp); + tcg_temp_free_i32(addr); + } + tcg_temp_free_i32(tmp); + gen_lookup_tb(s); + } else { + if (insn & (1 << 4)) { + shift = CPSR_A | CPSR_I | CPSR_F; + } else { + shift = 0; + } + gen_set_psr_im(s, ((insn & 7) << 6), 0, shift); + } + break; + default: + goto undef; + } + break; + + default: + goto undef; } - store_reg(s, rd, tmp); break; - case 4: case 5: case 0xc: case 0xd: - /* push/pop */ - addr = load_reg(s, 13); - if (insn & (1 << 8)) - offset = 4; - else - offset = 0; - for (i = 0; i < 8; i++) { - if (insn & (1 << i)) - offset += 4; - } - if ((insn & (1 << 11)) == 0) { - tcg_gen_addi_i32(addr, addr, -offset); - } + + case 12: { + /* load/store multiple */ + TCGv loaded_var; + TCGV_UNUSED(loaded_var); + rn = (insn >> 8) & 0x7; + addr = load_reg(s, rn); for (i = 0; i < 8; i++) { if (insn & (1 << i)) { if (insn & (1 << 11)) { - /* pop */ + /* load */ tmp = gen_ld32(addr, IS_USER(s)); - store_reg(s, i, tmp); + if (i == rn) { + loaded_var = tmp; + } else { + store_reg(s, i, tmp); + } } else { - /* push */ + /* store */ tmp = load_reg(s, i); gen_st32(tmp, addr, IS_USER(s)); } - /* advance to the next address. */ + /* advance to the next address */ tcg_gen_addi_i32(addr, addr, 4); } } - TCGV_UNUSED(tmp); - if (insn & (1 << 8)) { + if ((insn & (1 << rn)) == 0) { + /* base reg not in list: base register writeback */ + store_reg(s, rn, addr); + } else { + /* base reg in list: if load, complete it now */ if (insn & (1 << 11)) { - /* pop pc */ - tmp = gen_ld32(addr, IS_USER(s)); - /* don't set the pc until the rest of the instruction - has completed */ - } else { - /* push lr */ - tmp = load_reg(s, 14); - gen_st32(tmp, addr, IS_USER(s)); - } - tcg_gen_addi_i32(addr, addr, 4); - } - if ((insn & (1 << 11)) == 0) { - tcg_gen_addi_i32(addr, addr, -offset); - } - /* write back the new stack pointer */ - store_reg(s, 13, addr); - /* set the new PC value */ - if ((insn & 0x0900) == 0x0900) { - store_reg_from_load(env, s, 15, tmp); - // To find how many other regs pop with pc - if(env->v7m.exception != 0&&IS_M(env)){ - count=0; - for(k=0;k<8;k++){ - if((insn & (1 << k)) != 0) - count++; - } - val = ldl_phys(env->regs[13]+count*4); - // if pop pc is EXC_RETURN invode interrupt exit. - if(val>0xffff0000){ - gen_exception(EXCP_EXCEPTION_EXIT); - s->is_jmp=DISAS_UPDATE; - } + store_reg(s, rn, loaded_var); } + tcg_temp_free_i32(addr); } break; + } + case 13: + /* conditional branch or swi */ + cond = (insn >> 8) & 0xf; + if (cond == 0xe) + goto undef; - case 1: case 3: case 9: case 11: /* czb */ - rm = insn & 7; - tmp = load_reg(s, rm); - s->condlabel = gen_new_label(); - s->condjmp = 1; - if (insn & (1 << 11)) - tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel); - else - tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel); - tcg_temp_free_i32(tmp); - offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3; - val = (uint32_t)s->pc + 2; - val += offset; - gen_jmp(s, val); - break; - - case 15: /* IT, nop-hint. */ - if ((insn & 0xf) == 0) { - gen_nop_hint(s, (insn >> 4) & 0xf); + if (cond == 0xf) { + /* swi */ + gen_set_pc_im(s->pc); + s->is_jmp = DISAS_SWI; break; } - /* If Then. 
*/ - s->condexec_cond = (insn >> 4) & 0xe; - s->condexec_mask = insn & 0x1f; - /* No actual code generated for this insn, just setup state. */ - break; - - case 0xe: /* bkpt */ - ARCH(5); - gen_exception_insn(s, 2, EXCP_BKPT); - break; - - case 0xa: /* rev */ - ARCH(6); - rn = (insn >> 3) & 0x7; - rd = insn & 0x7; - tmp = load_reg(s, rn); - switch ((insn >> 6) & 3) { - case 0: tcg_gen_bswap32_i32(tmp, tmp); break; - case 1: gen_rev16(tmp); break; - case 3: gen_revsh(tmp); break; - default: goto illegal_op; - } - store_reg(s, rd, tmp); - break; + /* generate a conditional jump to next instruction */ + s->condlabel = gen_new_label(); + gen_test_cc(cond ^ 1, s->condlabel); + s->condjmp = 1; - case 6: - switch ((insn >> 5) & 7) { - case 2: - /* setend */ - ARCH(6); - if (((insn >> 3) & 1) != s->bswap_code) { - /* Dynamic endianness switching not implemented. */ - goto illegal_op; - } - break; - case 3: - /* cps */ - ARCH(6); - if (IS_USER(s)) { - break; - } - if (IS_M(env)) { - tmp = tcg_const_i32((insn & (1 << 4)) != 0); - /* FAULTMASK */ - if (insn & 1) { - addr = tcg_const_i32(19); - gen_helper_v7m_msr(cpu_env, addr, tmp); - tcg_temp_free_i32(addr); - } - /* PRIMASK */ - if (insn & 2) { - addr = tcg_const_i32(16); - gen_helper_v7m_msr(cpu_env, addr, tmp); - tcg_temp_free_i32(addr); - } - tcg_temp_free_i32(tmp); - gen_lookup_tb(s); - } else { - if (insn & (1 << 4)) { - shift = CPSR_A | CPSR_I | CPSR_F; - } else { - shift = 0; - } - gen_set_psr_im(s, ((insn & 7) << 6), 0, shift); - } - break; - default: - goto undef; - } + /* jump to the offset */ + val = (uint32_t) s->pc + 2; + offset = ((int32_t) insn << 24) >> 24; + val += offset << 1; + gen_jmp(s, val); break; - default: - goto undef; - } - break; - - case 12: - { - /* load/store multiple */ - TCGv loaded_var; - TCGV_UNUSED(loaded_var); - rn = (insn >> 8) & 0x7; - addr = load_reg(s, rn); - for (i = 0; i < 8; i++) { - if (insn & (1 << i)) { - if (insn & (1 << 11)) { - /* load */ - tmp = gen_ld32(addr, IS_USER(s)); - if (i == rn) { - loaded_var = tmp; - } else { - store_reg(s, i, tmp); - } - } else { - /* store */ - tmp = load_reg(s, i); - gen_st32(tmp, addr, IS_USER(s)); - } - /* advance to the next address */ - tcg_gen_addi_i32(addr, addr, 4); - } - } - if ((insn & (1 << rn)) == 0) { - /* base reg not in list: base register writeback */ - store_reg(s, rn, addr); - } else { - /* base reg in list: if load, complete it now */ + case 14: if (insn & (1 << 11)) { - store_reg(s, rn, loaded_var); + if (disas_thumb2_insn(env, s, insn)) + goto undef32; + break; } - tcg_temp_free_i32(addr); - } - break; - } - case 13: - /* conditional branch or swi */ - cond = (insn >> 8) & 0xf; - if (cond == 0xe) - goto undef; - - if (cond == 0xf) { - /* swi */ - gen_set_pc_im(s->pc); - s->is_jmp = DISAS_SWI; + /* unconditional branch */ + val = (uint32_t) s->pc; + offset = ((int32_t) insn << 21) >> 21; + val += (offset << 1) + 2; + gen_jmp(s, val); break; - } - /* generate a conditional jump to next instruction */ - s->condlabel = gen_new_label(); - gen_test_cc(cond ^ 1, s->condlabel); - s->condjmp = 1; - - /* jump to the offset */ - val = (uint32_t)s->pc + 2; - offset = ((int32_t)insn << 24) >> 24; - val += offset << 1; - gen_jmp(s, val); - break; - case 14: - if (insn & (1 << 11)) { + case 15: if (disas_thumb2_insn(env, s, insn)) - goto undef32; + goto undef32; break; - } - /* unconditional branch */ - val = (uint32_t)s->pc; - offset = ((int32_t)insn << 21) >> 21; - val += (offset << 1) + 2; - gen_jmp(s, val); - break; - - case 15: - if (disas_thumb2_insn(env, 
s, insn)) - goto undef32; - break; } return; undef32: @@ -9897,10 +10211,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) /* generate intermediate code in gen_opc_buf and gen_opparam_buf for basic block 'tb'. If search_pc is TRUE, also generate PC information for each intermediate instruction. */ -static inline void gen_intermediate_code_internal(CPUARMState *env, - TranslationBlock *tb, - int search_pc) -{ +static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationBlock *tb, int search_pc) { DisasContext dc1, *dc = &dc1; CPUBreakpoint *bp; uint16_t *gen_opc_end; @@ -9946,7 +10257,7 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, if (max_insns == 0) max_insns = CF_COUNT_MASK; - //gen_icount_start(); + // gen_icount_start(); tcg_clear_temp_count(); @@ -9983,12 +10294,11 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, /* Reset the conditional execution bits immediately. This avoids complications trying to do it at the end of the block. */ - if (dc->condexec_mask || dc->condexec_cond) - { + if (dc->condexec_mask || dc->condexec_cond) { TCGv tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, 0); store_cpu_field(tmp, condexec_bits); - } + } do { #ifdef CONFIG_USER_ONLY /* Intercept jump to the magic kernel page. */ @@ -10000,18 +10310,18 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, break; } #else - /* We move this judgement to the previous round by using - * function gen_exception(EXCP_EXCEPTION_EXIT) as same as Qemu 3.0 */ - /* if (dc->pc >= 0xfffffff0 && IS_M(env)) { - gen_exception(EXCP_EXCEPTION_EXIT); - dc->is_jmp = DISAS_UPDATE; - break; - } - */ +/* We move this judgement to the previous round by using + * function gen_exception(EXCP_EXCEPTION_EXIT) as same as Qemu 3.0 */ +/* if (dc->pc >= 0xfffffff0 && IS_M(env)) { + gen_exception(EXCP_EXCEPTION_EXIT); + dc->is_jmp = DISAS_UPDATE; + break; + } +*/ #endif if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) { - QTAILQ_FOREACH(bp, &env->breakpoints, entry) { + QTAILQ_FOREACH (bp, &env->breakpoints, entry) { if (bp->pc == dc->pc) { gen_exception_insn(dc, 0, EXCP_DEBUG); /* Advance PC so that clearing the breakpoint will @@ -10035,18 +10345,17 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, gen_opc_icount[lj] = num_insns; } - //if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) - //gen_io_start(); + // if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) + // gen_io_start(); -// if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) { -// tcg_gen_debug_insn_start(dc->pc); -// } + // if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) { + // tcg_gen_debug_insn_start(dc->pc); + // } if (dc->thumb) { disas_thumb_insn(env, dc); if (dc->condexec_mask) { - dc->condexec_cond = (dc->condexec_cond & 0xe) - | ((dc->condexec_mask >> 4) & 1); + dc->condexec_cond = (dc->condexec_cond & 0xe) | ((dc->condexec_mask >> 4) & 1); dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f; if (dc->condexec_mask == 0) { dc->condexec_cond = 0; @@ -10069,12 +10378,9 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, * Otherwise the subsequent code could get translated several times. * Also stop translation when a page boundary is reached. This * ensures prefetch aborts occur at the right place. 
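The condexec update above is the architectural ITAdvance() step. At decode time firstcond[3:1] went into condexec_cond and firstcond[0] into bit 4 of the 5-bit condexec_mask, so stepping the IT block is one shift plus one bit move. As a standalone sketch:

static void it_advance(uint32_t *cond, uint32_t *mask)
{
    /* bit 4 of the mask is the then/else bit of the next slot */
    *cond = (*cond & 0xe) | ((*mask >> 4) & 1);
    *mask = (*mask << 1) & 0x1f; /* mask == 0: IT block finished */
}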
*/ - num_insns ++; - } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end && - !env->singlestep_enabled && - !singlestep && - dc->pc < next_page_start && - num_insns < max_insns); + num_insns++; + } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end && !env->singlestep_enabled && !singlestep && + dc->pc < next_page_start && num_insns < max_insns); if (tb->cflags & CF_LAST_IO) { if (dc->condjmp) { @@ -10082,7 +10388,7 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, code. */ cpu_abort(env, "IO on conditional branch instruction"); } - //gen_io_end(); + // gen_io_end(); } /* At this stage dc->condjmp will only be set when the skipped @@ -10121,25 +10427,25 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, Hardware breakpoints have already been handled and skip this code. */ gen_set_condexec(dc); - switch(dc->is_jmp) { - case DISAS_NEXT: - gen_goto_tb(dc, 1, dc->pc); - break; - default: - case DISAS_JUMP: - case DISAS_UPDATE: - /* indicate that the hash table must be used to find the next TB */ - tcg_gen_exit_tb(0); - break; - case DISAS_TB_JUMP: - /* nothing more to generate */ - break; - case DISAS_WFI: - gen_helper_wfi(); - break; - case DISAS_SWI: - gen_exception(EXCP_SWI); - break; + switch (dc->is_jmp) { + case DISAS_NEXT: + gen_goto_tb(dc, 1, dc->pc); + break; + default: + case DISAS_JUMP: + case DISAS_UPDATE: + /* indicate that the hash table must be used to find the next TB */ + tcg_gen_exit_tb(0); + break; + case DISAS_TB_JUMP: + /* nothing more to generate */ + break; + case DISAS_WFI: + gen_helper_wfi(); + break; + case DISAS_SWI: + gen_exception(EXCP_SWI); + break; } if (dc->condjmp) { gen_set_label(dc->condlabel); @@ -10150,18 +10456,18 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, } done_generating: - //gen_icount_end(tb, num_insns); + // gen_icount_end(tb, num_insns); *gen_opc_ptr = INDEX_op_end; -//#ifdef DEBUG_DISAS -// if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { -// qemu_log("----------------\n"); -// qemu_log("IN: %s\n", lookup_symbol(pc_start)); -// log_target_disas(pc_start, dc->pc - pc_start, -// dc->thumb | (dc->bswap_code << 1)); -// qemu_log("\n"); -// } -//#endif + //#ifdef DEBUG_DISAS + // if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { + // qemu_log("----------------\n"); + // qemu_log("IN: %s\n", lookup_symbol(pc_start)); + // log_target_disas(pc_start, dc->pc - pc_start, + // dc->thumb | (dc->bswap_code << 1)); + // qemu_log("\n"); + // } + //#endif if (search_pc) { j = gen_opc_ptr - gen_opc_buf; lj++; @@ -10173,24 +10479,18 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, } } -void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb) -{ +void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb) { gen_intermediate_code_internal(env, tb, 0); } -void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb) -{ +void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb) { gen_intermediate_code_internal(env, tb, 1); } -static const char *cpu_mode_names[16] = { - "usr", "fiq", "irq", "svc", "???", "???", "???", "abt", - "???", "???", "???", "und", "???", "???", "???", "sys" -}; +static const char *cpu_mode_names[16] = {"usr", "fiq", "irq", "svc", "???", "???", "???", "abt", + "???", "???", "???", "und", "???", "???", "???", "sys"}; -void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf, - int flags) -{ +void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf, int flags) { int i; #if 0 union { @@ -10207,7 
+10507,7 @@ void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf, #endif uint32_t psr; - for(i=0;i<16;i++) { + for (i = 0; i < 16; i++) { cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]); if ((i % 4) == 3) cpu_fprintf(f, "\n"); @@ -10215,13 +10515,8 @@ void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf, cpu_fprintf(f, " "); } psr = cpsr_read(env); - cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n", - psr, - psr & (1 << 31) ? 'N' : '-', - psr & (1 << 30) ? 'Z' : '-', - psr & (1 << 29) ? 'C' : '-', - psr & (1 << 28) ? 'V' : '-', - psr & CPSR_T ? 'T' : 'A', + cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n", psr, psr & (1 << 31) ? 'N' : '-', psr & (1 << 30) ? 'Z' : '-', + psr & (1 << 29) ? 'C' : '-', psr & (1 << 28) ? 'V' : '-', psr & CPSR_T ? 'T' : 'A', cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26); #if 0 @@ -10240,8 +10535,7 @@ void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf, #endif } -void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos) -{ +void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos) { env->regs[15] = gen_opc_pc[pc_pos]; env->condexec_bits = gen_opc_condexec_bits[pc_pos]; } From c6530a948d5a01010131e5b31afbaf8bb2466b29 Mon Sep 17 00:00:00 2001 From: chaojixx Date: Fri, 1 Nov 2019 20:29:48 -0400 Subject: [PATCH 08/59] Made arm library usable from C++ code Signed-off-by: chaojixx --- include/cpu/arm/cpu.h | 8 ++++++++ include/cpu/arm/defs.h | 7 +++++++ 2 files changed, 15 insertions(+) diff --git a/include/cpu/arm/cpu.h b/include/cpu/arm/cpu.h index aa39808..cc7027c 100644 --- a/include/cpu/arm/cpu.h +++ b/include/cpu/arm/cpu.h @@ -32,6 +32,10 @@ #include "defs.h" +#ifdef __cplusplus +extern "C" { +#endif + typedef void ARMWriteCPFunc(void *opaque, int cp_info, int srcreg, int operand, uint32_t value); typedef uint32_t ARMReadCPFunc(void *opaque, int cp_info, int dstreg, int operand); @@ -254,4 +258,8 @@ enum arm_cpu_mode { static inline int cpu_mmu_index(CPUARMState *env) { return (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR ? 
1 : 0; } +#ifdef __cplusplus +} +#endif + #endif diff --git a/include/cpu/arm/defs.h b/include/cpu/arm/defs.h index 8164712..d7dd2b0 100644 --- a/include/cpu/arm/defs.h +++ b/include/cpu/arm/defs.h @@ -19,6 +19,9 @@ #ifndef __CPU_ARM_DEFS__ #define __CPU_ARM_DEFS__ +#ifdef __cplusplus +extern "C" { +#endif // clang-format off /*******************************************/ @@ -31,4 +34,8 @@ #define TARGET_HAS_ICE 1 +#ifdef __cplusplus +} +#endif + #endif From c8cf7dc93233f3f4d248347e127e942393bece1d Mon Sep 17 00:00:00 2001 From: chaojixx Date: Thu, 7 Nov 2019 10:15:34 -0500 Subject: [PATCH 09/59] fpu:no support symbex mode for arm Signed-off-by: chaojixx --- include/fpu/softfloat.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h index 238a6ef..0ab9884 100644 --- a/include/fpu/softfloat.h +++ b/include/fpu/softfloat.h @@ -77,11 +77,19 @@ typedef int64_t int64; #define STATUS_VAR , status #if defined(CONFIG_SYMBEX) && !defined(SYMBEX_LLVM_LIB) +#if defined(TARGET_I386) || defined(TARGET_X86_64) uint8_t RR_cpu_float_status(void *p, unsigned size); void WR_cpu_float_status(void *p, unsigned size, int v); #define STATUS(field) RR_cpu_float_status(&status->field, sizeof(status->field)) #define STATUS_W(field, v) WR_cpu_float_status(&status->field, sizeof(status->field), v) +#elif defined(TARGET_ARM) +#define STATUS(field) status->field +#define STATUS_W(field, v) status->field = v +#else +#error Unsupported target architecture +#endif + #else #define STATUS(field) status->field #define STATUS_W(field, v) status->field = v From 5a50aad89c9c86ae5edba78176d3cca9d29167d7 Mon Sep 17 00:00:00 2001 From: chaojixx Date: Thu, 7 Nov 2019 10:21:51 -0500 Subject: [PATCH 10/59] fix ram_access for compatibility Signed-off-by: chaojixx --- src/cpu-all.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cpu-all.h b/src/cpu-all.h index 452b594..c04c412 100644 --- a/src/cpu-all.h +++ b/src/cpu-all.h @@ -174,7 +174,7 @@ static inline int _se_check_concrete(void *objectState, target_ulong offset, int static inline void *_se_check_translate_ram_access(const void *p, unsigned size) { #if defined(SE_ENABLE_PHYSRAM_TLB) extern CPUArchState *env; - uintptr_t tlb_index = ((uintptr_t) p >> 12) & (CPU_TLB_SIZE - 1); + uintptr_t tlb_index = ((uintptr_t) p >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); CPUTLBRAMEntry *re = &env->se_ram_tlb[tlb_index]; if (re->host_page == (((uintptr_t) p) & (~(uintptr_t) 0xfff | (size - 1)))) { return (void *) ((uintptr_t) p + re->addend); From 95d083aea186a5cba4c214d1bb1d6f5b5f4e2965 Mon Sep 17 00:00:00 2001 From: chaojixx Date: Sat, 16 Nov 2019 17:49:36 -0500 Subject: [PATCH 11/59] io_read: fix mmio_access return Signed-off-by: chaojixx --- src/softmmu_template.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/softmmu_template.h b/src/softmmu_template.h index 8119308..c6f87b1 100644 --- a/src/softmmu_template.h +++ b/src/softmmu_template.h @@ -234,7 +234,7 @@ DATA_TYPE glue(glue(io_read_chk, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_p res.res = glue(glue(io_read, SUFFIX), MMUSUFFIX)(env, origaddr, addr, retaddr); end: - tcg_llvm_trace_mmio_access(addr, res.res, DATA_SIZE, 0); + res.res = tcg_llvm_trace_mmio_access(addr, res.res, DATA_SIZE, 0); SE_SET_MEM_IO_VADDR(env, 0, 1); return res.res; From b5556c287fb87374bedaefbc4db09c9e6f23eb72 Mon Sep 17 00:00:00 2001 From: chaojixx Date: Mon, 2 Dec 2019 17:50:12 -0500 Subject: [PATCH 12/59] target-arm/translate: fix exc_return value Signed-off-by: 
chaojixx
---
 src/target-arm/translate.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/src/target-arm/translate.c b/src/target-arm/translate.c
index 6b534ad..3312b83 100644
--- a/src/target-arm/translate.c
+++ b/src/target-arm/translate.c
@@ -9618,9 +9618,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) {
                 gen_bx(s, tmp);
             } else {
                 gen_bx(s, tmp);
-                if (env->v7m.exception != 0 && IS_M(env) && env->regs[14] > 0xf0000000) {
-                    // printf("interrupt pc=0x%x\n", env->regs[15]);
-                    // printf("interrupt lr=0x%x\n", env->regs[14]);
+                if (env->v7m.exception != 0 && IS_M(env) && env->regs[14] >= 0xfffffff0) {
                     gen_exception(EXCP_EXCEPTION_EXIT);
                     s->is_jmp = DISAS_UPDATE;
                 }
@@ -10005,7 +10003,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) {
                 }
                 val = ldl_phys(env->regs[13] + count * 4);
                 // if pop pc is EXC_RETURN invode interrupt exit.
-                if (val > 0xffff0000) {
+                if (val >= 0xfffffff0) {
                     gen_exception(EXCP_EXCEPTION_EXIT);
                     s->is_jmp = DISAS_UPDATE;
                 }

From 3789ea0c738d45459987c4b5c4bed2e7604360ac Mon Sep 17 00:00:00 2001
From: chaojixx
Date: Mon, 2 Dec 2019 17:38:42 -0500
Subject: [PATCH 13/59] log: add logging and log definitions

Signed-off-by: chaojixx
---
 src/cpu-exec.c             | 19 ++++++++++---------
 src/target-arm/helper.c    | 24 ++++++++++++++++++++++--
 src/target-arm/translate.c |  9 +++++++++
 src/translate-all.c        |  8 ++++++++
 4 files changed, 49 insertions(+), 11 deletions(-)

diff --git a/src/cpu-exec.c b/src/cpu-exec.c
index 4cb018e..f3e74b5 100644
--- a/src/cpu-exec.c
+++ b/src/cpu-exec.c
@@ -28,7 +28,7 @@
 #define barrier() asm volatile("" ::: "memory")

-// #define DEBUG_EXEC
+#define DEBUG_EXEC
 // #define TRACE_EXEC

 #ifdef DEBUG_EXEC
@@ -162,8 +162,6 @@ static inline TranslationBlock *tb_find_fast(CPUArchState *env) {
     }
 #endif

-    DPRINTF("Current pc=0x%x: \n", env->regs[15]);
-
     /* we record a subset of the CPU state. It will always be
        the same before a given translated block is executed. */
@@ -450,12 +448,15 @@ static bool process_interrupt_request(CPUArchState *env) {
     // in case lower prioriy interrupt so add armv7m_nvic_can_take_pending_exception
     // in case basepri has not been synced so add exit code condition
-    if (interrupt_request & CPU_INTERRUPT_HARD &&
-        ((IS_M(env) && env->regs[15] < 0xfffffff0) || !(env->uncached_cpsr & CPSR_I)) &&
-        (armv7m_nvic_can_take_pending_exception(env->nvic)) && (env->kvm_exit_code == 0)) {
-        env->exception_index = EXCP_IRQ;
-        do_interrupt(env);
-        has_interrupt = true;
+    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
+        ((IS_M(env) && env->regs[15] < 0xfffffff0) || !(env->uncached_cpsr & CPSR_I))) {
+        if ((armv7m_nvic_can_take_pending_exception(env->nvic)) && (env->kvm_exit_code == 0)) {
+            env->exception_index = EXCP_IRQ;
+            do_interrupt(env);
+            has_interrupt = true;
+        } else {
+            DPRINTF("cpu basepri = %d take_exc = %d kvm_exit_code = %d\n", env->v7m.basepri, armv7m_nvic_can_take_pending_exception(env->nvic), env->kvm_exit_code);
+        }
     }
 #endif
     /* Don't use the cached interrupt_request value,
diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c
index 819e535..c835f5b 100644
--- a/src/target-arm/helper.c
+++ b/src/target-arm/helper.c
@@ -26,6 +26,14 @@
 #include "host-utils.h"

+#define DEBUG_HELPER
+
+#ifdef DEBUG_HELPER
+#define HPRINTF(...) printf(__VA_ARGS__)
+#else
+#define HPRINTF(...)
+#endif + int semihosting_enabled = 0; int smp_cpus = 1; @@ -676,6 +684,8 @@ static void switch_v7m_sp(CPUARMState *env, int process) { env->v7m.other_sp = env->regs[13]; env->regs[13] = tmp; env->v7m.current_sp = process; + } else { + HPRINTF(" already in handle mode current_sp = 0x%x, other_sp = 0x%x", env->v7m.current_sp, env->v7m.other_sp); } } @@ -696,6 +706,7 @@ static void do_v7m_exception_exit(CPUARMState *env) { env->regs[3] = v7m_pop(env); env->regs[12] = v7m_pop(env); env->regs[14] = v7m_pop(env); + HPRINTF(" interrupt exit r13 = 0x%x r15 = 0x%x\n", env->regs[13], env->regs[15]); env->regs[15] = v7m_pop(env); xpsr = v7m_pop(env); xpsr_write(env, xpsr, 0xfffffdff); @@ -722,7 +733,7 @@ void do_interrupt_v7m(CPUARMState *env) { lr |= 4; if (env->v7m.exception == 0) lr |= 8; - // printf("interreput = 0x%x\n",env->exception_index); + // HPRINTF("interreput = 0x%x\n",env->exception_index); /* For exceptions we just mark as pending on the NVIC, and let that handle it. */ switch (env->exception_index) { @@ -769,8 +780,10 @@ void do_interrupt_v7m(CPUARMState *env) { env->regs[13] -= 4; xpsr |= 0x200; } + v7m_push(env, xpsr); v7m_push(env, env->regs[15]); + HPRINTF(" interrupt r13 = 0x%x r15 = 0x%x\n", env->regs[13], env->regs[15]); v7m_push(env, env->regs[14]); v7m_push(env, env->regs[12]); v7m_push(env, env->regs[3]); @@ -790,6 +803,7 @@ void do_interrupt_v7m(CPUARMState *env) { env->regs[14] = lr; addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4); env->regs[15] = addr & 0xfffffffe; + HPRINTF("addr = %x vecbase = %d exce = %d\n", addr, env->v7m.vecbase, env->v7m.exception); env->thumb = addr & 1; } @@ -2243,6 +2257,9 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) { case 17: /* BASEPRI */ env->v7m.basepri = val & 0xff; env->kvm_exit_code = 1; +/* #ifdef CONFIG_SYMBEX */ + /* WR_cpu(env, v7m.basepri, val & 0xff); */ +/* #endif */ cpu_exit(env); break; case 18: /* BASEPRI_MAX */ @@ -2250,7 +2267,10 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) { if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0)) { env->v7m.basepri = val; env->kvm_exit_code = 1; - cpu_exit(env); +/* #ifdef CONFIG_SYMBEX */ + /* WR_cpu(env, v7m.basepri, val & 0xff); */ +/* #endif */ + cpu_exit(env); } break; case 19: /* FAULTMASK */ diff --git a/src/target-arm/translate.c b/src/target-arm/translate.c index 3312b83..ad7f1b4 100644 --- a/src/target-arm/translate.c +++ b/src/target-arm/translate.c @@ -39,6 +39,14 @@ #include #endif +#define DEBUG_TS + +#ifdef DEBUG_TS +#define TPRINTF(...) printf(__VA_ARGS__) +#else +#define TPRINTF(...) +#endif + #define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T) #define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5) /* currently all emulated v5 cores are also v5TE, so don't bother */ @@ -9995,6 +10003,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { if ((insn & 0x0900) == 0x0900) { store_reg_from_load(env, s, 15, tmp); // To find how many other regs pop with pc + TPRINTF("pc = 0x%x r0 = 0x%x\n", env->regs[15],env->regs[0]); if (env->v7m.exception != 0 && IS_M(env)) { count = 0; for (k = 0; k < 8; k++) { diff --git a/src/translate-all.c b/src/translate-all.c index ec86479..ea22884 100644 --- a/src/translate-all.c +++ b/src/translate-all.c @@ -47,6 +47,14 @@ #include "exec-tb.h" #include "exec.h" +#define DEBUG_TS + +#ifdef DEBUG_TS +#define TPRINTF(...) fprintf(logfile, __VA_ARGS__) +#else +#define TPRINTF(...) 
+#endif + /* code generation context */ __thread TCGContext *tcg_ctx; From 7e700de0e59343df5e1a30be464b0eae3aa1c47f Mon Sep 17 00:00:00 2001 From: chaojixx Date: Mon, 2 Dec 2019 17:47:17 -0500 Subject: [PATCH 14/59] helper: replace w/r regs with WR/RR Signed-off-by: chaojixx --- src/target-arm/helper.c | 151 ++++++++++++++++++++++--------------- src/target-arm/translate.c | 6 +- 2 files changed, 95 insertions(+), 62 deletions(-) diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c index c835f5b..fcee212 100644 --- a/src/target-arm/helper.c +++ b/src/target-arm/helper.c @@ -525,19 +525,35 @@ static int bad_mode_switch(CPUARMState *env, int mode) { } uint32_t cpsr_read(CPUARMState *env) { - int ZF; - ZF = (env->ZF == 0); - return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | - (env->QF << 27) | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) | ((env->condexec_bits & 0xfc) << 8) | - (env->GE << 16); + // These bits (ZF, NF, CF, VF) may be symbolic + target_ulong ZF, NF, CF, VF; + ZF = (RR_cpu(env,ZF) == 0); + NF = (RR_cpu(env,NF) & 0x80000000); + CF = (RR_cpu(env,CF) << 29); + VF = ((RR_cpu(env,VF) & 0x80000000) >> 3); + + // These bits instead are are always concrete + target_ulong QF, thumb, condex1, condex2, GE; + QF = (env->QF << 27); + thumb = (env->thumb << 5); + condex1 = ((env->condexec_bits & 3) << 25); + condex2 = ((env->condexec_bits & 0xfc) << 8); + GE = (env->GE << 16); + + // Re-assemble the cpsr + return env->uncached_cpsr | NF | (ZF << 30) | + CF | VF | QF + | thumb | condex1 + | condex2 + | GE; } void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) { if (mask & CPSR_NZCV) { - env->ZF = (~val) & CPSR_Z; - env->NF = val; - env->CF = (val >> 29) & 1; - env->VF = (val << 3) & 0x80000000; + WR_cpu(env,ZF,((~val) & CPSR_Z)); + WR_cpu(env,NF,val); + WR_cpu(env,CF,((val >> 29) & 1)); + WR_cpu(env,VF,((val << 3) & 0x80000000)); } if (mask & CPSR_Q) env->QF = ((val & CPSR_Q) != 0); @@ -646,33 +662,56 @@ void switch_mode(CPUARMState *env, int mode) { return; if (old_mode == ARM_CPU_MODE_FIQ) { - memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); - memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); + //memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); + WR_cpu(env,fiq_regs[0],RR_cpu(env,regs[8])); + WR_cpu(env,fiq_regs[1],RR_cpu(env,regs[9])); + WR_cpu(env,fiq_regs[2],RR_cpu(env,regs[10])); + WR_cpu(env,fiq_regs[3],RR_cpu(env,regs[11])); + WR_cpu(env,fiq_regs[4],RR_cpu(env,regs[12])); + + //memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); + WR_cpu(env,regs[8],RR_cpu(env,usr_regs[0])); + WR_cpu(env,regs[9],RR_cpu(env,usr_regs[1])); + WR_cpu(env,regs[10],RR_cpu(env,usr_regs[2])); + WR_cpu(env,regs[11],RR_cpu(env,usr_regs[3])); + WR_cpu(env,regs[12],RR_cpu(env,usr_regs[4])); + } else if (mode == ARM_CPU_MODE_FIQ) { - memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); - memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); + //memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); + WR_cpu(env,usr_regs[0],RR_cpu(env,regs[8])); + WR_cpu(env,usr_regs[1],RR_cpu(env,regs[9])); + WR_cpu(env,usr_regs[2],RR_cpu(env,regs[10])); + WR_cpu(env,usr_regs[3],RR_cpu(env,regs[11])); + WR_cpu(env,usr_regs[4],RR_cpu(env,regs[12])); + + //memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); + WR_cpu(env,regs[8],RR_cpu(env,fiq_regs[0])); + WR_cpu(env,regs[9],RR_cpu(env,fiq_regs[1])); + WR_cpu(env,regs[10],RR_cpu(env,fiq_regs[2])); + 
WR_cpu(env,regs[11],RR_cpu(env,fiq_regs[3])); + WR_cpu(env,regs[12],RR_cpu(env,fiq_regs[4])); } i = bank_number(env, old_mode); - env->banked_r13[i] = env->regs[13]; - env->banked_r14[i] = env->regs[14]; - env->banked_spsr[i] = env->spsr; + WR_cpu(env,banked_r13[i], RR_cpu(env,regs[13])); + WR_cpu(env,banked_r14[i], RR_cpu(env,regs[14])); + WR_cpu(env,banked_spsr[i], RR_cpu(env,spsr)); i = bank_number(env, mode); - env->regs[13] = env->banked_r13[i]; - env->regs[14] = env->banked_r14[i]; - env->spsr = env->banked_spsr[i]; + WR_cpu(env,regs[13],RR_cpu(env,banked_r13[i])); + WR_cpu(env,regs[14],RR_cpu(env,banked_r14[i])); + WR_cpu(env,spsr,RR_cpu(env,banked_spsr[i])); } static void v7m_push(CPUARMState *env, uint32_t val) { - env->regs[13] -= 4; - stl_phys(env->regs[13], val); + WR_cpu(env,regs[13],(RR_cpu(env,regs[13]) - 4)); + stl_phys(RR_cpu(env,regs[13]), val); } static uint32_t v7m_pop(CPUARMState *env) { uint32_t val; - val = ldl_phys(env->regs[13]); - env->regs[13] += 4; + val = ldl_phys(RR_cpu(env,regs[13])); + WR_cpu(env,regs[13],(RR_cpu(env,regs[13]) + 4)); return val; } @@ -681,8 +720,8 @@ static void switch_v7m_sp(CPUARMState *env, int process) { uint32_t tmp; if (env->v7m.current_sp != process) { tmp = env->v7m.other_sp; - env->v7m.other_sp = env->regs[13]; - env->regs[13] = tmp; + env->v7m.other_sp = RR_cpu(env,regs[13]); + WR_cpu(env,regs[13],tmp); env->v7m.current_sp = process; } else { HPRINTF(" already in handle mode current_sp = 0x%x, other_sp = 0x%x", env->v7m.current_sp, env->v7m.other_sp); @@ -700,19 +739,19 @@ static void do_v7m_exception_exit(CPUARMState *env) { /* Switch to the target stack. */ switch_v7m_sp(env, (type & 4) != 0); /* Pop registers. */ - env->regs[0] = v7m_pop(env); - env->regs[1] = v7m_pop(env); - env->regs[2] = v7m_pop(env); - env->regs[3] = v7m_pop(env); - env->regs[12] = v7m_pop(env); - env->regs[14] = v7m_pop(env); + WR_cpu(env,regs[0],v7m_pop(env)); + WR_cpu(env,regs[1],v7m_pop(env)); + WR_cpu(env,regs[2],v7m_pop(env)); + WR_cpu(env,regs[3],v7m_pop(env)); + WR_cpu(env,regs[12],v7m_pop(env)); + WR_cpu(env,regs[14],v7m_pop(env)); HPRINTF(" interrupt exit r13 = 0x%x r15 = 0x%x\n", env->regs[13], env->regs[15]); env->regs[15] = v7m_pop(env); xpsr = v7m_pop(env); xpsr_write(env, xpsr, 0xfffffdff); /* Undo stack alignment. */ if (xpsr & 0x200) - env->regs[13] |= 4; + WR_cpu(env,regs[13],(RR_cpu(env,regs[13]) | 4)); /* ??? The exception return type specifies Thread/Handler mode. However this is also implied by the xPSR value. Not sure what to do if there is a mismatch. */ @@ -754,7 +793,7 @@ void do_interrupt_v7m(CPUARMState *env) { nr = arm_lduw_code(env->regs[15], env->bswap_code) & 0xff; if (nr == 0xab) { env->regs[15] += 2; - env->regs[0] = do_arm_semihosting(env); + WR_cpu(env,regs[0],do_arm_semihosting(env)); return; } } @@ -776,20 +815,20 @@ void do_interrupt_v7m(CPUARMState *env) { /* Align stack pointer. */ /* ??? Should only do this if Configuration Control Register STACKALIGN bit is set. 
*/ - if (env->regs[13] & 4) { - env->regs[13] -= 4; + if (RR_cpu(env,regs[13]) & 4) { + WR_cpu(env,regs[13],(RR_cpu(env,regs[13]) - 4)); xpsr |= 0x200; } v7m_push(env, xpsr); v7m_push(env, env->regs[15]); HPRINTF(" interrupt r13 = 0x%x r15 = 0x%x\n", env->regs[13], env->regs[15]); - v7m_push(env, env->regs[14]); - v7m_push(env, env->regs[12]); - v7m_push(env, env->regs[3]); - v7m_push(env, env->regs[2]); - v7m_push(env, env->regs[1]); - v7m_push(env, env->regs[0]); + v7m_push(env, RR_cpu(env,regs[14])); + v7m_push(env, RR_cpu(env,regs[12])); + v7m_push(env, RR_cpu(env,regs[3])); + v7m_push(env, RR_cpu(env,regs[2])); + v7m_push(env, RR_cpu(env,regs[1])); + v7m_push(env, RR_cpu(env,regs[0])); /* Now we've done everything that might cause a derived exception * we can go ahead and activate whichever exception we're going to @@ -800,7 +839,7 @@ void do_interrupt_v7m(CPUARMState *env) { switch_v7m_sp(env, 0); /* Clear IT bits */ env->condexec_bits = 0; - env->regs[14] = lr; + WR_cpu(env,regs[14],lr); addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4); env->regs[15] = addr & 0xfffffffe; HPRINTF("addr = %x vecbase = %d exce = %d\n", addr, env->v7m.vecbase, env->v7m.exception); @@ -851,7 +890,7 @@ void do_interrupt(CPUARMState *env) { semblance of security. */ if (((mask == 0x123456 && !env->thumb) || (mask == 0xab && env->thumb)) && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) { - env->regs[0] = do_arm_semihosting(env); + WR_cpu(env,regs[0],do_arm_semihosting(env)); return; } } @@ -867,7 +906,7 @@ void do_interrupt(CPUARMState *env) { mask = arm_lduw_code(env->regs[15], env->bswap_code) & 0xff; if (mask == 0xab && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) { env->regs[15] += 2; - env->regs[0] = do_arm_semihosting(env); + WR_cpu(env,regs[0],do_arm_semihosting(env)); return; } } @@ -908,7 +947,7 @@ void do_interrupt(CPUARMState *env) { addr += 0xffff0000; } switch_mode(env, new_mode); - env->spsr = cpsr_read(env); + WR_cpu(env,spsr,cpsr_read(env)); /* Clear IT bits. */ env->condexec_bits = 0; /* Switch to the new mode, and to the correct instruction set. */ @@ -919,7 +958,7 @@ void do_interrupt(CPUARMState *env) { if (arm_feature(env, ARM_FEATURE_V4T)) { env->thumb = (env->cp15.c1_sys & (1 << 30)) != 0; } - env->regs[14] = env->regs[15] + offset; + WR_cpu(env,regs[14],(env->regs[15] + offset)); env->regs[15] = addr; env->interrupt_request |= CPU_INTERRUPT_EXITTB; } @@ -2163,7 +2202,7 @@ uint32_t HELPER(get_cp15)(CPUARMState *env, uint32_t insn) { void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val) { if ((env->uncached_cpsr & CPSR_M) == mode) { - env->regs[13] = val; + WR_cpu(env,regs[13],val); } else { env->banked_r13[bank_number(env, mode)] = val; } @@ -2171,7 +2210,7 @@ void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val) { uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode) { if ((env->uncached_cpsr & CPSR_M) == mode) { - return env->regs[13]; + return RR_cpu(env,regs[13]); } else { return env->banked_r13[bank_number(env, mode)]; } @@ -2194,9 +2233,9 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) { case 7: /* IEPSR */ return xpsr_read(env) & 0x0700edff; case 8: /* MSP */ - return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13]; + return env->v7m.current_sp ? env->v7m.other_sp : RR_cpu(env,regs[13]); case 9: /* PSP */ - return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp; + return env->v7m.current_sp ? 
RR_cpu(env,regs[13]) : env->v7m.other_sp;
         case 16: /* PRIMASK */
             return (env->uncached_cpsr & CPSR_I) != 0;
         case 17: /* BASEPRI */
@@ -2240,11 +2279,11 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) {
             if (env->v7m.current_sp)
                 env->v7m.other_sp = val;
             else
-                env->regs[13] = val;
+                WR_cpu(env,regs[13],val);
             break;
         case 9: /* PSP */
             if (env->v7m.current_sp)
-                env->regs[13] = val;
+                WR_cpu(env,regs[13],val);
             else
                 env->v7m.other_sp = val;
             break;
@@ -2257,9 +2296,6 @@
         case 17: /* BASEPRI */
             env->v7m.basepri = val & 0xff;
             env->kvm_exit_code = 1;
-/* #ifdef CONFIG_SYMBEX */
-            /* WR_cpu(env, v7m.basepri, val & 0xff); */
-/* #endif */
             cpu_exit(env);
             break;
         case 18: /* BASEPRI_MAX */
@@ -2267,10 +2303,7 @@
             val &= 0xff;
             if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0)) {
                 env->v7m.basepri = val;
                 env->kvm_exit_code = 1;
-/* #ifdef CONFIG_SYMBEX */
-            /* WR_cpu(env, v7m.basepri, val & 0xff); */
-/* #endif */
-                cpu_exit(env);
+                cpu_exit(env);
             }
             break;
         case 19: /* FAULTMASK */
diff --git a/src/target-arm/translate.c b/src/target-arm/translate.c
index ad7f1b4..a549d12 100644
--- a/src/target-arm/translate.c
+++ b/src/target-arm/translate.c
@@ -10010,7 +10010,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) {
                     if ((insn & (1 << k)) != 0)
                         count++;
                 }
-                val = ldl_phys(env->regs[13] + count * 4);
+                val = ldl_phys(RR_cpu(env,regs[13]) + count * 4);
                 // if pop pc is EXC_RETURN invode interrupt exit.
                 if (val >= 0xfffffff0) {
                     gen_exception(EXCP_EXCEPTION_EXIT);
@@ -10514,8 +10514,8 @@ void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf, int
 #endif
     uint32_t psr;

-    for (i = 0; i < 16; i++) {
-        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
+    for (i = 0; i < 15; i++) {
+        cpu_fprintf(f, "R%02d=%08x", i, RR_cpu(env,regs[i]));
         if ((i % 4) == 3)
             cpu_fprintf(f, "\n");
         else

From 44b40c9555582a2a64cfe726215f3472bebe9a34 Mon Sep 17 00:00:00 2001
From: chaojixx
Date: Mon, 2 Dec 2019 16:55:39 -0500
Subject: [PATCH 15/59] target-arm/translate: deal with CONFIG_SYMBEX mode
 according to x86

se_call and se_ret are unimplemented

Signed-off-by: chaojixx
---
 src/target-arm/translate.c | 178 ++++++++++++++++++++++++++++++++++++-
 1 file changed, 177 insertions(+), 1 deletion(-)

diff --git a/src/target-arm/translate.c b/src/target-arm/translate.c
index a549d12..512c8fa 100644
--- a/src/target-arm/translate.c
+++ b/src/target-arm/translate.c
@@ -101,7 +101,11 @@ typedef struct DisasContext {
     int invalid_instr; /* tb contains invalid instruction */
 #endif
 } DisasContext;
-
+#ifdef CONFIG_SYMBEX
+#define SET_TB_TYPE(t) s->tb->se_tb_type = t;
+#else
+#define SET_TB_TYPE(t)
+#endif
 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

 #if defined(CONFIG_USER_ONLY)
@@ -135,7 +139,85 @@ static TCGv_i64 cpu_F0d, cpu_F1d;
 static const char *regnames[] = {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
                                  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc"};

+#ifdef CONFIG_SYMBEX
+static inline void instr_translate_compute_reg_mask_end(DisasContext *dc) {
+    uint64_t rmask, wmask, accesses_mem;
+
+    if (dc->done_reg_access_end) {
+        return;
+    }
+
+    tcg_calc_regmask_ex(&tcg_ctx, &rmask, &wmask, &accesses_mem, dc->ins_opc, dc->ins_arg);
+
+    // First five bits contain flag registers
+    rmask >>= 5;
+    wmask >>= 5;
+
+    g_sqi.events.on_translate_register_access(dc->tb, dc->insPc, rmask, wmask, (int) accesses_mem);
+
+    dc->done_reg_access_end = 1;
+}
+#endif
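A note on the RR_cpu/WR_cpu accessors that patch 14 threads through helper.c and translate.c: in symbex builds they route every read and write of a possibly-symbolic CPUARMState field through the S2E glue, so the engine can track the value instead of having it silently concretized by a raw memory access. The patches never show their definitions; by analogy with the STATUS/STATUS_W fallback that patch 09 adds to softfloat.h, the concrete-mode versions presumably collapse to plain field accesses, roughly:

    /* Illustrative sketch only -- the CONFIG_SYMBEX variants are supplied by
     * the S2E runtime headers and are not part of these patches. In a
     * concrete-only build the accessors are assumed to reduce to direct
     * loads/stores of CPUARMState fields. */
    #ifndef CONFIG_SYMBEX
    #define RR_cpu(env, field)    ((env)->field)         /* register read  */
    #define WR_cpu(env, field, v) ((env)->field = (v))   /* register write */
    #endif

Under that reading, a sequence such as WR_cpu(env, regs[13], RR_cpu(env, regs[13]) - 4) in v7m_push behaves exactly like env->regs[13] -= 4 when running concretely, while remaining visible to the symbolic engine otherwise.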
+ +#ifdef CONFIG_SYMBEX +void *g_invokeCallRetInstrumentation __attribute__((weak)); + +static inline void instr_gen_call_ret(DisasContext *s, int isCall) { + if (likely(!*g_sqi.events.on_call_return_signals_count)) { + return; + } + + int instrument = g_sqi.events.on_call_return_translate(s->pc, isCall); + if (!instrument) { + return; + } + TPRINTF("NOT Implenmented %d", instrument); + +/* int clabel = gen_new_label(); + + tcg_gen_movi_i32(cpu_tmp1_i32, (uintptr_t) &g_invokeCallRetInstrumentation); + + tcg_gen_ld_i32(cpu_tmp1_i32, TCGV_NAT_TO_PTR(cpu_tmp1_i32), 0); + tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_tmp1_i32, 0, clabel); + + tcg_gen_movi_tl(cpu_T[1], s->insPc); + + if (isCall) { + gen_helper_se_call(cpu_T[1]); + } else { + gen_helper_se_ret(cpu_T[1]); + } + + gen_set_label(clabel); +*/ +} + +static inline void gen_instr_end(DisasContext *s) { + if (unlikely(*g_sqi.events.on_translate_instruction_end_signals_count && s->instrument)) + g_sqi.events.on_translate_instruction_end(s, s->tb, s->insPc, s->useNextPc ? s->nextPc : (uint64_t) -1); +} + +static inline void gen_eob_event(DisasContext *s, int static_target, target_ulong target_pc) { + gen_instr_end(s); + + if (unlikely(*g_sqi.events.on_translate_register_access_signals_count && s->instrument)) + instr_translate_compute_reg_mask_end(s); + + if (unlikely(*g_sqi.events.on_translate_block_end_signals_count && s->instrument)) + g_sqi.events.on_translate_block_end(s->tb, s->insPc, static_target, target_pc); + + if (unlikely(*g_sqi.events.on_call_return_signals_count && s->instrument)) { + if (s->tb->se_tb_type == TB_CALL || s->tb->se_tb_type == TB_CALL_IND) { + instr_gen_call_ret(s, 1); + } else if (s->tb->se_tb_type == TB_RET) { + instr_gen_call_ret(s, 0); + } + } + + s->done_instr_end = 1; +} +#endif /* initialize TCG globals. */ void arm_translate_init(void) { int i; @@ -3659,6 +3741,9 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest) { TranslationBlock *tb; tb = s->tb; +#ifdef CONFIG_SYMBEX + gen_eob_event(s, 1, dest); +#endif if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) { tcg_gen_goto_tb(n); gen_set_pc_im(dest); @@ -10268,6 +10353,48 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationB tcg_clear_temp_count(); +#ifdef CONFIG_SYMBEX + tb_precise_pc_t *p; + + /* When doing retranslation to LLVM, avoid clobbering + * existing pc recovery info, which is relied upon by + * the existing machine code. 
*/ + int generate_pc_recovery_info = tb->precise_entries == 0; + + dc->invalid_instr = 0; + + if (!search_pc && generate_pc_recovery_info) { + p = tb->precise_pcs; + } + + if (search_pc) { + generate_pc_recovery_info = 0; + } + + dc->instrument = !search_pc; + + dc->enable_jmp_im = 1; + dc->cpuState = env; + tb->se_tb_type = TB_DEFAULT; + tb->se_tb_call_eip = 0; +/* Make sure to refer to the original TB instead of the + temporary one in case we regenerate LLVM bitcode */ +#ifndef STATIC_TRANSLATOR + TCGv_i64 tmp64; + tmp64 = tcg_temp_new_i64(); + if (tb->originalTb) { + tcg_gen_movi_i64(tmp64, (uint64_t) tb->originalTb); + } else { + tcg_gen_movi_i64(tmp64, (uint64_t) tb); + } + + tcg_gen_st_i64(tmp64, cpu_env, offsetof(CPUArchState, se_current_tb)); + + if (unlikely(*g_sqi.events.on_translate_block_start_signals_count && dc->instrument)) { + g_sqi.events.on_translate_block_start(dc, tb, pc_start); + } +#endif +#endif /* A note on handling of the condexec (IT) bits: * * We want to avoid the overhead of having to write the updated condexec @@ -10339,6 +10466,23 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationB } } } +#ifdef CONFIG_SYMBEX + /* Generate precise PC recovery information */ + if (generate_pc_recovery_info && !search_pc) { + int cur_opc = gen_opc_ptr - gen_opc_buf; + + if (num_insns > 0 && (p - 1)->opc == cur_opc) { + // The instruction was a nop + --p; + --tb->precise_entries; + } + + p->guest_pc_increment = dc->pc - pc_start; + p->opc = cur_opc; + ++p; + ++tb->precise_entries; + } +#endif if (search_pc) { j = gen_opc_ptr - gen_opc_buf; if (lj < j) { @@ -10358,7 +10502,22 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationB // if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) { // tcg_gen_debug_insn_start(dc->pc); // } +#ifdef CONFIG_SYMBEX + dc->insPc = dc->pc; + dc->done_instr_end = 0; + dc->done_reg_access_end = 0; + + if (unlikely(*g_sqi.events.on_translate_instruction_start_signals_count && dc->instrument)) { + g_sqi.events.on_translate_instruction_start(dc, tb, dc->pc); + } + tb->pcOfLastInstr = pc_start; + dc->useNextPc = 0; + dc->nextPc = -1; + + dc->ins_opc = gen_opc_ptr; + dc->ins_arg = gen_opparam_ptr; +#endif if (dc->thumb) { disas_thumb_insn(env, dc); if (dc->condexec_mask) { @@ -10371,7 +10530,16 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationB } else { disas_arm_insn(env, dc); } +#ifdef CONFIG_SYMBEX + if (!dc->is_jmp) { + // Allow proper pc update for onTranslateInstruction events + dc->nextPc = dc->pc; + dc->useNextPc = 1; + // Jumps are treated specially, as they end the TB + gen_instr_end(dc); + } +#endif if (dc->condjmp && !dc->is_jmp) { gen_set_label(dc->condlabel); dc->condjmp = 0; @@ -10441,6 +10609,9 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationB default: case DISAS_JUMP: case DISAS_UPDATE: +#ifdef CONFIG_SYMBEX + gen_eob_event(dc, 0, 0); +#endif /* indicate that the hash table must be used to find the next TB */ tcg_gen_exit_tb(0); break; @@ -10483,6 +10654,11 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationB } else { tb->size = dc->pc - pc_start; tb->icount = num_insns; +#ifdef CONFIG_SYMBEX + if (unlikely(*g_sqi.events.on_translate_block_complete_signals_count && dc->instrument)) { + g_sqi.events.on_translate_block_complete(tb, dc->insPc); + } +#endif } } From ac77643a8034a3f90472d6b53629ce2cf7ab00bb Mon Sep 17 00:00:00 2001 From: chaojixx Date: Thu, 12 Dec 2019 20:11:20 -0500 Subject: 
[PATCH 16/59] log: change the log location and information Signed-off-by: chaojixx --- src/cpu-exec.c | 2 +- src/target-arm/helper.c | 16 ++++++++++++---- src/target-arm/translate.c | 2 +- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/src/cpu-exec.c b/src/cpu-exec.c index f3e74b5..89ddb9d 100644 --- a/src/cpu-exec.c +++ b/src/cpu-exec.c @@ -266,7 +266,7 @@ static uintptr_t fetch_and_run_tb(TranslationBlock *prev_tb, int tb_exit_code, C (uint64_t) env->eip, (uint64_t) env->eip + tb->size, (uint64_t) env->mflags, env->kvm_request_interrupt_window); #elif defined(TARGET_ARM) - DPRINTF(" fetch_and_run_tb r15=0x%x sp=0x%x cpsr=0x%x \n", (uint32_t) env->regs[15], env->regs[13], env->uncached_cpsr); + DPRINTF(" fetch_and_run_tb pc=0x%x sp=0x%x cpsr=0x%x \n", (uint32_t) env->regs[15], env->regs[13], env->uncached_cpsr); #else #error Unsupported target architecture #endif diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c index fcee212..b9ed5e3 100644 --- a/src/target-arm/helper.c +++ b/src/target-arm/helper.c @@ -418,7 +418,6 @@ CPUARMState *cpu_arm_init(const char *cpu_model) { } env->cp15.c0_cpuid = id; - // if (arm_feature(env, ARM_FEATURE_NEON)) { // gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg, 51, "arm-neon.xml", 0); // } else if (arm_feature(env, ARM_FEATURE_VFP3)) { @@ -723,8 +722,9 @@ static void switch_v7m_sp(CPUARMState *env, int process) { env->v7m.other_sp = RR_cpu(env,regs[13]); WR_cpu(env,regs[13],tmp); env->v7m.current_sp = process; + HPRINTF(" switch sp tmp = 0x%x, current_sp = 0x%x, other_sp = 0x%x\n", tmp, env->v7m.current_sp, env->v7m.other_sp); } else { - HPRINTF(" already in handle mode current_sp = 0x%x, other_sp = 0x%x", env->v7m.current_sp, env->v7m.other_sp); + HPRINTF(" already in handle mode current_sp = 0x%x, other_sp = 0x%x\n", env->v7m.current_sp, env->v7m.other_sp); } } @@ -745,13 +745,17 @@ static void do_v7m_exception_exit(CPUARMState *env) { WR_cpu(env,regs[3],v7m_pop(env)); WR_cpu(env,regs[12],v7m_pop(env)); WR_cpu(env,regs[14],v7m_pop(env)); - HPRINTF(" interrupt exit r13 = 0x%x r15 = 0x%x\n", env->regs[13], env->regs[15]); env->regs[15] = v7m_pop(env); xpsr = v7m_pop(env); xpsr_write(env, xpsr, 0xfffffdff); /* Undo stack alignment. */ if (xpsr & 0x200) WR_cpu(env,regs[13],(RR_cpu(env,regs[13]) | 4)); + + HPRINTF(" interrupt exit r13 = 0x%x r15 = 0x%x\n", env->regs[13], env->regs[15]); + HPRINTF(" R3=0x%x R4=0x%x R5=0x%x R6=0x%x R7=0x%x R8=0x%x R9=0x%x R10=0x%x R11=0x%x R12=0x%x R2=0x%x R1=0x%x R0=0x%x\n", + RR_cpu(env,regs[3]), RR_cpu(env,regs[4]), RR_cpu(env,regs[5]), RR_cpu(env,regs[6]), RR_cpu(env,regs[7]), RR_cpu(env,regs[8]), + RR_cpu(env,regs[9]), RR_cpu(env,regs[10]), RR_cpu(env,regs[11]), RR_cpu(env,regs[12]), RR_cpu(env,regs[2]), RR_cpu(env,regs[1]), RR_cpu(env,regs[0])); /* ??? The exception return type specifies Thread/Handler mode. However this is also implied by the xPSR value. Not sure what to do if there is a mismatch. */ @@ -812,6 +816,11 @@ void do_interrupt_v7m(CPUARMState *env) { return; /* Never happens. Keep compiler happy. 
*/
     }

+    HPRINTF(" interrupt r13 = 0x%x r15 = 0x%x\n", env->regs[13], env->regs[15]);
+    HPRINTF(" R3=0x%x R4=0x%x R5=0x%x R6=0x%x R7=0x%x R8=0x%x R9=0x%x R10=0x%x R11=0x%x R12=0x%x R2=0x%x R1=0x%x R0=0x%x\n",
+            RR_cpu(env,regs[3]), RR_cpu(env,regs[4]), RR_cpu(env,regs[5]), RR_cpu(env,regs[6]), RR_cpu(env,regs[7]), RR_cpu(env,regs[8]),
+            RR_cpu(env,regs[9]), RR_cpu(env,regs[10]), RR_cpu(env,regs[11]), RR_cpu(env,regs[12]), RR_cpu(env,regs[2]), RR_cpu(env,regs[1]), RR_cpu(env,regs[0]));
+
     /* Align stack pointer. */
     /* ??? Should only do this if Configuration Control Register
        STACKALIGN bit is set. */
@@ -822,7 +831,6 @@
     v7m_push(env, xpsr);
     v7m_push(env, env->regs[15]);
-    HPRINTF(" interrupt r13 = 0x%x r15 = 0x%x\n", env->regs[13], env->regs[15]);
     v7m_push(env, RR_cpu(env,regs[14]));
     v7m_push(env, RR_cpu(env,regs[12]));
diff --git a/src/target-arm/translate.c b/src/target-arm/translate.c
index 512c8fa..9b14355 100644
--- a/src/target-arm/translate.c
+++ b/src/target-arm/translate.c
@@ -10088,7 +10088,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) {
                 if ((insn & 0x0900) == 0x0900) {
                     store_reg_from_load(env, s, 15, tmp);
                     // To find how many other regs pop with pc
-                    TPRINTF("pc = 0x%x r0 = 0x%x\n", env->regs[15],env->regs[0]);
+                    TPRINTF("pc = 0x%x addr = 0x%x tmp = 0x%x\n", env->regs[15], addr, tmp);
                     if (env->v7m.exception != 0 && IS_M(env)) {
                         count = 0;
                         for (k = 0; k < 8; k++) {

From 34a2eecddb918b0a34a02602b2dfd1d695e8ec0d Mon Sep 17 00:00:00 2001
From: chaojixx
Date: Tue, 10 Dec 2019 20:49:34 -0500
Subject: [PATCH 17/59] msr: hard-code sync of sregs between kvm env and cpu
 env

Replaces the old method (syncing sregs via the exit reason is slow and
unsound during races).

Signed-off-by: chaojixx
---
 include/cpu/arm/cpu.h   |  1 -
 include/cpu/kvm.h       |  3 ---
 src/CMakeLists.txt      |  4 ++--
 src/cpu-exec.c          |  4 ++--
 src/target-arm/cpu.h    |  7 +++++++
 src/target-arm/helper.c | 21 +++++++++++++++++----
 6 files changed, 28 insertions(+), 12 deletions(-)

diff --git a/include/cpu/arm/cpu.h b/include/cpu/arm/cpu.h
index cc7027c..903e346 100644
--- a/include/cpu/arm/cpu.h
+++ b/include/cpu/arm/cpu.h
@@ -209,7 +209,6 @@ typedef struct CPUARMState {
     /* For KVM */
     int kvm_request_interrupt_window;
     int kvm_irq;
-    int kvm_exit_code; // now only used for msr
     uint8_t timer_interrupt_disabled;
 } CPUARMState;

diff --git a/include/cpu/kvm.h b/include/cpu/kvm.h
index 8a8d97e..a4108d4 100644
--- a/include/cpu/kvm.h
+++ b/include/cpu/kvm.h
@@ -182,9 +182,6 @@ struct kvm_pit_config {
 #define KVM_EXIT_EPR 23
 #define KVM_EXIT_SYSTEM_EVENT 24

-/* ARM Cortex-m exit codes */
-#define KVM_EXIT_SYNC_ARM_V7M_SREGS 40
-
 /* Symbolic execution exit codes */
 #define KVM_EXIT_FLUSH_DISK 100
 #define KVM_EXIT_SAVE_DEV_STATE 101

diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index e715569..88df9ae 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -31,11 +31,11 @@ target_include_directories(cpu PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} $ENV{S2EDIR}/qemu/include/nvic)

 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D__STDC_FORMAT_MACROS -D_GNU_SOURCE -DNEED_CPU_H -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -DTARGET_PHYS_ADDR_BITS=64")
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -fPIC -Werror -fno-omit-frame-pointer")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -fPIC -fno-omit-frame-pointer")
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-strict-aliasing -Wno-sign-compare -Wno-missing-field-initializers -Wno-mismatched-tags -Wno-deprecated-declarations
-Wno-initializer-overrides -Wno-zero-length-array") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fexceptions") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -fPIC -Wno-mismatched-tags -Werror -Wno-zero-length-array") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -fPIC -Wno-mismatched-tags -Wno-zero-length-array") if(WITH_SYMBEX) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DCONFIG_SYMBEX") diff --git a/src/cpu-exec.c b/src/cpu-exec.c index 89ddb9d..369bd7f 100644 --- a/src/cpu-exec.c +++ b/src/cpu-exec.c @@ -450,12 +450,12 @@ static bool process_interrupt_request(CPUArchState *env) { // in case basepri has not been synced so add exit code condition if ((interrupt_request & CPU_INTERRUPT_HARD) && ((IS_M(env) && env->regs[15] < 0xfffffff0) || !(env->uncached_cpsr & CPSR_I))) { - if ((armv7m_nvic_can_take_pending_exception(env->nvic)) && (env->kvm_exit_code == 0)) { + if ((armv7m_nvic_can_take_pending_exception(env->nvic))) { env->exception_index = EXCP_IRQ; do_interrupt(env); has_interrupt = true; } else { - DPRINTF("cpu basepri = %d take_exc = %d kvm_exit_code = %d\n", env->v7m.basepri, armv7m_nvic_can_take_pending_exception(env->nvic), env->kvm_exit_code); + DPRINTF("cpu basepri = %d take_exc = %d\n", env->v7m.basepri, armv7m_nvic_can_take_pending_exception(env->nvic)); } } #endif diff --git a/src/target-arm/cpu.h b/src/target-arm/cpu.h index 8738cac..2a3c5c8 100644 --- a/src/target-arm/cpu.h +++ b/src/target-arm/cpu.h @@ -193,6 +193,12 @@ static inline uint32_t xpsr_read(CPUARMState *env) { /* Set the xPSR. Note that some bits of mask must be all-set or all-clear. */ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) { +//sync var exception for kvm env by using hard-code offset + unsigned long *armcpu; + uint32_t *exception; + armcpu = env->nvic+0x308; + exception = (unsigned long)(*armcpu+0x8b50); + if (mask & CPSR_NZCV) { WR_cpu(env, ZF, (~val) & CPSR_Z); WR_cpu(env, NF, val); @@ -213,6 +219,7 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) { } if (mask & 0x1ff) { env->v7m.exception = val & 0x1ff; + *exception = val & 0x1ff; } } diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c index b9ed5e3..7844704 100644 --- a/src/target-arm/helper.c +++ b/src/target-arm/helper.c @@ -770,6 +770,11 @@ void do_interrupt_v7m(CPUARMState *env) { int exc; bool targets_secure; +//sync var exception for kvm env by using hard-code offset + unsigned long *armcpu; + uint32_t *exception; + armcpu = env->nvic+0x308; + exception = (unsigned long)(*armcpu+0x8b50); lr = 0xfffffff1; if (env->v7m.current_sp) @@ -807,6 +812,7 @@ void do_interrupt_v7m(CPUARMState *env) { armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure); armv7m_nvic_acknowledge_irq(env->nvic); env->v7m.exception = exc; + *exception = exc; break; case EXCP_EXCEPTION_EXIT: do_v7m_exception_exit(env); @@ -2261,6 +2267,14 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) { } void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) { + unsigned long *armcpu; + uint32_t *basepri; + uint32_t *primask; + + armcpu = env->nvic+0x308; + basepri = (unsigned long)(*armcpu+0x8b0c); + primask = (unsigned long)(*armcpu+0x8b54); + switch (reg) { case 0: /* APSR */ xpsr_write(env, val, 0xf8000000); @@ -2296,6 +2310,7 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) { env->v7m.other_sp = val; break; case 16: /* PRIMASK */ + *primask = val & 1; if (val & 1) env->uncached_cpsr |= CPSR_I; else @@ -2303,15 +2318,13 @@ void HELPER(v7m_msr)(CPUARMState *env, 
uint32_t reg, uint32_t val) {
             break;
         case 17: /* BASEPRI */
             env->v7m.basepri = val & 0xff;
-            env->kvm_exit_code = 1;
-            cpu_exit(env);
+            *basepri = val & 0xff; /*sync with kvm env basepri */
             break;
         case 18: /* BASEPRI_MAX */
             val &= 0xff;
             if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0)) {
                 env->v7m.basepri = val;
-                env->kvm_exit_code = 1;
-                cpu_exit(env);
+                *basepri = val; /*sync with kvm env basepri */
             }
             break;
         case 19: /* FAULTMASK */

From 128cbe64d548ee8a5eedc515ba9e495e3cef9a5e Mon Sep 17 00:00:00 2001
From: chaojixx
Date: Tue, 10 Dec 2019 13:59:09 -0500
Subject: [PATCH 18/59] v7m_interrupt: add interrupt support for symbex mode

Note: regs will be concretized when executing an interrupt in symbex mode.

Signed-off-by: chaojixx
---
 include/cpu/se_libcpu.h    | 22 ++++++++++++++++++++--
 src/target-arm/helper.c    |  4 ++--
 src/target-arm/op_helper.c | 10 ++++++++++
 3 files changed, 32 insertions(+), 4 deletions(-)

diff --git a/include/cpu/se_libcpu.h b/include/cpu/se_libcpu.h
index e57e4b0..0e1c253 100644
--- a/include/cpu/se_libcpu.h
+++ b/include/cpu/se_libcpu.h
@@ -42,10 +42,23 @@ struct CPUARMState;
 #endif

 typedef uintptr_t (*se_libcpu_tb_exec_t)(CPUArchState *env1, struct TranslationBlock *tb);
+#if defined(TARGET_I386) || defined(TARGET_X86_64)
 typedef void (*se_do_interrupt_all_t)(int intno, int is_int, int error_code, uintptr_t next_eip, int is_hw);
-typedef void (*se_do_interrupt_arm_t)(CPUArchState *env1);
+#elif defined(TARGET_ARM)
+typedef void (*se_do_interrupt_arm_t)(void);
+#else
+#error Unsupported target architecture
+#endif
+
+#if defined(TARGET_I386) || defined(TARGET_X86_64)
 void se_do_interrupt_all(int intno, int is_int, int error_code, target_ulong next_eip, int is_hw);
-void se_do_interrupt_arm(CPUArchState *env1);
+#elif defined(TARGET_ARM)
+void se_do_interrupt_arm(void);
+void se_helper_do_interrupt_arm(CPUArchState *env);
+#else
+#error Unsupported target architecture
+#endif
+
 #define MEM_TRACE_FLAG_IO 1
 #define MEM_TRACE_FLAG_WRITE 2
 #define MEM_TRACE_FLAG_PRECISE 4
@@ -83,8 +96,13 @@ struct se_libcpu_interface_t {
         void (*switch_to_symbolic)(void *retaddr) __attribute__((noreturn));

         se_libcpu_tb_exec_t tb_exec;
+#if defined(TARGET_I386) || defined(TARGET_X86_64)
         se_do_interrupt_all_t do_interrupt_all;
+#elif defined(TARGET_ARM)
         se_do_interrupt_arm_t do_interrupt_arm;
+#else
+#error Unsupported target architecture
+#endif

         unsigned *clock_scaling_factor;
     } exec;

diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c
index 7844704..308cf2e 100644
--- a/src/target-arm/helper.c
+++ b/src/target-arm/helper.c
@@ -865,9 +865,9 @@ void do_interrupt_v7m(CPUARMState *env) {
 /* This will be called from S2EExecutor if running concretely; It will
    in turn call the real ARM IRQ handler with current CPUARMState.*/
 void do_interrupt(CPUARMState *env) {
-    g_sqi.exec.do_interrupt_arm(env);
+    g_sqi.exec.do_interrupt_arm();
 }
-void se_do_interrupt_arm(CPUARMState *env) {
+void se_helper_do_interrupt_arm(CPUARMState *env) {
 #else
 /* Handle a CPU exception.
*/ void do_interrupt(CPUARMState *env) { diff --git a/src/target-arm/op_helper.c b/src/target-arm/op_helper.c index 436d7c6..d0c69a2 100644 --- a/src/target-arm/op_helper.c +++ b/src/target-arm/op_helper.c @@ -88,6 +88,16 @@ uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, uint32_t rn, uint32_t max #endif +#ifdef CONFIG_SYMBEX +#include + +/* This will be called from S2EExecutor if running concretely; It will + in turn call the real ARM IRQ handler with current CPUARMState.*/ +void se_do_interrupt_arm(void) +{ + se_helper_do_interrupt_arm(env); +} +#endif /* try to fill the TLB and return an exception if error. If retaddr is NULL, it means that the function was called in C code (i.e. not from generated code or from helper.c) */ From 3fb35629173a6d93771521384f43f2be4ee8da00 Mon Sep 17 00:00:00 2001 From: chaojixx Date: Wed, 11 Dec 2019 17:27:28 -0500 Subject: [PATCH 19/59] helper: abort hard fault interrupt Signed-off-by: chaojixx --- src/target-arm/helper.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c index 308cf2e..3a7c02c 100644 --- a/src/target-arm/helper.c +++ b/src/target-arm/helper.c @@ -795,6 +795,7 @@ void do_interrupt_v7m(CPUARMState *env) { case EXCP_PREFETCH_ABORT: case EXCP_DATA_ABORT: armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, false); + cpu_abort(env, "hard fault exception 0x%x\n", env->exception_index); break; case EXCP_BKPT: if (semihosting_enabled) { From 927edb6f683f08738e3d4b1bf55f5867fc60cdbb Mon Sep 17 00:00:00 2001 From: chaojixx Date: Tue, 17 Dec 2019 19:46:17 -0500 Subject: [PATCH 20/59] add exit reason KVM_EXIT_SYNC_SREGS Signed-off-by: chaojixx --- include/cpu/kvm.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/cpu/kvm.h b/include/cpu/kvm.h index a4108d4..7a2f151 100644 --- a/include/cpu/kvm.h +++ b/include/cpu/kvm.h @@ -188,6 +188,8 @@ struct kvm_pit_config { #define KVM_EXIT_RESTORE_DEV_STATE 102 #define KVM_EXIT_CLONE_PROCESS 103 +#define KVM_EXIT_SYNC_SREGS 110 + /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ #define KVM_INTERNAL_ERROR_EMULATION 1 From 4e1b1ae5fefb558abb1b6a80f3149b1e7b62098e Mon Sep 17 00:00:00 2001 From: chaojixx Date: Sat, 28 Dec 2019 11:01:48 -0500 Subject: [PATCH 21/59] target-arm:remove helper.h Now helper.h is in tcg.h temporarily Signed-off-by: chaojixx --- src/target-arm/helper.c | 5 +- src/target-arm/helper.h | 505 --------------------------------- src/target-arm/iwmmxt_helper.c | 3 +- src/target-arm/neon_helper.c | 2 +- src/target-arm/op_helper.c | 4 +- src/target-arm/translate.c | 4 - 6 files changed, 6 insertions(+), 517 deletions(-) delete mode 100644 src/target-arm/helper.h diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c index 3a7c02c..15ae5e3 100644 --- a/src/target-arm/helper.c +++ b/src/target-arm/helper.c @@ -22,9 +22,8 @@ #include -#include "helper.h" - -#include "host-utils.h" +#include +#include #define DEBUG_HELPER diff --git a/src/target-arm/helper.h b/src/target-arm/helper.h deleted file mode 100644 index b005fd4..0000000 --- a/src/target-arm/helper.h +++ /dev/null @@ -1,505 +0,0 @@ -/// Copyright (C) 2003 Fabrice Bellard -/// Copyright (C) 2010 Dependable Systems Laboratory, EPFL -/// Copyright (C) 2016 Cyberhaven -/// Copyrights of all contributions belong to their respective owners. 
-/// -/// This library is free software; you can redistribute it and/or -/// modify it under the terms of the GNU Library General Public -/// License as published by the Free Software Foundation; either -/// version 2 of the License, or (at your option) any later version. -/// -/// This library is distributed in the hope that it will be useful, -/// but WITHOUT ANY WARRANTY; without even the implied warranty of -/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -/// Library General Public License for more details. -/// -/// You should have received a copy of the GNU Library General Public -/// License along with this library; if not, see . - -#include "def-helper.h" - -#include - -#define _RM_EXCP (_M_CF | _M_VF | _M_NF | _M_ZF) -#define _WM_EXCP (_M_CF | _M_VF | _M_NF | _M_ZF) -#define _AM_EXCP 0 - -DEF_HELPER_2_M(cpsr_write, void, i32, i32, -1, -1, 1) -DEF_HELPER_0_M(cpsr_read, i32, -1, -1, 1) - -DEF_HELPER_1_M(get_user_reg, i32, i32, -1, -1, 1) -DEF_HELPER_2_M(set_user_reg, void, i32, i32, -1, -1, 1) - -DEF_HELPER_2_M(add_cc, i32, i32, i32, -1, -1, 1) -DEF_HELPER_2_M(adc_cc, i32, i32, i32, -1, -1, 1) -DEF_HELPER_2_M(sub_cc, i32, i32, i32, -1, -1, 1) -DEF_HELPER_2_M(sbc_cc, i32, i32, i32, -1, -1, 1) -DEF_HELPER_2_M(shl_cc, i32, i32, i32, -1, -1, 1) -DEF_HELPER_2_M(shr_cc, i32, i32, i32, -1, -1, 1) -DEF_HELPER_2_M(sar_cc, i32, i32, i32, -1, -1, 1) -DEF_HELPER_2_M(ror_cc, i32, i32, i32, -1, -1, 1) - -DEF_HELPER_1(clz, i32, i32) -DEF_HELPER_1(sxtb16, i32, i32) -DEF_HELPER_1(uxtb16, i32, i32) - -DEF_HELPER_2(add_setq, i32, i32, i32) -DEF_HELPER_2(add_saturate, i32, i32, i32) -DEF_HELPER_2(sub_saturate, i32, i32, i32) -DEF_HELPER_2(add_usaturate, i32, i32, i32) -DEF_HELPER_2(sub_usaturate, i32, i32, i32) -DEF_HELPER_1(double_saturate, i32, s32) -DEF_HELPER_2(sdiv, s32, s32, s32) -DEF_HELPER_2(udiv, i32, i32, i32) -DEF_HELPER_1(rbit, i32, i32) -DEF_HELPER_1(abs, i32, i32) - -#define PAS_OP(pfx) \ - DEF_HELPER_3(pfx##add8, i32, i32, i32, ptr) \ - DEF_HELPER_3(pfx##sub8, i32, i32, i32, ptr) \ - DEF_HELPER_3(pfx##sub16, i32, i32, i32, ptr) \ - DEF_HELPER_3(pfx##add16, i32, i32, i32, ptr) \ - DEF_HELPER_3(pfx##addsubx, i32, i32, i32, ptr) \ - DEF_HELPER_3(pfx##subaddx, i32, i32, i32, ptr) - -PAS_OP(s) -PAS_OP(u) -#undef PAS_OP - -#define PAS_OP(pfx) \ - DEF_HELPER_2(pfx##add8, i32, i32, i32) \ - DEF_HELPER_2(pfx##sub8, i32, i32, i32) \ - DEF_HELPER_2(pfx##sub16, i32, i32, i32) \ - DEF_HELPER_2(pfx##add16, i32, i32, i32) \ - DEF_HELPER_2(pfx##addsubx, i32, i32, i32) \ - DEF_HELPER_2(pfx##subaddx, i32, i32, i32) -PAS_OP(q) -PAS_OP(sh) -PAS_OP(uq) -PAS_OP(uh) -#undef PAS_OP - -DEF_HELPER_2(ssat, i32, i32, i32) -DEF_HELPER_2(usat, i32, i32, i32) -DEF_HELPER_2(ssat16, i32, i32, i32) -DEF_HELPER_2(usat16, i32, i32, i32) - -DEF_HELPER_2(usad8, i32, i32, i32) - -DEF_HELPER_1(logicq_cc, i32, i64) - -DEF_HELPER_3(sel_flags, i32, i32, i32, i32) -DEF_HELPER_1(exception, void, i32) -DEF_HELPER_0(wfi, void) - -DEF_HELPER_2(get_r13_banked, i32, env, i32) -DEF_HELPER_3(set_r13_banked, void, env, i32, i32) - -// DEF_HELPER_2(get_r14_banked, i32, env, i32) -// DEF_HELPER_3(set_r14_banked, void, env, i32, i32) - -// DEF_HELPER_2(get_spsr_banked, i32, env, i32) -// DEF_HELPER_3(set_spsr_banked, void, env, i32, i32) - -DEF_HELPER_3(v7m_msr, void, env, i32, i32) -DEF_HELPER_2(v7m_mrs, i32, env, i32) - -DEF_HELPER_3(set_cp15, void, env, i32, i32) -DEF_HELPER_2(get_cp15, i32, env, i32) - -DEF_HELPER_3(set_cp, void, env, i32, i32) -DEF_HELPER_2(get_cp, i32, env, i32) - -DEF_HELPER_1(vfp_get_fpscr, i32, 
env) -DEF_HELPER_2(vfp_set_fpscr, void, env, i32) - -DEF_HELPER_3(vfp_adds, f32, f32, f32, ptr) -DEF_HELPER_3(vfp_addd, f64, f64, f64, ptr) -DEF_HELPER_3(vfp_subs, f32, f32, f32, ptr) -DEF_HELPER_3(vfp_subd, f64, f64, f64, ptr) -DEF_HELPER_3(vfp_muls, f32, f32, f32, ptr) -DEF_HELPER_3(vfp_muld, f64, f64, f64, ptr) -DEF_HELPER_3(vfp_divs, f32, f32, f32, ptr) -DEF_HELPER_3(vfp_divd, f64, f64, f64, ptr) -DEF_HELPER_1(vfp_negs, f32, f32) -DEF_HELPER_1(vfp_negd, f64, f64) -DEF_HELPER_1(vfp_abss, f32, f32) -DEF_HELPER_1(vfp_absd, f64, f64) -DEF_HELPER_2(vfp_sqrts, f32, f32, env) -DEF_HELPER_2(vfp_sqrtd, f64, f64, env) -DEF_HELPER_3(vfp_cmps, void, f32, f32, env) -DEF_HELPER_3(vfp_cmpd, void, f64, f64, env) -DEF_HELPER_3(vfp_cmpes, void, f32, f32, env) -DEF_HELPER_3(vfp_cmped, void, f64, f64, env) - -DEF_HELPER_2(vfp_fcvtds, f64, f32, env) -DEF_HELPER_2(vfp_fcvtsd, f32, f64, env) - -DEF_HELPER_2(vfp_uitos, f32, i32, ptr) -DEF_HELPER_2(vfp_uitod, f64, i32, ptr) -DEF_HELPER_2(vfp_sitos, f32, i32, ptr) -DEF_HELPER_2(vfp_sitod, f64, i32, ptr) - -DEF_HELPER_2(vfp_touis, i32, f32, ptr) -DEF_HELPER_2(vfp_touid, i32, f64, ptr) -DEF_HELPER_2(vfp_touizs, i32, f32, ptr) -DEF_HELPER_2(vfp_touizd, i32, f64, ptr) -DEF_HELPER_2(vfp_tosis, i32, f32, ptr) -DEF_HELPER_2(vfp_tosid, i32, f64, ptr) -DEF_HELPER_2(vfp_tosizs, i32, f32, ptr) -DEF_HELPER_2(vfp_tosizd, i32, f64, ptr) - -DEF_HELPER_3(vfp_toshs, i32, f32, i32, ptr) -DEF_HELPER_3(vfp_tosls, i32, f32, i32, ptr) -DEF_HELPER_3(vfp_touhs, i32, f32, i32, ptr) -DEF_HELPER_3(vfp_touls, i32, f32, i32, ptr) -DEF_HELPER_3(vfp_toshd, i64, f64, i32, ptr) -DEF_HELPER_3(vfp_tosld, i64, f64, i32, ptr) -DEF_HELPER_3(vfp_touhd, i64, f64, i32, ptr) -DEF_HELPER_3(vfp_tould, i64, f64, i32, ptr) -DEF_HELPER_3(vfp_shtos, f32, i32, i32, ptr) -DEF_HELPER_3(vfp_sltos, f32, i32, i32, ptr) -DEF_HELPER_3(vfp_uhtos, f32, i32, i32, ptr) -DEF_HELPER_3(vfp_ultos, f32, i32, i32, ptr) -DEF_HELPER_3(vfp_shtod, f64, i64, i32, ptr) -DEF_HELPER_3(vfp_sltod, f64, i64, i32, ptr) -DEF_HELPER_3(vfp_uhtod, f64, i64, i32, ptr) -DEF_HELPER_3(vfp_ultod, f64, i64, i32, ptr) - -DEF_HELPER_2(vfp_fcvt_f16_to_f32, f32, i32, env) -DEF_HELPER_2(vfp_fcvt_f32_to_f16, i32, f32, env) -DEF_HELPER_2(neon_fcvt_f16_to_f32, f32, i32, env) -DEF_HELPER_2(neon_fcvt_f32_to_f16, i32, f32, env) - -DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, ptr) -DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, ptr) - -DEF_HELPER_3(recps_f32, f32, f32, f32, env) -DEF_HELPER_3(rsqrts_f32, f32, f32, f32, env) -DEF_HELPER_2(recpe_f32, f32, f32, env) -DEF_HELPER_2(rsqrte_f32, f32, f32, env) -DEF_HELPER_2(recpe_u32, i32, i32, env) -DEF_HELPER_2(rsqrte_u32, i32, i32, env) -DEF_HELPER_4(neon_tbl, i32, i32, i32, i32, i32) - -DEF_HELPER_2(shl, i32, i32, i32) -DEF_HELPER_2(shr, i32, i32, i32) -DEF_HELPER_2(sar, i32, i32, i32) - -/* neon_helper.c */ -DEF_HELPER_3(neon_qadd_u8, i32, env, i32, i32) -DEF_HELPER_3(neon_qadd_s8, i32, env, i32, i32) -DEF_HELPER_3(neon_qadd_u16, i32, env, i32, i32) -DEF_HELPER_3(neon_qadd_s16, i32, env, i32, i32) -DEF_HELPER_3(neon_qadd_u32, i32, env, i32, i32) -DEF_HELPER_3(neon_qadd_s32, i32, env, i32, i32) -DEF_HELPER_3(neon_qsub_u8, i32, env, i32, i32) -DEF_HELPER_3(neon_qsub_s8, i32, env, i32, i32) -DEF_HELPER_3(neon_qsub_u16, i32, env, i32, i32) -DEF_HELPER_3(neon_qsub_s16, i32, env, i32, i32) -DEF_HELPER_3(neon_qsub_u32, i32, env, i32, i32) -DEF_HELPER_3(neon_qsub_s32, i32, env, i32, i32) -DEF_HELPER_3(neon_qadd_u64, i64, env, i64, i64) -DEF_HELPER_3(neon_qadd_s64, i64, env, i64, i64) -DEF_HELPER_3(neon_qsub_u64, i64, 
env, i64, i64) -DEF_HELPER_3(neon_qsub_s64, i64, env, i64, i64) - -DEF_HELPER_2(neon_hadd_s8, i32, i32, i32) -DEF_HELPER_2(neon_hadd_u8, i32, i32, i32) -DEF_HELPER_2(neon_hadd_s16, i32, i32, i32) -DEF_HELPER_2(neon_hadd_u16, i32, i32, i32) -DEF_HELPER_2(neon_hadd_s32, s32, s32, s32) -DEF_HELPER_2(neon_hadd_u32, i32, i32, i32) -DEF_HELPER_2(neon_rhadd_s8, i32, i32, i32) -DEF_HELPER_2(neon_rhadd_u8, i32, i32, i32) -DEF_HELPER_2(neon_rhadd_s16, i32, i32, i32) -DEF_HELPER_2(neon_rhadd_u16, i32, i32, i32) -DEF_HELPER_2(neon_rhadd_s32, s32, s32, s32) -DEF_HELPER_2(neon_rhadd_u32, i32, i32, i32) -DEF_HELPER_2(neon_hsub_s8, i32, i32, i32) -DEF_HELPER_2(neon_hsub_u8, i32, i32, i32) -DEF_HELPER_2(neon_hsub_s16, i32, i32, i32) -DEF_HELPER_2(neon_hsub_u16, i32, i32, i32) -DEF_HELPER_2(neon_hsub_s32, s32, s32, s32) -DEF_HELPER_2(neon_hsub_u32, i32, i32, i32) - -DEF_HELPER_2(neon_cgt_u8, i32, i32, i32) -DEF_HELPER_2(neon_cgt_s8, i32, i32, i32) -DEF_HELPER_2(neon_cgt_u16, i32, i32, i32) -DEF_HELPER_2(neon_cgt_s16, i32, i32, i32) -DEF_HELPER_2(neon_cgt_u32, i32, i32, i32) -DEF_HELPER_2(neon_cgt_s32, i32, i32, i32) -DEF_HELPER_2(neon_cge_u8, i32, i32, i32) -DEF_HELPER_2(neon_cge_s8, i32, i32, i32) -DEF_HELPER_2(neon_cge_u16, i32, i32, i32) -DEF_HELPER_2(neon_cge_s16, i32, i32, i32) -DEF_HELPER_2(neon_cge_u32, i32, i32, i32) -DEF_HELPER_2(neon_cge_s32, i32, i32, i32) - -DEF_HELPER_2(neon_min_u8, i32, i32, i32) -DEF_HELPER_2(neon_min_s8, i32, i32, i32) -DEF_HELPER_2(neon_min_u16, i32, i32, i32) -DEF_HELPER_2(neon_min_s16, i32, i32, i32) -DEF_HELPER_2(neon_min_u32, i32, i32, i32) -DEF_HELPER_2(neon_min_s32, i32, i32, i32) -DEF_HELPER_2(neon_max_u8, i32, i32, i32) -DEF_HELPER_2(neon_max_s8, i32, i32, i32) -DEF_HELPER_2(neon_max_u16, i32, i32, i32) -DEF_HELPER_2(neon_max_s16, i32, i32, i32) -DEF_HELPER_2(neon_max_u32, i32, i32, i32) -DEF_HELPER_2(neon_max_s32, i32, i32, i32) -DEF_HELPER_2(neon_pmin_u8, i32, i32, i32) -DEF_HELPER_2(neon_pmin_s8, i32, i32, i32) -DEF_HELPER_2(neon_pmin_u16, i32, i32, i32) -DEF_HELPER_2(neon_pmin_s16, i32, i32, i32) -DEF_HELPER_2(neon_pmax_u8, i32, i32, i32) -DEF_HELPER_2(neon_pmax_s8, i32, i32, i32) -DEF_HELPER_2(neon_pmax_u16, i32, i32, i32) -DEF_HELPER_2(neon_pmax_s16, i32, i32, i32) - -DEF_HELPER_2(neon_abd_u8, i32, i32, i32) -DEF_HELPER_2(neon_abd_s8, i32, i32, i32) -DEF_HELPER_2(neon_abd_u16, i32, i32, i32) -DEF_HELPER_2(neon_abd_s16, i32, i32, i32) -DEF_HELPER_2(neon_abd_u32, i32, i32, i32) -DEF_HELPER_2(neon_abd_s32, i32, i32, i32) - -DEF_HELPER_2(neon_shl_u8, i32, i32, i32) -DEF_HELPER_2(neon_shl_s8, i32, i32, i32) -DEF_HELPER_2(neon_shl_u16, i32, i32, i32) -DEF_HELPER_2(neon_shl_s16, i32, i32, i32) -DEF_HELPER_2(neon_shl_u32, i32, i32, i32) -DEF_HELPER_2(neon_shl_s32, i32, i32, i32) -DEF_HELPER_2(neon_shl_u64, i64, i64, i64) -DEF_HELPER_2(neon_shl_s64, i64, i64, i64) -DEF_HELPER_2(neon_rshl_u8, i32, i32, i32) -DEF_HELPER_2(neon_rshl_s8, i32, i32, i32) -DEF_HELPER_2(neon_rshl_u16, i32, i32, i32) -DEF_HELPER_2(neon_rshl_s16, i32, i32, i32) -DEF_HELPER_2(neon_rshl_u32, i32, i32, i32) -DEF_HELPER_2(neon_rshl_s32, i32, i32, i32) -DEF_HELPER_2(neon_rshl_u64, i64, i64, i64) -DEF_HELPER_2(neon_rshl_s64, i64, i64, i64) -DEF_HELPER_3(neon_qshl_u8, i32, env, i32, i32) -DEF_HELPER_3(neon_qshl_s8, i32, env, i32, i32) -DEF_HELPER_3(neon_qshl_u16, i32, env, i32, i32) -DEF_HELPER_3(neon_qshl_s16, i32, env, i32, i32) -DEF_HELPER_3(neon_qshl_u32, i32, env, i32, i32) -DEF_HELPER_3(neon_qshl_s32, i32, env, i32, i32) -DEF_HELPER_3(neon_qshl_u64, i64, env, i64, i64) 
-DEF_HELPER_3(neon_qshl_s64, i64, env, i64, i64) -DEF_HELPER_3(neon_qshlu_s8, i32, env, i32, i32); -DEF_HELPER_3(neon_qshlu_s16, i32, env, i32, i32); -DEF_HELPER_3(neon_qshlu_s32, i32, env, i32, i32); -DEF_HELPER_3(neon_qshlu_s64, i64, env, i64, i64); -DEF_HELPER_3(neon_qrshl_u8, i32, env, i32, i32) -// DEF_HELPER_3(neon_qrshl_s8, i32, env, i32, i32) -DEF_HELPER_3(neon_qrshl_u16, i32, env, i32, i32) -// DEF_HELPER_3(neon_qrshl_s16, i32, env, i32, i32) -DEF_HELPER_3(neon_qrshl_u32, i32, env, i32, i32) -DEF_HELPER_3(neon_qrshl_s32, i32, env, i32, i32) -DEF_HELPER_3(neon_qrshl_u64, i64, env, i64, i64) -DEF_HELPER_3(neon_qrshl_s64, i64, env, i64, i64) - -DEF_HELPER_2(neon_add_u8, i32, i32, i32) -DEF_HELPER_2(neon_add_u16, i32, i32, i32) -DEF_HELPER_2(neon_padd_u8, i32, i32, i32) -DEF_HELPER_2(neon_padd_u16, i32, i32, i32) -DEF_HELPER_2(neon_sub_u8, i32, i32, i32) -DEF_HELPER_2(neon_sub_u16, i32, i32, i32) -DEF_HELPER_2(neon_mul_u8, i32, i32, i32) -DEF_HELPER_2(neon_mul_u16, i32, i32, i32) -DEF_HELPER_2(neon_mul_p8, i32, i32, i32) -DEF_HELPER_2(neon_mull_p8, i64, i32, i32) - -DEF_HELPER_2(neon_tst_u8, i32, i32, i32) -DEF_HELPER_2(neon_tst_u16, i32, i32, i32) -DEF_HELPER_2(neon_tst_u32, i32, i32, i32) -DEF_HELPER_2(neon_ceq_u8, i32, i32, i32) -DEF_HELPER_2(neon_ceq_u16, i32, i32, i32) -DEF_HELPER_2(neon_ceq_u32, i32, i32, i32) - -DEF_HELPER_1(neon_abs_s8, i32, i32) -DEF_HELPER_1(neon_abs_s16, i32, i32) -DEF_HELPER_1(neon_clz_u8, i32, i32) -DEF_HELPER_1(neon_clz_u16, i32, i32) -DEF_HELPER_1(neon_cls_s8, i32, i32) -DEF_HELPER_1(neon_cls_s16, i32, i32) -DEF_HELPER_1(neon_cls_s32, i32, i32) -DEF_HELPER_1(neon_cnt_u8, i32, i32) - -DEF_HELPER_3(neon_qdmulh_s16, i32, env, i32, i32) -DEF_HELPER_3(neon_qrdmulh_s16, i32, env, i32, i32) -DEF_HELPER_3(neon_qdmulh_s32, i32, env, i32, i32) -DEF_HELPER_3(neon_qrdmulh_s32, i32, env, i32, i32) - -DEF_HELPER_1(neon_narrow_u8, i32, i64) -DEF_HELPER_1(neon_narrow_u16, i32, i64) -DEF_HELPER_2(neon_unarrow_sat8, i32, env, i64) -DEF_HELPER_2(neon_narrow_sat_u8, i32, env, i64) -DEF_HELPER_2(neon_narrow_sat_s8, i32, env, i64) -DEF_HELPER_2(neon_unarrow_sat16, i32, env, i64) -DEF_HELPER_2(neon_narrow_sat_u16, i32, env, i64) -DEF_HELPER_2(neon_narrow_sat_s16, i32, env, i64) -DEF_HELPER_2(neon_unarrow_sat32, i32, env, i64) -DEF_HELPER_2(neon_narrow_sat_u32, i32, env, i64) -DEF_HELPER_2(neon_narrow_sat_s32, i32, env, i64) -DEF_HELPER_1(neon_narrow_high_u8, i32, i64) -DEF_HELPER_1(neon_narrow_high_u16, i32, i64) -DEF_HELPER_1(neon_narrow_round_high_u8, i32, i64) -DEF_HELPER_1(neon_narrow_round_high_u16, i32, i64) -DEF_HELPER_1(neon_widen_u8, i64, i32) -DEF_HELPER_1(neon_widen_s8, i64, i32) -DEF_HELPER_1(neon_widen_u16, i64, i32) -DEF_HELPER_1(neon_widen_s16, i64, i32) - -DEF_HELPER_2(neon_addl_u16, i64, i64, i64) -DEF_HELPER_2(neon_addl_u32, i64, i64, i64) -DEF_HELPER_2(neon_paddl_u16, i64, i64, i64) -DEF_HELPER_2(neon_paddl_u32, i64, i64, i64) -DEF_HELPER_2(neon_subl_u16, i64, i64, i64) -DEF_HELPER_2(neon_subl_u32, i64, i64, i64) -DEF_HELPER_3(neon_addl_saturate_s32, i64, env, i64, i64) -DEF_HELPER_3(neon_addl_saturate_s64, i64, env, i64, i64) -DEF_HELPER_2(neon_abdl_u16, i64, i32, i32) -DEF_HELPER_2(neon_abdl_s16, i64, i32, i32) -DEF_HELPER_2(neon_abdl_u32, i64, i32, i32) -DEF_HELPER_2(neon_abdl_s32, i64, i32, i32) -DEF_HELPER_2(neon_abdl_u64, i64, i32, i32) -DEF_HELPER_2(neon_abdl_s64, i64, i32, i32) -DEF_HELPER_2(neon_mull_u8, i64, i32, i32) -DEF_HELPER_2(neon_mull_s8, i64, i32, i32) -DEF_HELPER_2(neon_mull_u16, i64, i32, i32) -DEF_HELPER_2(neon_mull_s16, i64, i32, i32) 
- -DEF_HELPER_1(neon_negl_u16, i64, i64) -DEF_HELPER_1(neon_negl_u32, i64, i64) -DEF_HELPER_1(neon_negl_u64, i64, i64) - -DEF_HELPER_2(neon_qabs_s8, i32, env, i32) -DEF_HELPER_2(neon_qabs_s16, i32, env, i32) -DEF_HELPER_2(neon_qabs_s32, i32, env, i32) -DEF_HELPER_2(neon_qneg_s8, i32, env, i32) -DEF_HELPER_2(neon_qneg_s16, i32, env, i32) -DEF_HELPER_2(neon_qneg_s32, i32, env, i32) - -DEF_HELPER_3(neon_min_f32, i32, i32, i32, ptr) -DEF_HELPER_3(neon_max_f32, i32, i32, i32, ptr) -DEF_HELPER_3(neon_abd_f32, i32, i32, i32, ptr) -DEF_HELPER_3(neon_ceq_f32, i32, i32, i32, ptr) -DEF_HELPER_3(neon_cge_f32, i32, i32, i32, ptr) -DEF_HELPER_3(neon_cgt_f32, i32, i32, i32, ptr) -DEF_HELPER_3(neon_acge_f32, i32, i32, i32, ptr) -DEF_HELPER_3(neon_acgt_f32, i32, i32, i32, ptr) - -/* iwmmxt_helper.c */ -DEF_HELPER_2(iwmmxt_maddsq, i64, i64, i64) -DEF_HELPER_2(iwmmxt_madduq, i64, i64, i64) -DEF_HELPER_2(iwmmxt_sadb, i64, i64, i64) -DEF_HELPER_2(iwmmxt_sadw, i64, i64, i64) -DEF_HELPER_2(iwmmxt_mulslw, i64, i64, i64) -DEF_HELPER_2(iwmmxt_mulshw, i64, i64, i64) -DEF_HELPER_2(iwmmxt_mululw, i64, i64, i64) -DEF_HELPER_2(iwmmxt_muluhw, i64, i64, i64) -DEF_HELPER_2(iwmmxt_macsw, i64, i64, i64) -DEF_HELPER_2(iwmmxt_macuw, i64, i64, i64) -DEF_HELPER_1(iwmmxt_setpsr_nz, i32, i64) - -#define DEF_IWMMXT_HELPER_SIZE_ENV(name) \ - DEF_HELPER_3(iwmmxt_##name##b, i64, env, i64, i64) \ - DEF_HELPER_3(iwmmxt_##name##w, i64, env, i64, i64) \ - DEF_HELPER_3(iwmmxt_##name##l, i64, env, i64, i64) - -DEF_IWMMXT_HELPER_SIZE_ENV(unpackl) -DEF_IWMMXT_HELPER_SIZE_ENV(unpackh) - -DEF_HELPER_2(iwmmxt_unpacklub, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpackluw, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpacklul, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpackhub, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpackhuw, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpackhul, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpacklsb, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpacklsw, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpacklsl, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpackhsb, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpackhsw, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpackhsl, i64, env, i64) - -DEF_IWMMXT_HELPER_SIZE_ENV(cmpeq) -DEF_IWMMXT_HELPER_SIZE_ENV(cmpgtu) -DEF_IWMMXT_HELPER_SIZE_ENV(cmpgts) - -DEF_IWMMXT_HELPER_SIZE_ENV(mins) -DEF_IWMMXT_HELPER_SIZE_ENV(minu) -DEF_IWMMXT_HELPER_SIZE_ENV(maxs) -DEF_IWMMXT_HELPER_SIZE_ENV(maxu) - -DEF_IWMMXT_HELPER_SIZE_ENV(subn) -DEF_IWMMXT_HELPER_SIZE_ENV(addn) -DEF_IWMMXT_HELPER_SIZE_ENV(subu) -DEF_IWMMXT_HELPER_SIZE_ENV(addu) -DEF_IWMMXT_HELPER_SIZE_ENV(subs) -DEF_IWMMXT_HELPER_SIZE_ENV(adds) - -DEF_HELPER_3(iwmmxt_avgb0, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_avgb1, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_avgw0, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_avgw1, i64, env, i64, i64) - -DEF_HELPER_2(iwmmxt_msadb, i64, i64, i64) - -DEF_HELPER_3(iwmmxt_align, i64, i64, i64, i32) -DEF_HELPER_4(iwmmxt_insr, i64, i64, i32, i32, i32) - -DEF_HELPER_1(iwmmxt_bcstb, i64, i32) -DEF_HELPER_1(iwmmxt_bcstw, i64, i32) -DEF_HELPER_1(iwmmxt_bcstl, i64, i32) - -DEF_HELPER_1(iwmmxt_addcb, i64, i64) -DEF_HELPER_1(iwmmxt_addcw, i64, i64) -DEF_HELPER_1(iwmmxt_addcl, i64, i64) - -DEF_HELPER_1(iwmmxt_msbb, i32, i64) -DEF_HELPER_1(iwmmxt_msbw, i32, i64) -DEF_HELPER_1(iwmmxt_msbl, i32, i64) - -DEF_HELPER_3(iwmmxt_srlw, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_srll, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_srlq, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_sllw, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_slll, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_sllq, i64, env, i64, i32) 
-DEF_HELPER_3(iwmmxt_sraw, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_sral, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_sraq, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_rorw, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_rorl, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_rorq, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_shufh, i64, env, i64, i32) - -DEF_HELPER_3(iwmmxt_packuw, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_packul, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_packuq, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_packsw, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_packsl, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_packsq, i64, env, i64, i64) - -DEF_HELPER_3(iwmmxt_muladdsl, i64, i64, i32, i32) -DEF_HELPER_3(iwmmxt_muladdsw, i64, i64, i32, i32) -DEF_HELPER_3(iwmmxt_muladdswl, i64, i64, i32, i32) - -DEF_HELPER_2(set_teecr, void, env, i32) - -DEF_HELPER_3(neon_unzip8, void, env, i32, i32) -DEF_HELPER_3(neon_unzip16, void, env, i32, i32) -DEF_HELPER_3(neon_qunzip8, void, env, i32, i32) -DEF_HELPER_3(neon_qunzip16, void, env, i32, i32) -DEF_HELPER_3(neon_qunzip32, void, env, i32, i32) -DEF_HELPER_3(neon_zip8, void, env, i32, i32) -DEF_HELPER_3(neon_zip16, void, env, i32, i32) -DEF_HELPER_3(neon_qzip8, void, env, i32, i32) -DEF_HELPER_3(neon_qzip16, void, env, i32, i32) -DEF_HELPER_3(neon_qzip32, void, env, i32, i32) - -#include "def-helper.h" diff --git a/src/target-arm/iwmmxt_helper.c b/src/target-arm/iwmmxt_helper.c index 02d55af..ddce171 100644 --- a/src/target-arm/iwmmxt_helper.c +++ b/src/target-arm/iwmmxt_helper.c @@ -24,8 +24,7 @@ #include "cpu.h" #include "exec-all.h" -#include "helper.h" - +#include /* iwMMXt macros extracted from GNU gdb. */ /* Set the SIMD wCASF flags for 8, 16, 32 or 64-bit operations. */ diff --git a/src/target-arm/neon_helper.c b/src/target-arm/neon_helper.c index d231ac5..8db7825 100644 --- a/src/target-arm/neon_helper.c +++ b/src/target-arm/neon_helper.c @@ -11,7 +11,7 @@ #include "cpu.h" #include "exec-all.h" -#include "helper.h" +#include #define SIGNBIT (uint32_t) 0x80000000 #define SIGNBIT64 ((uint64_t) 1 << 63) diff --git a/src/target-arm/op_helper.c b/src/target-arm/op_helper.c index d0c69a2..360fa11 100644 --- a/src/target-arm/op_helper.c +++ b/src/target-arm/op_helper.c @@ -18,8 +18,8 @@ #include "cpu-defs.h" #include "cpu.h" -#include "dyngen-exec.h" -#include "helper.h" + +#include #define SIGNBIT (uint32_t) 0x80000000 #define SIGNBIT64 ((uint64_t) 1 << 63) diff --git a/src/target-arm/translate.c b/src/target-arm/translate.c index 9b14355..61862cb 100644 --- a/src/target-arm/translate.c +++ b/src/target-arm/translate.c @@ -29,10 +29,6 @@ #include // clang-format on -#include "helper.h" -#define GEN_HELPER 1 -#include "helper.h" - #include #ifdef CONFIG_SYMBEX From e7b51a2a4f05fb867427c52a0c51fb44a7cef0a0 Mon Sep 17 00:00:00 2001 From: chaojixx Date: Sat, 28 Dec 2019 11:08:55 -0500 Subject: [PATCH 22/59] S2E/issue/269-upgrade update from tcg 1.0 to tcg 4.0 Signed-off-by: chaojixx --- src/exec-all.h | 8 +- src/target-arm/helper.c | 8 +- src/target-arm/op_helper.c | 38 ++-- src/target-arm/translate.c | 355 ++++++++++++++++--------------------- 4 files changed, 172 insertions(+), 237 deletions(-) diff --git a/src/exec-all.h b/src/exec-all.h index e8320ca..fd7527e 100644 --- a/src/exec-all.h +++ b/src/exec-all.h @@ -151,8 +151,8 @@ extern int tb_invalidated_flag; #undef MEMSUFFIX #undef env #if defined(TARGET_ARM) -static inline uint32_t arm_ldl_code(uint32_t addr, bool do_swap) { - uint32_t insn = ldl_code(addr); +static inline uint32_t arm_ldl_code(CPUArchState *env, uint32_t 
addr, bool do_swap) { + uint32_t insn = cpu_ldl_code(env, addr); if (do_swap) { return bswap32(insn); } @@ -160,8 +160,8 @@ static inline uint32_t arm_ldl_code(uint32_t addr, bool do_swap) { } /* Ditto, for a halfword (Thumb) instruction */ -static inline uint16_t arm_lduw_code(uint32_t addr, bool do_swap) { - uint16_t insn = lduw_code(addr); +static inline uint16_t arm_lduw_code(CPUArchState *env, uint32_t addr, bool do_swap) { + uint16_t insn = cpu_lduw_code(env, addr); if (do_swap) { return bswap16(insn); } diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c index 15ae5e3..2c663ad 100644 --- a/src/target-arm/helper.c +++ b/src/target-arm/helper.c @@ -799,7 +799,7 @@ void do_interrupt_v7m(CPUARMState *env) { case EXCP_BKPT: if (semihosting_enabled) { int nr; - nr = arm_lduw_code(env->regs[15], env->bswap_code) & 0xff; + nr = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff; if (nr == 0xab) { env->regs[15] += 2; WR_cpu(env,regs[0],do_arm_semihosting(env)); @@ -896,9 +896,9 @@ void do_interrupt(CPUARMState *env) { if (semihosting_enabled) { /* Check for semihosting interrupt. */ if (env->thumb) { - mask = arm_lduw_code(env->regs[15] - 2, env->bswap_code) & 0xff; + mask = arm_lduw_code(env, env->regs[15] - 2, env->bswap_code) & 0xff; } else { - mask = arm_ldl_code(env->regs[15] - 4, env->bswap_code) & 0xffffff; + mask = arm_ldl_code(env, env->regs[15] - 4, env->bswap_code) & 0xffffff; } /* Only intercept calls from privileged modes, to provide some semblance of security. */ @@ -917,7 +917,7 @@ void do_interrupt(CPUARMState *env) { case EXCP_BKPT: /* See if this is a semihosting syscall. */ if (env->thumb && semihosting_enabled) { - mask = arm_lduw_code(env->regs[15], env->bswap_code) & 0xff; + mask = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff; if (mask == 0xab && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) { env->regs[15] += 2; WR_cpu(env,regs[0],do_arm_semihosting(env)); diff --git a/src/target-arm/op_helper.c b/src/target-arm/op_helper.c index 360fa11..2b1cff7 100644 --- a/src/target-arm/op_helper.c +++ b/src/target-arm/op_helper.c @@ -24,6 +24,7 @@ #define SIGNBIT (uint32_t) 0x80000000 #define SIGNBIT64 ((uint64_t) 1 << 63) +// SYMBEX: Keep the environment in a variable struct CPUARMState *env = 0; static void raise_exception(int tt) { @@ -103,9 +104,7 @@ void se_do_interrupt_arm(void) from generated code or from helper.c) */ /* XXX: fix it to restore all registers */ void tlb_fill(CPUArchState *env1, target_ulong addr, target_ulong page_addr, int is_write, int mmu_idx, void *retaddr) { - TranslationBlock *tb; CPUArchState *saved_env; - unsigned long pc; int ret; saved_env = env; @@ -133,17 +132,6 @@ void tlb_fill(CPUArchState *env1, target_ulong addr, target_ulong page_addr, int } #endif - if (retaddr) { - /* now we have a real cpu fault */ - pc = (uintptr_t) retaddr; - tb = tb_find_pc(pc); - if (tb) { - /* the PC is inside the translated code. 
It means that we have - a virtual CPU fault */ - cpu_restore_state(tb, env, pc); - } - }; - #ifdef CONFIG_SYMBEX if (unlikely(*g_sqi.events.on_page_fault_signals_count)) { g_sqi.events.on_page_fault(addr, is_write, retaddr); @@ -286,16 +274,16 @@ void HELPER(exception)(uint32_t excp) { cpu_loop_exit(env); } -uint32_t HELPER(cpsr_read)(void) { +uint32_t HELPER(cpsr_read)(CPUARMState *env) { return cpsr_read(env) & ~CPSR_EXEC; } -void HELPER(cpsr_write)(uint32_t val, uint32_t mask) { +void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask) { cpsr_write(env, val, mask); } /* Access to user mode registers from privileged modes. */ -uint32_t HELPER(get_user_reg)(uint32_t regno) { +uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno) { uint32_t val; if (regno == 13) { @@ -312,7 +300,7 @@ uint32_t HELPER(get_user_reg)(uint32_t regno) { return val; } -void HELPER(set_user_reg)(uint32_t regno, uint32_t val) { +void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val) { if (regno == 13) { WR_cpu(env, banked_r13[0], val); } else if (regno == 14) { @@ -330,7 +318,7 @@ void HELPER(set_user_reg)(uint32_t regno, uint32_t val) { The only way to do that in TCG is a conditional branch, which clobbers all our temporaries. For now implement these as helper functions. */ -uint32_t HELPER(add_cc)(uint32_t a, uint32_t b) { +uint32_t HELPER(add_cc)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t result; result = a + b; WR_cpu(env, NF, result); @@ -340,7 +328,7 @@ uint32_t HELPER(add_cc)(uint32_t a, uint32_t b) { return result; } -uint32_t HELPER(adc_cc)(uint32_t a, uint32_t b) { +uint32_t HELPER(adc_cc)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t result; if (!(RR_cpu(env, CF))) { result = a + b; @@ -355,7 +343,7 @@ uint32_t HELPER(adc_cc)(uint32_t a, uint32_t b) { return result; } -uint32_t HELPER(sub_cc)(uint32_t a, uint32_t b) { +uint32_t HELPER(sub_cc)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t result; result = a - b; WR_cpu(env, NF, result); @@ -365,7 +353,7 @@ uint32_t HELPER(sub_cc)(uint32_t a, uint32_t b) { return result; } -uint32_t HELPER(sbc_cc)(uint32_t a, uint32_t b) { +uint32_t HELPER(sbc_cc)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t result; if (!(RR_cpu(env, CF))) { result = a - b - 1; @@ -403,7 +391,7 @@ uint32_t HELPER(sar)(uint32_t x, uint32_t i) { return (int32_t) x >> shift; } -uint32_t HELPER(shl_cc)(uint32_t x, uint32_t i) { +uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i) { int shift = i & 0xff; if (shift >= 32) { if (shift == 32) @@ -418,7 +406,7 @@ uint32_t HELPER(shl_cc)(uint32_t x, uint32_t i) { return x; } -uint32_t HELPER(shr_cc)(uint32_t x, uint32_t i) { +uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i) { int shift = i & 0xff; if (shift >= 32) { if (shift == 32) @@ -433,7 +421,7 @@ uint32_t HELPER(shr_cc)(uint32_t x, uint32_t i) { return x; } -uint32_t HELPER(sar_cc)(uint32_t x, uint32_t i) { +uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i) { int shift = i & 0xff; if (shift >= 32) { WR_cpu(env, CF, ((x >> 31) & 1)); @@ -445,7 +433,7 @@ uint32_t HELPER(sar_cc)(uint32_t x, uint32_t i) { return x; } -uint32_t HELPER(ror_cc)(uint32_t x, uint32_t i) { +uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i) { int shift1, shift; shift1 = i & 0xff; shift = shift1 & 0x1f; diff --git a/src/target-arm/translate.c b/src/target-arm/translate.c index 61862cb..e505181 100644 --- a/src/target-arm/translate.c +++ b/src/target-arm/translate.c @@ -66,7 +66,7 @@ 
typedef struct DisasContext { /* Nonzero if this instruction has been conditionally skipped. */ int condjmp; /* The label that will be jumped to when the instruction is skipped. */ - int condlabel; + TCGLabel *condlabel; /* Thumb-2 condtional execution bits. */ int condexec_mask; int condexec_cond; @@ -96,13 +96,13 @@ typedef struct DisasContext { int instrument; /* 1 when it is ok to call plugin code */ int invalid_instr; /* tb contains invalid instruction */ #endif + CPUARMState *env; } DisasContext; #ifdef CONFIG_SYMBEX #define SET_TB_TYPE(t) s->tb->se_tb_type = t; #else #define SET_TB_TYPE(t) #endif -static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE]; #if defined(CONFIG_USER_ONLY) #define IS_USER(s) 1 @@ -115,7 +115,6 @@ static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE]; #define DISAS_WFI 4 #define DISAS_SWI 5 -static TCGv_ptr cpu_env; /* We reuse the same 64-bit temporaries for efficiency. */ static TCGv_i64 cpu_V0, cpu_V1, cpu_M0; static TCGv_i32 cpu_R[16]; @@ -143,11 +142,11 @@ static inline void instr_translate_compute_reg_mask_end(DisasContext *dc) { return; } - tcg_calc_regmask_ex(&tcg_ctx, &rmask, &wmask, &accesses_mem, dc->ins_opc, dc->ins_arg); + tcg_calc_regmask(tcg_ctx, &rmask, &wmask, &accesses_mem); - // First five bits contain flag registers - rmask >>= 5; - wmask >>= 5; + // First six bits denote access to flags registers + env ptr + rmask >>= 6; + wmask >>= 6; g_sqi.events.on_translate_register_access(dc->tb, dc->insPc, rmask, wmask, (int) accesses_mem); @@ -196,9 +195,6 @@ static inline void gen_instr_end(DisasContext *s) { static inline void gen_eob_event(DisasContext *s, int static_target, target_ulong target_pc) { gen_instr_end(s); - if (unlikely(*g_sqi.events.on_translate_register_access_signals_count && s->instrument)) - instr_translate_compute_reg_mask_end(s); - if (unlikely(*g_sqi.events.on_translate_block_end_signals_count && s->instrument)) g_sqi.events.on_translate_block_end(s->tb, s->insPc, static_target, target_pc); @@ -218,21 +214,38 @@ static inline void gen_eob_event(DisasContext *s, int static_target, target_ulon void arm_translate_init(void) { int i; - cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); - for (i = 0; i < 16; i++) { - cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, regs[i]), regnames[i]); + cpu_R[i] = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, regs[i]), regnames[i]); } - cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, exclusive_addr), "exclusive_addr"); - cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, exclusive_val), "exclusive_val"); - cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, exclusive_high), "exclusive_high"); + cpu_exclusive_addr = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, exclusive_addr), "exclusive_addr"); + cpu_exclusive_val = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, exclusive_val), "exclusive_val"); + cpu_exclusive_high = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, exclusive_high), "exclusive_high"); #ifdef CONFIG_USER_ONLY - cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, exclusive_test), "exclusive_test"); - cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, exclusive_info), "exclusive_info"); + cpu_exclusive_test = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, exclusive_test), "exclusive_test"); + cpu_exclusive_info = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, exclusive_info), "exclusive_info"); 
#endif +} + +static inline void gen_tb_start(TranslationBlock *tb) { + if (tb->cflags & CF_HAS_INTERRUPT_EXIT) { + TCGv_i32 exit_request; + + tcg_ctx->exitreq_label = gen_new_label(); + exit_request = tcg_temp_new_i32(); + tcg_gen_ld_i32(exit_request, cpu_env, offsetof(CPUState, exit_request)); + + tcg_gen_brcondi_i32(TCG_COND_NE, exit_request, 0, tcg_ctx->exitreq_label); -#define GEN_HELPER 2 -#include "helper.h" + tcg_temp_free_i32(exit_request); + } +} + +static inline void gen_tb_end(TranslationBlock *tb) { + if (tb->cflags & CF_HAS_INTERRUPT_EXIT) { + assert(tcg_ctx->exitreq_label); + gen_set_label(tcg_ctx->exitreq_label); + tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED); + } } static inline TCGv load_cpu_offset(int offset) { @@ -294,7 +307,7 @@ static void store_reg(DisasContext *s, int reg, TCGv var) { static inline void gen_set_cpsr(TCGv var, uint32_t mask) { TCGv tmp_mask = tcg_const_i32(mask); - gen_helper_cpsr_write(var, tmp_mask); + gen_helper_cpsr_write(cpu_env, var, tmp_mask); tcg_temp_free_i32(tmp_mask); } /* Set NZCV flags from the high 4 bits of var. */ @@ -565,16 +578,16 @@ static inline void gen_arm_shift_reg(TCGv var, int shiftop, TCGv shift, int flag if (flags) { switch (shiftop) { case 0: - gen_helper_shl_cc(var, var, shift); + gen_helper_shl_cc(var, cpu_env, var, shift); break; case 1: - gen_helper_shr_cc(var, var, shift); + gen_helper_shr_cc(var, cpu_env, var, shift); break; case 2: - gen_helper_sar_cc(var, var, shift); + gen_helper_sar_cc(var, cpu_env, var, shift); break; case 3: - gen_helper_ror_cc(var, var, shift); + gen_helper_ror_cc(var, cpu_env, var, shift); break; } } else { @@ -712,10 +725,10 @@ static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b) { } #undef PAS_OP -static void gen_test_cc(int cc, int label) { +static void gen_test_cc(int cc, TCGLabel *label) { TCGv tmp; TCGv tmp2; - int inv; + TCGLabel *inv; switch (cc) { case 0: /* eq: Z */ @@ -1401,7 +1414,7 @@ static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest) { } else { tmp = tcg_temp_new_i32(); iwmmxt_load_reg(cpu_V0, rd); - tcg_gen_trunc_i64_i32(tmp, cpu_V0); + tcg_gen_trunc_i64_tl(tmp, cpu_V0); } tcg_gen_andi_i32(tmp, tmp, mask); tcg_gen_mov_i32(dest, tmp); @@ -1424,9 +1437,9 @@ static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn) { rdhi = (insn >> 16) & 0xf; if (insn & ARM_CP_RW_BIT) { /* TMRRC */ iwmmxt_load_reg(cpu_V0, wrd); - tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0); + tcg_gen_trunc_i64_tl(cpu_R[rdlo], cpu_V0); tcg_gen_shri_i64(cpu_V0, cpu_V0, 32); - tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0); + tcg_gen_trunc_i64_tl(cpu_R[rdhi], cpu_V0); } else { /* TMCRR */ tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]); iwmmxt_store_reg(cpu_V0, wrd); @@ -1480,15 +1493,15 @@ static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn) { tcg_temp_free_i32(tmp); tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s)); } else { /* WSTRW wRd */ - tcg_gen_trunc_i64_i32(tmp, cpu_M0); + tcg_gen_trunc_i64_tl(tmp, cpu_M0); gen_st32(tmp, addr, IS_USER(s)); } } else { if (insn & (1 << 22)) { /* WSTRH */ - tcg_gen_trunc_i64_i32(tmp, cpu_M0); + tcg_gen_trunc_i64_tl(tmp, cpu_M0); gen_st16(tmp, addr, IS_USER(s)); } else { /* WSTRB */ - tcg_gen_trunc_i64_i32(tmp, cpu_M0); + tcg_gen_trunc_i64_tl(tmp, cpu_M0); gen_st8(tmp, addr, IS_USER(s)); } } @@ -1795,8 +1808,8 @@ static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn) { tmp3 = tcg_const_i32((insn & 1) << 5); break; default: - TCGV_UNUSED(tmp2); - TCGV_UNUSED(tmp3); + tmp2 = 
NULL; + tmp3 = NULL; } gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3); tcg_temp_free(tmp3); @@ -1818,7 +1831,7 @@ static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn) { switch ((insn >> 22) & 3) { case 0: tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3); - tcg_gen_trunc_i64_i32(tmp, cpu_M0); + tcg_gen_trunc_i64_tl(tmp, cpu_M0); if (insn & 8) { tcg_gen_ext8s_i32(tmp, tmp); } else { @@ -1827,7 +1840,7 @@ static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn) { break; case 1: tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4); - tcg_gen_trunc_i64_i32(tmp, cpu_M0); + tcg_gen_trunc_i64_tl(tmp, cpu_M0); if (insn & 8) { tcg_gen_ext16s_i32(tmp, tmp); } else { @@ -1836,7 +1849,7 @@ static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn) { break; case 2: tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5); - tcg_gen_trunc_i64_i32(tmp, cpu_M0); + tcg_gen_trunc_i64_tl(tmp, cpu_M0); break; } store_reg(s, rd, tmp); @@ -2639,9 +2652,9 @@ static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn) { if (insn & ARM_CP_RW_BIT) { /* MRA */ iwmmxt_load_reg(cpu_V0, acc); - tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0); + tcg_gen_trunc_i64_tl(cpu_R[rdlo], cpu_V0); tcg_gen_shri_i64(cpu_V0, cpu_V0, 32); - tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0); + tcg_gen_trunc_i64_tl(cpu_R[rdhi], cpu_V0); tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1); } else { /* MAR */ tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]); @@ -3743,10 +3756,10 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest) { if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) { tcg_gen_goto_tb(n); gen_set_pc_im(dest); - tcg_gen_exit_tb((tcg_target_long) tb + n); + tcg_gen_exit_tb(tb, n); } else { gen_set_pc_im(dest); - tcg_gen_exit_tb(0); + tcg_gen_exit_tb(NULL, 0); } } @@ -4230,7 +4243,6 @@ static int disas_neon_ls_insn(CPUARMState *env, DisasContext *s, uint32_t insn) } } else /* size == 0 */ { if (load) { - TCGV_UNUSED(tmp2); for (n = 0; n < 4; n++) { tmp = gen_ld8u(addr, IS_USER(s)); tcg_gen_addi_i32(addr, addr, stride); @@ -4446,7 +4458,7 @@ static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src) { gen_helper_neon_narrow_u16(dest, src); break; case 2: - tcg_gen_trunc_i64_i32(dest, src); + tcg_gen_trunc_i64_tl(dest, src); break; default: abort(); @@ -5849,11 +5861,9 @@ static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn tmp = neon_load_reg(rn, 1); neon_store_scratch(2, tmp); } - TCGV_UNUSED(tmp3); for (pass = 0; pass < 2; pass++) { if (src1_wide) { neon_load_reg64(cpu_V0, rn + pass); - TCGV_UNUSED(tmp); } else { if (pass == 1 && rd == rn) { tmp = neon_load_scratch(2); @@ -5866,7 +5876,6 @@ static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn } if (src2_wide) { neon_load_reg64(cpu_V1, rm + pass); - TCGV_UNUSED(tmp2); } else { if (pass == 1 && rd == rm) { tmp2 = neon_load_scratch(2); @@ -5972,7 +5981,7 @@ static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn break; case 2: tcg_gen_shri_i64(cpu_V0, cpu_V0, 32); - tcg_gen_trunc_i64_i32(tmp, cpu_V0); + tcg_gen_trunc_i64_tl(tmp, cpu_V0); break; default: abort(); @@ -5988,7 +5997,7 @@ static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn case 2: tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31); tcg_gen_shri_i64(cpu_V0, cpu_V0, 32); - tcg_gen_trunc_i64_i32(tmp, cpu_V0); + tcg_gen_trunc_i64_tl(tmp, cpu_V0); break; default: abort(); @@ -6320,7 
+6329,6 @@ static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn if (rm & 1) { return 1; } - TCGV_UNUSED(tmp2); for (pass = 0; pass < 2; pass++) { neon_load_reg64(cpu_V0, rm + pass); tmp = tcg_temp_new_i32(); @@ -6398,7 +6406,6 @@ static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn for (pass = 0; pass < (q ? 4 : 2); pass++) { if (neon_2rm_is_float_op(op)) { tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass)); - TCGV_UNUSED(tmp); } else { tmp = neon_load_reg(rm, pass); } @@ -6859,11 +6866,11 @@ static int disas_coproc_insn(CPUARMState *env, DisasContext *s, uint32_t insn) { static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val) { TCGv tmp; tmp = tcg_temp_new_i32(); - tcg_gen_trunc_i64_i32(tmp, val); + tcg_gen_trunc_i64_tl(tmp, val); store_reg(s, rlow, tmp); tmp = tcg_temp_new_i32(); tcg_gen_shri_i64(val, val, 32); - tcg_gen_trunc_i64_i32(tmp, val); + tcg_gen_trunc_i64_tl(tmp, val); store_reg(s, rhigh, tmp); } @@ -6958,8 +6965,8 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, TCGv a #else static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, TCGv addr, int size) { TCGv tmp; - int done_label; - int fail_label; + TCGLabel *done_label; + TCGLabel *fail_label; /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) { [addr] = {Rt}; @@ -7031,7 +7038,7 @@ static void disas_arm_insn(CPUARMState *env, DisasContext *s) { TCGv addr; TCGv_i64 tmp64; - insn = arm_ldl_code(s->pc, s->bswap_code); + insn = arm_ldl_code(s->env, s->pc, s->bswap_code); s->pc += 4; /* M variants do not implement ARM mode. */ @@ -7347,7 +7354,7 @@ static void disas_arm_insn(CPUARMState *env, DisasContext *s) { tmp = load_cpu_field(spsr); } else { tmp = tcg_temp_new_i32(); - gen_helper_cpsr_read(tmp); + gen_helper_cpsr_read(tmp, cpu_env); } store_reg(s, rd, tmp); } @@ -7436,7 +7443,7 @@ static void disas_arm_insn(CPUARMState *env, DisasContext *s) { tmp64 = gen_muls_i64_i32(tmp, tmp2); tcg_gen_shri_i64(tmp64, tmp64, 16); tmp = tcg_temp_new_i32(); - tcg_gen_trunc_i64_i32(tmp, tmp64); + tcg_gen_trunc_i64_tl(tmp, tmp64); tcg_temp_free_i64(tmp64); if ((sh & 2) == 0) { tmp2 = load_reg(s, rn); @@ -7508,7 +7515,6 @@ static void disas_arm_insn(CPUARMState *env, DisasContext *s) { rn = (insn >> 16) & 0xf; tmp = load_reg(s, rn); } else { - TCGV_UNUSED(tmp); } rd = (insn >> 12) & 0xf; switch (op1) { @@ -7532,11 +7538,11 @@ static void disas_arm_insn(CPUARMState *env, DisasContext *s) { if (IS_USER(s)) { goto illegal_op; } - gen_helper_sub_cc(tmp, tmp, tmp2); + gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2); gen_exception_return(s, tmp); } else { if (set_cc) { - gen_helper_sub_cc(tmp, tmp, tmp2); + gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2); } else { tcg_gen_sub_i32(tmp, tmp, tmp2); } @@ -7545,7 +7551,7 @@ static void disas_arm_insn(CPUARMState *env, DisasContext *s) { break; case 0x03: if (set_cc) { - gen_helper_sub_cc(tmp, tmp2, tmp); + gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp); } else { tcg_gen_sub_i32(tmp, tmp2, tmp); } @@ -7553,7 +7559,7 @@ static void disas_arm_insn(CPUARMState *env, DisasContext *s) { break; case 0x04: if (set_cc) { - gen_helper_add_cc(tmp, tmp, tmp2); + gen_helper_add_cc(tmp, cpu_env, tmp, tmp2); } else { tcg_gen_add_i32(tmp, tmp, tmp2); } @@ -7561,7 +7567,7 @@ static void disas_arm_insn(CPUARMState *env, DisasContext *s) { break; case 0x05: if (set_cc) { - gen_helper_adc_cc(tmp, tmp, tmp2); + gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2); } else { gen_add_carry(tmp, tmp, 
tmp2); } @@ -7569,7 +7575,7 @@ static void disas_arm_insn(CPUARMState *env, DisasContext *s) { break; case 0x06: if (set_cc) { - gen_helper_sbc_cc(tmp, tmp, tmp2); + gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2); } else { gen_sub_carry(tmp, tmp, tmp2); } @@ -7577,7 +7583,7 @@ static void disas_arm_insn(CPUARMState *env, DisasContext *s) { break; case 0x07: if (set_cc) { - gen_helper_sbc_cc(tmp, tmp2, tmp); + gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp); } else { gen_sub_carry(tmp, tmp2, tmp); } @@ -7599,13 +7605,13 @@ static void disas_arm_insn(CPUARMState *env, DisasContext *s) { break; case 0x0a: if (set_cc) { - gen_helper_sub_cc(tmp, tmp, tmp2); + gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2); } tcg_temp_free_i32(tmp); break; case 0x0b: if (set_cc) { - gen_helper_add_cc(tmp, tmp, tmp2); + gen_helper_add_cc(tmp, cpu_env, tmp, tmp2); } tcg_temp_free_i32(tmp); break; @@ -8045,7 +8051,7 @@ static void disas_arm_insn(CPUARMState *env, DisasContext *s) { } tcg_gen_shri_i64(tmp64, tmp64, 32); tmp = tcg_temp_new_i32(); - tcg_gen_trunc_i64_i32(tmp, tmp64); + tcg_gen_trunc_i64_tl(tmp, tmp64); tcg_temp_free_i64(tmp64); store_reg(s, rn, tmp); break; @@ -8243,7 +8249,6 @@ static void disas_arm_insn(CPUARMState *env, DisasContext *s) { /* compute total size */ loaded_base = 0; - TCGV_UNUSED(loaded_var); n = 0; for (i = 0; i < 16; i++) { if (insn & (1 << i)) @@ -8275,7 +8280,7 @@ static void disas_arm_insn(CPUARMState *env, DisasContext *s) { tmp = gen_ld32(addr, IS_USER(s)); if (user) { tmp2 = tcg_const_i32(i); - gen_helper_set_user_reg(tmp2, tmp); + gen_helper_set_user_reg(cpu_env, tmp2, tmp); tcg_temp_free_i32(tmp2); tcg_temp_free_i32(tmp); } else if (i == rn) { @@ -8294,7 +8299,7 @@ static void disas_arm_insn(CPUARMState *env, DisasContext *s) { } else if (user) { tmp = tcg_temp_new_i32(); tmp2 = tcg_const_i32(i); - gen_helper_get_user_reg(tmp, tmp2); + gen_helper_get_user_reg(tmp, cpu_env, tmp2); tcg_temp_free_i32(tmp2); } else { tmp = load_reg(s, i); @@ -8414,31 +8419,31 @@ static int gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shift break; case 8: /* add */ if (conds) - gen_helper_add_cc(t0, t0, t1); + gen_helper_add_cc(t0, cpu_env, t0, t1); else tcg_gen_add_i32(t0, t0, t1); break; case 10: /* adc */ if (conds) - gen_helper_adc_cc(t0, t0, t1); + gen_helper_adc_cc(t0, cpu_env, t0, t1); else gen_adc(t0, t1); break; case 11: /* sbc */ if (conds) - gen_helper_sbc_cc(t0, t0, t1); + gen_helper_sbc_cc(t0, cpu_env, t0, t1); else gen_sub_carry(t0, t0, t1); break; case 13: /* sub */ if (conds) - gen_helper_sub_cc(t0, t0, t1); + gen_helper_sub_cc(t0, cpu_env, t0, t1); else tcg_gen_sub_i32(t0, t0, t1); break; case 14: /* rsb */ if (conds) - gen_helper_sub_cc(t0, t1, t0); + gen_helper_sub_cc(t0, cpu_env, t1, t0); else tcg_gen_sub_i32(t0, t1, t0); break; @@ -8509,7 +8514,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw /* Fall through to 32-bit decode. 
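          (insn_hw1 keeps the first halfword of the Thumb-2 instruction; the
          second halfword is fetched just below with arm_lduw_code and merged
          in as the high half via insn |= insn_hw1 << 16.)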
*/ } - insn = arm_lduw_code(s->pc, s->bswap_code); + insn = arm_lduw_code(s->env, s->pc, s->bswap_code); s->pc += 2; insn |= (uint32_t) insn_hw1 << 16; @@ -8661,7 +8666,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw gen_st32(tmp, addr, 0); tcg_gen_addi_i32(addr, addr, 4); tmp = tcg_temp_new_i32(); - gen_helper_cpsr_read(tmp); + gen_helper_cpsr_read(tmp, cpu_env); gen_st32(tmp, addr, 0); if (insn & (1 << 21)) { if ((insn & (1 << 24)) == 0) { @@ -8690,7 +8695,6 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw tcg_gen_addi_i32(addr, addr, -offset); } - TCGV_UNUSED(loaded_var); for (i = 0; i < 16; i++) { if ((insn & (1 << i)) == 0) continue; @@ -8951,7 +8955,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw tmp64 = gen_muls_i64_i32(tmp, tmp2); tcg_gen_shri_i64(tmp64, tmp64, 16); tmp = tcg_temp_new_i32(); - tcg_gen_trunc_i64_i32(tmp, tmp64); + tcg_gen_trunc_i64_tl(tmp, tmp64); tcg_temp_free_i64(tmp64); if (rs != 15) { tmp2 = load_reg(s, rs); @@ -8975,7 +8979,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw } tcg_gen_shri_i64(tmp64, tmp64, 32); tmp = tcg_temp_new_i32(); - tcg_gen_trunc_i64_i32(tmp, tmp64); + tcg_gen_trunc_i64_tl(tmp, tmp64); tcg_temp_free_i64(tmp64); break; case 7: /* Unsigned sum of absolute differences. */ @@ -9200,7 +9204,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw gen_helper_v7m_mrs(tmp, cpu_env, addr); tcg_temp_free_i32(addr); } else { - gen_helper_cpsr_read(tmp); + gen_helper_cpsr_read(tmp, cpu_env); } store_reg(s, rd, tmp); break; @@ -9573,7 +9577,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { } } - insn = arm_lduw_code(s->pc, s->bswap_code); + insn = arm_lduw_code(s->env, s->pc, s->bswap_code); s->pc += 2; switch (insn >> 12) { @@ -9599,12 +9603,12 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { if (s->condexec_mask) tcg_gen_sub_i32(tmp, tmp, tmp2); else - gen_helper_sub_cc(tmp, tmp, tmp2); + gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2); } else { if (s->condexec_mask) tcg_gen_add_i32(tmp, tmp, tmp2); else - gen_helper_add_cc(tmp, tmp, tmp2); + gen_helper_add_cc(tmp, cpu_env, tmp, tmp2); } tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); @@ -9636,7 +9640,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { tcg_gen_movi_i32(tmp2, insn & 0xff); switch (op) { case 1: /* cmp */ - gen_helper_sub_cc(tmp, tmp, tmp2); + gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2); tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp2); break; @@ -9644,7 +9648,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { if (s->condexec_mask) tcg_gen_add_i32(tmp, tmp, tmp2); else - gen_helper_add_cc(tmp, tmp, tmp2); + gen_helper_add_cc(tmp, cpu_env, tmp, tmp2); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); break; @@ -9652,7 +9656,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { if (s->condexec_mask) tcg_gen_sub_i32(tmp, tmp, tmp2); else - gen_helper_sub_cc(tmp, tmp, tmp2); + gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); break; @@ -9688,7 +9692,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { case 1: /* cmp */ tmp = load_reg(s, rd); tmp2 = load_reg(s, rm); - gen_helper_sub_cc(tmp, tmp, tmp2); + gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2); tcg_temp_free_i32(tmp2); tcg_temp_free_i32(tmp); break; @@ -9737,7 +9741,6 @@ static void disas_thumb_insn(CPUARMState *env, 
DisasContext *s) { } else if (op != 0xf) { /* mvn doesn't read its first operand */ tmp = load_reg(s, rd); } else { - TCGV_UNUSED(tmp); } tmp2 = load_reg(s, rm); @@ -9756,7 +9759,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { if (s->condexec_mask) { gen_helper_shl(tmp2, tmp2, tmp); } else { - gen_helper_shl_cc(tmp2, tmp2, tmp); + gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp); gen_logic_CC(tmp2); } break; @@ -9764,7 +9767,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { if (s->condexec_mask) { gen_helper_shr(tmp2, tmp2, tmp); } else { - gen_helper_shr_cc(tmp2, tmp2, tmp); + gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp); gen_logic_CC(tmp2); } break; @@ -9772,7 +9775,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { if (s->condexec_mask) { gen_helper_sar(tmp2, tmp2, tmp); } else { - gen_helper_sar_cc(tmp2, tmp2, tmp); + gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp); gen_logic_CC(tmp2); } break; @@ -9780,20 +9783,20 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { if (s->condexec_mask) gen_adc(tmp, tmp2); else - gen_helper_adc_cc(tmp, tmp, tmp2); + gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2); break; case 0x6: /* sbc */ if (s->condexec_mask) gen_sub_carry(tmp, tmp, tmp2); else - gen_helper_sbc_cc(tmp, tmp, tmp2); + gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2); break; case 0x7: /* ror */ if (s->condexec_mask) { tcg_gen_andi_i32(tmp, tmp, 0x1f); tcg_gen_rotr_i32(tmp2, tmp2, tmp); } else { - gen_helper_ror_cc(tmp2, tmp2, tmp); + gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp); gen_logic_CC(tmp2); } break; @@ -9806,14 +9809,14 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { if (s->condexec_mask) tcg_gen_neg_i32(tmp, tmp2); else - gen_helper_sub_cc(tmp, tmp, tmp2); + gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2); break; case 0xa: /* cmp */ - gen_helper_sub_cc(tmp, tmp, tmp2); + gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2); rd = 16; break; case 0xb: /* cmn */ - gen_helper_add_cc(tmp, tmp, tmp2); + gen_helper_add_cc(tmp, cpu_env, tmp, tmp2); rd = 16; break; case 0xc: /* orr */ @@ -10061,7 +10064,6 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { tcg_gen_addi_i32(addr, addr, 4); } } - TCGV_UNUSED(tmp); if (insn & (1 << 8)) { if (insn & (1 << 11)) { /* pop pc */ @@ -10084,7 +10086,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { if ((insn & 0x0900) == 0x0900) { store_reg_from_load(env, s, 15, tmp); // To find how many other regs pop with pc - TPRINTF("pc = 0x%x addr = 0x%x tmp = 0x%x\n", env->regs[15], addr, tmp); + TPRINTF("pc = 0x%x sp = 0x%x \n", env->regs[15], env->regs[13]); if (env->v7m.exception != 0 && IS_M(env)) { count = 0; for (k = 0; k < 8; k++) { @@ -10211,7 +10213,6 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { case 12: { /* load/store multiple */ TCGv loaded_var; - TCGV_UNUSED(loaded_var); rn = (insn >> 8) & 0x7; addr = load_reg(s, rn); for (i = 0; i < 8; i++) { @@ -10297,13 +10298,11 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { } /* generate intermediate code in gen_opc_buf and gen_opparam_buf for - basic block 'tb'. If search_pc is TRUE, also generate PC - information for each intermediate instruction. 
*/ -static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationBlock *tb, int search_pc) { + basic block 'tb'.*/ +static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationBlock *tb) { DisasContext dc1, *dc = &dc1; CPUBreakpoint *bp; - uint16_t *gen_opc_end; - int j, lj; + int lj; target_ulong pc_start; uint32_t next_page_start; int num_insns; @@ -10312,10 +10311,11 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationB /* generate intermediate code */ pc_start = tb->pc; - dc->tb = tb; + memset(dc, 0, sizeof(*dc)); - gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; + dc->tb = tb; + dc->env = env; dc->is_jmp = DISAS_NEXT; dc->pc = pc_start; dc->singlestep_enabled = env->singlestep_enabled; @@ -10343,31 +10343,16 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationB num_insns = 0; max_insns = tb->cflags & CF_COUNT_MASK; if (max_insns == 0) - max_insns = CF_COUNT_MASK; - - // gen_icount_start(); + max_insns = TCG_MAX_INSNS; tcg_clear_temp_count(); +#ifndef STATIC_TRANSLATOR + gen_tb_start(tb); +#endif #ifdef CONFIG_SYMBEX - tb_precise_pc_t *p; - - /* When doing retranslation to LLVM, avoid clobbering - * existing pc recovery info, which is relied upon by - * the existing machine code. */ - int generate_pc_recovery_info = tb->precise_entries == 0; - - dc->invalid_instr = 0; - - if (!search_pc && generate_pc_recovery_info) { - p = tb->precise_pcs; - } - - if (search_pc) { - generate_pc_recovery_info = 0; - } - dc->instrument = !search_pc; + dc->instrument = 1; dc->enable_jmp_im = 1; dc->cpuState = env; @@ -10377,13 +10362,9 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationB temporary one in case we regenerate LLVM bitcode */ #ifndef STATIC_TRANSLATOR TCGv_i64 tmp64; - tmp64 = tcg_temp_new_i64(); - if (tb->originalTb) { - tcg_gen_movi_i64(tmp64, (uint64_t) tb->originalTb); - } else { - tcg_gen_movi_i64(tmp64, (uint64_t) tb); - } + tmp64 = tcg_temp_new_i64(); + tcg_gen_movi_i64(tmp64, (uint64_t) tb); tcg_gen_st_i64(tmp64, cpu_env, offsetof(CPUArchState, se_current_tb)); if (unlikely(*g_sqi.events.on_translate_block_start_signals_count && dc->instrument)) { @@ -10462,41 +10443,11 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationB } } } -#ifdef CONFIG_SYMBEX - /* Generate precise PC recovery information */ - if (generate_pc_recovery_info && !search_pc) { - int cur_opc = gen_opc_ptr - gen_opc_buf; - - if (num_insns > 0 && (p - 1)->opc == cur_opc) { - // The instruction was a nop - --p; - --tb->precise_entries; - } - - p->guest_pc_increment = dc->pc - pc_start; - p->opc = cur_opc; - ++p; - ++tb->precise_entries; - } -#endif - if (search_pc) { - j = gen_opc_ptr - gen_opc_buf; - if (lj < j) { - lj++; - while (lj < j) - gen_opc_instr_start[lj++] = 0; - } - gen_opc_pc[lj] = dc->pc; - gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1); - gen_opc_instr_start[lj] = 1; - gen_opc_icount[lj] = num_insns; - } - // if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) // gen_io_start(); // if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) { - // tcg_gen_debug_insn_start(dc->pc); + // tcg_gen_insn_start(dc->pc); // } #ifdef CONFIG_SYMBEX dc->insPc = dc->pc; @@ -10510,10 +10461,8 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationB tb->pcOfLastInstr = pc_start; dc->useNextPc = 0; dc->nextPc = -1; - - dc->ins_opc = gen_opc_ptr; - dc->ins_arg = gen_opparam_ptr; #endif + 
tcg_gen_insn_start(dc->pc, (dc->condexec_cond << 4) | (dc->condexec_mask >> 1), 0); if (dc->thumb) { disas_thumb_insn(env, dc); if (dc->condexec_mask) { @@ -10526,6 +10475,13 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationB } else { disas_arm_insn(env, dc); } +#ifdef CONFIG_SYMBEX + // Compute the register mask and send the onRegisterAccess event + if (unlikely(*g_sqi.events.on_translate_register_access_signals_count && dc->instrument)) { + instr_translate_compute_reg_mask_end(dc); + } +#endif + #ifdef CONFIG_SYMBEX if (!dc->is_jmp) { // Allow proper pc update for onTranslateInstruction events @@ -10550,7 +10506,7 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationB * Also stop translation when a page boundary is reached. This * ensures prefetch aborts occur at the right place. */ num_insns++; - } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end && !env->singlestep_enabled && !singlestep && + } while (!dc->is_jmp && !tcg_op_buf_full() && !env->singlestep_enabled && !singlestep && dc->pc < next_page_start && num_insns < max_insns); if (tb->cflags & CF_LAST_IO) { @@ -10609,7 +10565,7 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationB gen_eob_event(dc, 0, 0); #endif /* indicate that the hash table must be used to find the next TB */ - tcg_gen_exit_tb(0); + tcg_gen_exit_tb(NULL, 0); break; case DISAS_TB_JUMP: /* nothing more to generate */ @@ -10630,40 +10586,31 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationB } done_generating: - // gen_icount_end(tb, num_insns); - *gen_opc_ptr = INDEX_op_end; - - //#ifdef DEBUG_DISAS - // if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { - // qemu_log("----------------\n"); - // qemu_log("IN: %s\n", lookup_symbol(pc_start)); - // log_target_disas(pc_start, dc->pc - pc_start, - // dc->thumb | (dc->bswap_code << 1)); - // qemu_log("\n"); - // } - //#endif - if (search_pc) { - j = gen_opc_ptr - gen_opc_buf; - lj++; - while (lj <= j) - gen_opc_instr_start[lj++] = 0; - } else { - tb->size = dc->pc - pc_start; - tb->icount = num_insns; +#ifndef STATIC_TRANSLATOR + gen_tb_end(tb); +#endif + +#ifdef DEBUG_DISAS + if (libcpu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { + libcpu_log("----------------\n"); + libcpu_log("IN %#" PRIx64 ":\n", (uint64_t) pc_start); + log_target_disas(env, pc_start, dc->pc - pc_start, + dc->thumb | (dc->bswap_code << 1)); + libcpu_log("\n"); + } +#endif + + tb->size = dc->pc - pc_start; + tb->icount = num_insns; #ifdef CONFIG_SYMBEX - if (unlikely(*g_sqi.events.on_translate_block_complete_signals_count && dc->instrument)) { - g_sqi.events.on_translate_block_complete(tb, dc->insPc); - } -#endif + if (unlikely(*g_sqi.events.on_translate_block_complete_signals_count && dc->instrument)) { + g_sqi.events.on_translate_block_complete(tb, dc->insPc); } +#endif } void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb) { - gen_intermediate_code_internal(env, tb, 0); -} - -void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb) { - gen_intermediate_code_internal(env, tb, 1); + gen_intermediate_code_internal(env, tb); } static const char *cpu_mode_names[16] = {"usr", "fiq", "irq", "svc", "???", "???", "???", "abt", @@ -10714,7 +10661,7 @@ void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf, int #endif } -void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos) { - env->regs[15] = gen_opc_pc[pc_pos]; - env->condexec_bits = gen_opc_condexec_bits[pc_pos]; +void 
restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, target_ulong *data) {
+    env->regs[15] = data[0];
+    env->condexec_bits = data[1];
 }

From b3492f15fdf35fd7f0f5d717e4110f34811a90c0 Mon Sep 17 00:00:00 2001
From: chaojixx
Date: Wed, 25 Dec 2019 12:32:37 -0500
Subject: [PATCH 23/59] CMakeLists: TARGET_INSN_START_EXTRA_WORDS is 2 on ARM

Signed-off-by: chaojixx
---
 CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 89c3168..6f24ea3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -85,7 +85,7 @@ elseif(WITH_TARGET MATCHES "arm")
     file(APPEND ${CONFIG_TARGET_H} "#define TARGET_ARCH \"arm\"\n")
     file(APPEND ${CONFIG_TARGET_H} "#define TARGET_ARM 1\n")
     file(APPEND ${CONFIG_TARGET_H} "#define TARGET_LONG_ALIGNMENT 4\n")
-    file(APPEND ${CONFIG_TARGET_H} "#define TARGET_INSN_START_EXTRA_WORDS 1\n")
+    file(APPEND ${CONFIG_TARGET_H} "#define TARGET_INSN_START_EXTRA_WORDS 2\n")

     set(TARGET_DIR "target-arm")
 else()

From ac5dfdf7a3fb25f30e5433f88b89d223b378e88f Mon Sep 17 00:00:00 2001
From: chaojixx
Date: Wed, 25 Dec 2019 21:40:01 -0500
Subject: [PATCH 24/59] se_libcpu_config.h: add ARM SE_RAM_OBJECT_BITS

Signed-off-by: chaojixx
---
 include/cpu/se_libcpu_config.h | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/include/cpu/se_libcpu_config.h b/include/cpu/se_libcpu_config.h
index 751aa75..964eef4 100644
--- a/include/cpu/se_libcpu_config.h
+++ b/include/cpu/se_libcpu_config.h
@@ -32,14 +32,19 @@ extern "C" {
 /** This defines the size of each MemoryObject that represents physical RAM.
     Larger values save some memory, smaller (exponentially) decrease solving
     time for constraints with symbolic addresses */
-//#ifdef SE_ENABLE_TLB
+#ifdef SE_ENABLE_TLB
 // XXX: Use TARGET_PAGE_BITS somehow...
-//#define SE_RAM_OBJECT_BITS 12
-//#else
+#if defined(TARGET_I386) || defined(TARGET_X86_64)
+#define SE_RAM_OBJECT_BITS 12
+#elif defined(TARGET_ARM)
+#define SE_RAM_OBJECT_BITS 10
+#else
+#error Unsupported target architecture
+#endif
+#else
 /* Do not touch this */
-
 #define SE_RAM_OBJECT_BITS TARGET_PAGE_BITS
-//#endif
+#endif

 /** Force page sizes to be the native size. A symbex engine could perform
     dynamic page splitting in case of symbolic addresses, so there is no
     need to tweak this value anymore.
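
     As an illustration of the granularity chosen above: with
     SE_RAM_OBJECT_BITS = 10 on ARM each MemoryObject covers 1 KiB, so one
     4 KiB guest page is backed by four objects, and a guest physical
     address splits along the lines of

         base   = addr & ~((1u << SE_RAM_OBJECT_BITS) - 1);
         offset = addr &  ((1u << SE_RAM_OBJECT_BITS) - 1);

     (a sketch of the usual base/offset split, not code taken from this
     header).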
*/ From b8aac2ea81b3af99524dd820ca40b5d7e014a51f Mon Sep 17 00:00:00 2001 From: chaojixx Date: Wed, 25 Dec 2019 22:27:06 -0500 Subject: [PATCH 25/59] add TARGET_X86_64 Signed-off-by: chaojixx --- include/cpu/apic.h | 2 +- include/cpu/exec.h | 2 +- include/cpu/memdbg.h | 2 +- src/cpu-exec.c | 8 ++++---- src/disas.c | 2 +- src/exec-ram.h | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/include/cpu/apic.h b/include/cpu/apic.h index 969e852..a5a8d43 100644 --- a/include/cpu/apic.h +++ b/include/cpu/apic.h @@ -21,7 +21,7 @@ #include -#if defined(TARGET_I386) +#if defined(TARGET_I386) || defined(TARGET_X86_64) #include #elif defined(TARGET_ARM) #include diff --git a/include/cpu/exec.h b/include/cpu/exec.h index f36f28c..50d5160 100644 --- a/include/cpu/exec.h +++ b/include/cpu/exec.h @@ -21,7 +21,7 @@ #include -#if defined(TARGET_I386) +#if defined(TARGET_I386) || defined(TARGET_X86_64) #include #elif defined(TARGET_ARM) #include diff --git a/include/cpu/memdbg.h b/include/cpu/memdbg.h index 1ed5d56..d81893a 100644 --- a/include/cpu/memdbg.h +++ b/include/cpu/memdbg.h @@ -19,7 +19,7 @@ #define __LIBCPU_MEMDBG_H__ #include -#if defined(TARGET_I386) +#if defined(TARGET_I386) || defined(TARGET_X86_64) #include #elif defined(TARGET_ARM) #include diff --git a/src/cpu-exec.c b/src/cpu-exec.c index 369bd7f..0ac8b9c 100644 --- a/src/cpu-exec.c +++ b/src/cpu-exec.c @@ -562,7 +562,7 @@ static bool execution_loop(CPUArchState *env) { } if (env->kvm_request_interrupt_window && -#if defined(TARGET_I386) +#if defined(TARGET_I386) || defined(TARGET_X86_64) (env->mflags & IF_MASK)) { #elif defined(TARGET_ARM) !(env->uncached_cpsr & CPSR_I)) { @@ -612,7 +612,7 @@ int cpu_exec(CPUArchState *env) { * This usually happens when TB cache is flushed but current tb is not reset. 
*/ env->current_tb = NULL; -#if defined(TARGET_I386) +#if defined(TARGET_I386) || defined(TARGET_X86_64) DPRINTF(" setjmp entered eip=%#lx\n", (uint64_t) env->eip); #elif defined(TARGET_ARM) DPRINTF(" setjmp entered r15=%#x\n", (uint32_t) env->regs[15]); @@ -659,7 +659,7 @@ int cpu_exec(CPUArchState *env) { env = cpu_single_env; } } /* for(;;) */ -#if defined(TARGET_I386) +#if defined(TARGET_I386) || defined(TARGET_X86_64) DPRINTF("cpu_loop exit ret=%#x eip=%#lx\n", ret, (uint64_t) env->eip); #elif defined(TARGET_ARM) DPRINTF("cpu_loop exit ret=%#x r15=%#x\n", ret, (uint32_t) env->regs[15]); @@ -669,7 +669,7 @@ int cpu_exec(CPUArchState *env) { env->current_tb = NULL; -#if defined(TARGET_I386) +#if defined(TARGET_I386) || defined(TARGET_X86_64) #ifdef CONFIG_SYMBEX g_sqi.regs.set_cc_op_eflags(env); #else diff --git a/src/disas.c b/src/disas.c index 720cf59..49fe6c4 100644 --- a/src/disas.c +++ b/src/disas.c @@ -18,7 +18,7 @@ #include #include -#if defined(TARGET_I386) +#if defined(TARGET_I386) || defined(TARGET_X86_64) #include #elif defined(TARGET_ARM) #include diff --git a/src/exec-ram.h b/src/exec-ram.h index 406867d..b0c0201 100644 --- a/src/exec-ram.h +++ b/src/exec-ram.h @@ -24,7 +24,7 @@ #include #include -#if defined(TARGET_I386) +#if defined(TARGET_I386) || defined(TARGET_X86_64) #include #elif defined(TARGET_ARM) #include From 04874da203027e2904eea18f3c2301a4900fade3 Mon Sep 17 00:00:00 2001 From: chaojixx Date: Fri, 27 Dec 2019 17:31:51 -0500 Subject: [PATCH 26/59] cpu-exec: ltb should be reset after processing interrupts Signed-off-by: chaojixx --- src/cpu-exec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cpu-exec.c b/src/cpu-exec.c index 0ac8b9c..530e293 100644 --- a/src/cpu-exec.c +++ b/src/cpu-exec.c @@ -504,7 +504,7 @@ static bool execution_loop(CPUArchState *env) { * ensure that no TB jump will be modified as * the program flow was changed */ - last_tb = 0; + ltb = NULL; has_interrupt = true; } From c56160b2d8f55497e1a990f835c7015beecf765c Mon Sep 17 00:00:00 2001 From: chaojixx Date: Mon, 30 Dec 2019 17:02:00 -0500 Subject: [PATCH 27/59] softmmu_template.h: no need for original mmio rw for symbolic mmio ports Signed-off-by: chaojixx --- src/softmmu_template.h | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/softmmu_template.h b/src/softmmu_template.h index c6f87b1..a474ed7 100644 --- a/src/softmmu_template.h +++ b/src/softmmu_template.h @@ -425,17 +425,20 @@ void glue(glue(io_write, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_phys_addr SE_SET_MEM_IO_VADDR(env, addr, 0); env->mem_io_pc = (uintptr_t) retaddr; + + if (likely(!g_sqi.mem.is_mmio_symbolic(addr, DATA_SIZE))) { #if SHIFT <= 2 - ops->write(physaddr, val, 1 << SHIFT); + ops->write(physaddr, val, 1 << SHIFT); #else #ifdef TARGET_WORDS_BIGENDIAN - ops->write(physaddr, (val >> 32), 4); - ops->write(physaddr + 4, (uint32_t) val, 4); + ops->write(physaddr, (val >> 32), 4); + ops->write(physaddr + 4, (uint32_t) val, 4); #else - ops->write(physaddr, (uint32_t) val, 4); - ops->write(physaddr + 4, val >> 32, 4); + ops->write(physaddr, (uint32_t) val, 4); + ops->write(physaddr + 4, val >> 32, 4); #endif #endif /* SHIFT > 2 */ + } } void glue(glue(io_write_chk, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_phys_addr_t physaddr, DATA_TYPE val, From 401e95f86a6141ad9df121a370c4fc56f74c7158 Mon Sep 17 00:00:00 2001 From: chaojixx Date: Thu, 9 Jan 2020 12:03:26 -0500 Subject: [PATCH 28/59] cpu-exec:only allow interrupt in concrete mode Signed-off-by: chaojixx --- 
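Note on the hunk below: the symbolic-execution engine exports its
execution-mode flags through pointers, hence the double dereference in
the guard. A minimal sketch of the gating pattern, where only the
g_sqi expression is taken from this patch and the helper name is
illustrative:

    /* Deliver hardware interrupts only while the CPU executes concretely. */
    static inline int can_take_irq_now(void) {
        return *g_sqi.mode.fast_concrete_invocation && **g_sqi.mode.running_concrete;
    }

If the guard fails, the IRQ is not injected here; the request stays
pending and can be taken on a later pass, once execution is concrete
again.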
src/cpu-exec.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/cpu-exec.c b/src/cpu-exec.c index 530e293..e20694d 100644 --- a/src/cpu-exec.c +++ b/src/cpu-exec.c @@ -451,9 +451,11 @@ static bool process_interrupt_request(CPUArchState *env) { if ((interrupt_request & CPU_INTERRUPT_HARD) && ((IS_M(env) && env->regs[15] < 0xfffffff0) || !(env->uncached_cpsr & CPSR_I))) { if ((armv7m_nvic_can_take_pending_exception(env->nvic))) { - env->exception_index = EXCP_IRQ; - do_interrupt(env); - has_interrupt = true; + if (likely(*g_sqi.mode.fast_concrete_invocation && **g_sqi.mode.running_concrete)) { + env->exception_index = EXCP_IRQ; + do_interrupt(env); + has_interrupt = true; + } } else { DPRINTF("cpu basepri = %d take_exc = %d\n", env->v7m.basepri, armv7m_nvic_can_take_pending_exception(env->nvic)); } From 4b2cfa58e8484b88c30c4b5483354700b8530c8d Mon Sep 17 00:00:00 2001 From: chaojixx Date: Thu, 9 Jan 2020 16:21:56 -0500 Subject: [PATCH 29/59] target-arm/translate.c: update gen_test_cc from 1.0 to 3.0 Signed-off-by: chaojixx --- src/target-arm/translate.c | 103 +++++++++++++++++-------------------- 1 file changed, 47 insertions(+), 56 deletions(-) diff --git a/src/target-arm/translate.c b/src/target-arm/translate.c index e505181..141fbd2 100644 --- a/src/target-arm/translate.c +++ b/src/target-arm/translate.c @@ -728,98 +728,89 @@ static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b) { static void gen_test_cc(int cc, TCGLabel *label) { TCGv tmp; TCGv tmp2; - TCGLabel *inv; + + TCGv_i32 value; + TCGCond cond; switch (cc) { case 0: /* eq: Z */ - tmp = load_cpu_field(ZF); - tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); - break; case 1: /* ne: !Z */ - tmp = load_cpu_field(ZF); - tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); + cond = TCG_COND_EQ; + value = load_cpu_field(ZF); break; case 2: /* cs: C */ - tmp = load_cpu_field(CF); - tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); - break; case 3: /* cc: !C */ - tmp = load_cpu_field(CF); - tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); + cond = TCG_COND_NE; + value = load_cpu_field(CF); break; case 4: /* mi: N */ - tmp = load_cpu_field(NF); - tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); - break; case 5: /* pl: !N */ - tmp = load_cpu_field(NF); - tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); + cond = TCG_COND_LT; + value = load_cpu_field(NF); break; case 6: /* vs: V */ - tmp = load_cpu_field(VF); - tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); - break; case 7: /* vc: !V */ - tmp = load_cpu_field(VF); - tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); + cond = TCG_COND_LT; + value = load_cpu_field(VF); break; case 8: /* hi: C && !Z */ - inv = gen_new_label(); - tmp = load_cpu_field(CF); - tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv); - tcg_temp_free_i32(tmp); - tmp = load_cpu_field(ZF); - tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); - gen_set_label(inv); - break; case 9: /* ls: !C || Z */ + cond = TCG_COND_NE; + value = tcg_temp_new_i32(); tmp = load_cpu_field(CF); - tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); + tmp2 = load_cpu_field(ZF); + /* CF is 1 for C, so -CF is an all-bits-set mask for C; + ZF is non-zero for !Z; so AND the two subexpressions. 
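          (Worked example: for "hi", C && !Z, a set carry gives
          -CF == 0xffffffff and a clear Z gives ZF != 0, so the AND is
          non-zero and TCG_COND_NE takes the branch; "ls", cc == 9, comes
          from the cc & 1 inversion at the end of the function.)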
*/ + tcg_gen_neg_i32(value, tmp); + tcg_gen_and_i32(value, value, tmp2); tcg_temp_free_i32(tmp); - tmp = load_cpu_field(ZF); - tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); - break; - case 10: /* ge: N == V -> N ^ V == 0 */ - tmp = load_cpu_field(VF); - tmp2 = load_cpu_field(NF); - tcg_gen_xor_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); - tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); break; + case 10: /* ge: N == V -> N ^ V == 0 */ case 11: /* lt: N != V -> N ^ V != 0 */ + cond = TCG_COND_GE; + value = tcg_temp_new_i32(); tmp = load_cpu_field(VF); tmp2 = load_cpu_field(NF); - tcg_gen_xor_i32(tmp, tmp, tmp2); + tcg_gen_xor_i32(value, tmp, tmp2); + tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp2); - tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); break; case 12: /* gt: !Z && N == V */ - inv = gen_new_label(); - tmp = load_cpu_field(ZF); - tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv); - tcg_temp_free_i32(tmp); + case 13: /* le: Z || N != V */ + cond = TCG_COND_NE; + value = tcg_temp_new_i32(); tmp = load_cpu_field(VF); tmp2 = load_cpu_field(NF); - tcg_gen_xor_i32(tmp, tmp, tmp2); + /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate + * the sign bit then AND with ZF to yield the result. */ + tcg_gen_xor_i32(value, tmp, tmp2); + tcg_gen_sari_i32(value, value, 31); + tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp2); - tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); - gen_set_label(inv); - break; - case 13: /* le: Z || N != V */ tmp = load_cpu_field(ZF); - tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); + tcg_gen_andc_i32(value, tmp, value); tcg_temp_free_i32(tmp); - tmp = load_cpu_field(VF); - tmp2 = load_cpu_field(NF); - tcg_gen_xor_i32(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); break; + case 14: /* always */ + case 15: /* always */ + /* Use the ALWAYS condition, which will fold early. + * It doesn't matter what we use for the value. 
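(The ZF load that follows merely supplies a live temporary for the final brcondi; with TCG_COND_ALWAYS the value is ignored and the branch should fold into an unconditional goto.)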
*/ + cond = TCG_COND_ALWAYS; + value = load_cpu_field(ZF); + goto no_invert; default: fprintf(stderr, "Bad condition code 0x%x\n", cc); abort(); } - tcg_temp_free_i32(tmp); + + if (cc & 1) { + cond = tcg_invert_cond(cond); + } + + no_invert: + tcg_gen_brcondi_i32(cond, value, 0, label); + tcg_temp_free_i32(value); } static const uint8_t table_logic_cc[16] = { From 58af1d4f5ef16b8cf9c6f15c3b1e6233e827a5da Mon Sep 17 00:00:00 2001 From: chaojixx Date: Wed, 15 Jan 2020 17:11:38 -0500 Subject: [PATCH 30/59] kvm.h: add firmware init interfaces: get memory regions (including ROM and RAM), get entry point, get VTOR Signed-off-by: chaojixx --- include/cpu/kvm.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/include/cpu/kvm.h b/include/cpu/kvm.h index 7a2f151..24e23a1 100644 --- a/include/cpu/kvm.h +++ b/include/cpu/kvm.h @@ -797,6 +797,9 @@ struct kvm_ppc_smmu_info { /* Indicates that the KVM provided uses DBT instead of actual KVM */ #define KVM_CAP_DBT 259 +/* This capability allows the user to customize memory regions */ +#define KVM_CAP_USER_CUSTOM_MEM_REGION 301 + /****************************************/ #ifdef KVM_CAP_IRQ_ROUTING @@ -1111,6 +1114,7 @@ struct kvm_s390_ucas_mapping { #define KVM_SET_M_REGS _IOW(KVMIO, 0xc1, struct kvm_m_regs) #define KVM_GET_M_SREGS _IOR(KVMIO, 0xc2, struct kvm_m_sregs) #define KVM_SET_M_SREGS _IOW(KVMIO, 0xc3, struct kvm_m_sregs) +#define KVM_CUSTOM_M_INIT _IOR(KVMIO, 0xc4, struct kvm_m_vcpu_init) #define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation) #define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt) @@ -1232,6 +1236,17 @@ struct kvm_dev_snapshot { #define KVM_SET_CLOCK_SCALE _IOWR(KVMIO, 0xf8, unsigned *) +/* Available with KVM_CAP_USER_CUSTOM_MEM_REGION */ +struct kvm_mem_init { + __u32 baseaddr; + __u32 size; + __u8 num; + /* If is_rom == 0, the expected memory region is RAM */ + __u8 is_rom; +}; + +#define KVM_MEM_REGION_INIT _IOWR(KVMIO, 0xf9, struct kvm_mem_init) + #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2) From d768c7c522b830d6c4a04420b881104dbaff9354 Mon Sep 17 00:00:00 2001 From: chaojixx Date: Mon, 20 Jan 2020 16:59:45 -0500 Subject: [PATCH 31/59] exec.c: use phys_addr for mem_desc_find on ARM Signed-off-by: chaojixx --- src/exec.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/exec.c b/src/exec.c index 5227ddc..5cb9a3d 100644 --- a/src/exec.c +++ b/src/exec.c @@ -538,7 +538,13 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr) { cpu_ldub_code(env1, addr); } pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK; +#if defined(TARGET_I386) || defined(TARGET_X86_64) if (!mem_desc_find(pd)) { +#elif defined(TARGET_ARM) + if (!mem_desc_find(addr)) { +#else +#error Unsupported target architecture +#endif #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC) cpu_unassigned_access(env1, addr, 0, 1, 0, 4); #else From 82533c1a49294c3f409ebb881f35bb0a34a5fcca Mon Sep 17 00:00:00 2001 From: chaojixx Date: Mon, 20 Jan 2020 17:02:10 -0500 Subject: [PATCH 32/59] kvm_arm.h: add kvm_m_vcpu_init struct for KVM_CUSTOM_M_INIT ioctl interface Signed-off-by: chaojixx --- include/cpu/arm/kvm_arm.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/include/cpu/arm/kvm_arm.h b/include/cpu/arm/kvm_arm.h index 27b2c14..9cb7ec6 100644 --- a/include/cpu/arm/kvm_arm.h +++ b/include/cpu/arm/kvm_arm.h @@ -82,6 +82,12 @@ struct kvm_m_sregs { void *nvic; }; +struct kvm_m_vcpu_init { + __u32
entry; + __u32 msp_init; + __u32 vtor; +}; + /* Supported Processor Types */ #define KVM_ARM_TARGET_CORTEX_A15 0 #define KVM_ARM_TARGET_CORTEX_A7 1 From 266afd7f921b7c13284ab0b077c2b2f89c873385 Mon Sep 17 00:00:00 2001 From: chaojixx Date: Wed, 22 Jan 2020 16:55:27 -0500 Subject: [PATCH 33/59] softmmu_template.h: writing symbolic values to IO causes unexpected state forks Signed-off-by: chaojixx --- src/softmmu_template.h | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/softmmu_template.h b/src/softmmu_template.h index a474ed7..751aa4e 100644 --- a/src/softmmu_template.h +++ b/src/softmmu_template.h @@ -406,9 +406,9 @@ void glue(glue(io_write, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_phys_addr #if defined(CONFIG_SYMBEX) && defined(CONFIG_SYMBEX_MP) // XXX: avoid switch to symbolic mode here, not needed for writes - if (unlikely(tcg_is_dyngen_addr(retaddr) && g_sqi.mem.is_mmio_symbolic(addr, DATA_SIZE))) { - g_sqi.exec.switch_to_symbolic(retaddr); - } + //if (unlikely(tcg_is_dyngen_addr(retaddr) && g_sqi.mem.is_mmio_symbolic(addr, DATA_SIZE))) { + // g_sqi.exec.switch_to_symbolic(retaddr); + //} if (unlikely(is_notdirty_ops(ops))) { CPUTLBEntry *e = env->se_tlb_current; @@ -465,6 +465,7 @@ void glue(glue(io_write_chk, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_phys_ SE_SET_MEM_IO_VADDR(env, addr, 0); env->mem_io_pc = (uintptr_t) retaddr; + #if SHIFT <= 2 if (se_ismemfunc(ops, 1)) { uintptr_t pa = se_notdirty_mem_write(physaddr, 1 << SHIFT); @@ -485,8 +486,11 @@ void glue(glue(io_write_chk, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_phys_ #endif #endif /* SHIFT > 2 */ - // By default, call the original io_write function, which is external - glue(glue(io_write, SUFFIX), MMUSUFFIX)(env, origaddr, val, addr, retaddr); + if (likely(!g_sqi.mem.is_mmio_symbolic(addr, DATA_SIZE))) { + // By default, call the original io_write function, which is external + glue(glue(io_write, SUFFIX), MMUSUFFIX)(env, origaddr, val, addr, retaddr); + } + end: tcg_llvm_trace_mmio_access(addr, val, DATA_SIZE, 1); From 7931e67be3f968b2df66195b5f7863fa9d142921 Mon Sep 17 00:00:00 2001 From: chaojixx Date: Fri, 24 Jan 2020 16:10:34 -0500 Subject: [PATCH 34/59] cpu-exec.c: handle execution mode switch during interrupt An execution mode switch needs to exit the current execution loop. At that point, however, the interrupt may not have completed yet, and re-entering the loop would deliver the same interrupt a second time; that has to be avoided.
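For reference, exception_index 5 is EXCP_IRQ in this tree, so the guard added below is equivalent to the following sketch (symbolic constant spelled out; not part of the diff):

    /* Leave a pending hardware IRQ to process_interrupt_request()
     * instead of re-delivering it right after the mode switch. */
    if (env->exception_index != EXCP_IRQ) {
        do_interrupt(env);
        env->exception_index = -1;
    }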
Signed-off-by: chaojixx --- src/cpu-exec.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/cpu-exec.c b/src/cpu-exec.c index e20694d..82b79e3 100644 --- a/src/cpu-exec.c +++ b/src/cpu-exec.c @@ -486,9 +486,11 @@ static int process_exceptions(CPUArchState *env) { cpu_handle_debug_exception(env); } } else { - DPRINTF(" do_interrupt exidx=%x\n", env->exception_index); - do_interrupt(env); - env->exception_index = -1; + if (env->exception_index != 5) { + DPRINTF(" do_interrupt exidx=%x\n", env->exception_index); + do_interrupt(env); + env->exception_index = -1; + } } return ret; From 3ffe6674b256c615a0deb699e7f8e1c28144b158 Mon Sep 17 00:00:00 2001 From: chaojixx Date: Fri, 24 Jan 2020 16:15:51 -0500 Subject: [PATCH 35/59] target-arm/translate: fix cortex-m interrupt return regs in env struct may not be precise, so it should be replaced with cpu_R struct which refers to gen_bx_excret_final_code of qemu 3.0.0 Signed-off-by: chaojixx --- src/exec-all.h | 7 ++++ src/target-arm/translate.c | 72 ++++++++++++++++++++------------------ 2 files changed, 45 insertions(+), 34 deletions(-) diff --git a/src/exec-all.h b/src/exec-all.h index fd7527e..a93d185 100644 --- a/src/exec-all.h +++ b/src/exec-all.h @@ -42,6 +42,13 @@ extern struct cpu_stats_t g_cpu_stats; #define DISAS_JUMP 1 /* only pc was modified dynamically */ #define DISAS_UPDATE 2 /* cpu state was modified dynamically */ #define DISAS_TB_JUMP 3 /* only pc was modified statically */ +#define DISAS_BX_EXCRET 8 +/* For instructions which want an immediate exit to the main loop, + * as opposed to attempting to use lookup_and_goto_ptr. Unlike + * DISAS_UPDATE this doesn't write the PC on exiting the translation + * loop so you need to ensure something (gen_a64_set_pc_im or runtime + * helper) has done so before we reach return from cpu_tb_exec. + */ #ifdef STATIC_TRANSLATOR /* Accomodate large TBs */ diff --git a/src/target-arm/translate.c b/src/target-arm/translate.c index 141fbd2..7babfcf 100644 --- a/src/target-arm/translate.c +++ b/src/target-arm/translate.c @@ -9553,7 +9553,6 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { uint32_t val, insn, op, rm, rn, rd, shift, cond; int32_t offset; - uint32_t k, count; // only used for counting the number of reglist when poping with pc int i; TCGv tmp; TCGv tmp2; @@ -9703,8 +9702,9 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { } else { gen_bx(s, tmp); if (env->v7m.exception != 0 && IS_M(env) && env->regs[14] >= 0xfffffff0) { - gen_exception(EXCP_EXCEPTION_EXIT); - s->is_jmp = DISAS_UPDATE; + //gen_exception(EXCP_EXCEPTION_EXIT); + //s->is_jmp = DISAS_UPDATE; + s->is_jmp = DISAS_BX_EXCRET; } } break; @@ -10079,17 +10079,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { // To find how many other regs pop with pc TPRINTF("pc = 0x%x sp = 0x%x \n", env->regs[15], env->regs[13]); if (env->v7m.exception != 0 && IS_M(env)) { - count = 0; - for (k = 0; k < 8; k++) { - if ((insn & (1 << k)) != 0) - count++; - } - val = ldl_phys(RR_cpu(env,regs[13]) + count * 4); - // if pop pc is EXC_RETURN invode interrupt exit. - if (val >= 0xfffffff0) { - gen_exception(EXCP_EXCEPTION_EXIT); - s->is_jmp = DISAS_UPDATE; - } + s->is_jmp = DISAS_BX_EXCRET; } } break; @@ -10545,28 +10535,42 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationB Hardware breakpoints have already been handled and skip this code. 
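For DISAS_BX_EXCRET, the epilogue that follows emits a run-time check of the written-back PC against the 0xff000000 magic range, so only a genuine exception return raises EXCP_EXCEPTION_EXIT.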
*/ gen_set_condexec(dc); - switch (dc->is_jmp) { - case DISAS_NEXT: - gen_goto_tb(dc, 1, dc->pc); - break; - default: - case DISAS_JUMP: - case DISAS_UPDATE: + if (dc->is_jmp == DISAS_BX_EXCRET) { + /* Generate the code to finish possible exception return and end the TB */ + TCGLabel *excret_label = gen_new_label(); + /* Is the new PC value in the magic range indicating exception return? */ + tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], 0xff000000, excret_label); #ifdef CONFIG_SYMBEX - gen_eob_event(dc, 0, 0); + gen_eob_event(dc, 0, 0); #endif - /* indicate that the hash table must be used to find the next TB */ - tcg_gen_exit_tb(NULL, 0); - break; - case DISAS_TB_JUMP: - /* nothing more to generate */ - break; - case DISAS_WFI: - gen_helper_wfi(); - break; - case DISAS_SWI: - gen_exception(EXCP_SWI); - break; + /* No: end the TB as we would for a DISAS_JMP */ + tcg_gen_exit_tb(NULL, 0); + gen_set_label(excret_label); + gen_exception(EXCP_EXCEPTION_EXIT); + } else { + switch (dc->is_jmp) { + case DISAS_NEXT: + gen_goto_tb(dc, 1, dc->pc); + break; + default: + case DISAS_JUMP: + case DISAS_UPDATE: +#ifdef CONFIG_SYMBEX + gen_eob_event(dc, 0, 0); +#endif + /* indicate that the hash table must be used to find the next TB */ + tcg_gen_exit_tb(NULL, 0); + break; + case DISAS_TB_JUMP: + /* nothing more to generate */ + break; + case DISAS_WFI: + gen_helper_wfi(); + break; + case DISAS_SWI: + gen_exception(EXCP_SWI); + break; + } } if (dc->condjmp) { gen_set_label(dc->condlabel); From ac394e5f6474b81ac7314ed3774dcccaa27ff8fe Mon Sep 17 00:00:00 2001 From: chaojixx Date: Sun, 2 Feb 2020 17:25:30 -0500 Subject: [PATCH 36/59] target-arm/translate: add instr_gen_pc_update Signed-off-by: chaojixx --- src/target-arm/translate.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/target-arm/translate.c b/src/target-arm/translate.c index 7babfcf..55f963f 100644 --- a/src/target-arm/translate.c +++ b/src/target-arm/translate.c @@ -9550,6 +9550,14 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw return 1; } +void instr_gen_pc_update(void *context, target_ulong pc); +void instr_gen_pc_update(void *context, target_ulong pc) { + TCGv_i32 cpu_tmp0 = tcg_temp_new_i32(); + tcg_gen_movi_i32(cpu_tmp0, pc); + tcg_gen_st_i32(cpu_tmp0, cpu_env, offsetof(CPUArchState, regs[15])); + tcg_temp_free_i32(cpu_tmp0); +} + static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { uint32_t val, insn, op, rm, rn, rd, shift, cond; int32_t offset; From d38669e42a252afa34047b6757e7bf3806d98f1b Mon Sep 17 00:00:00 2001 From: chaojixx Date: Fri, 14 Feb 2020 16:01:45 -0500 Subject: [PATCH 37/59] fixup! 
target-arm/translate: fix cortex-m interrupt return regs in env struct may not be precise, so it should be replaced with cpu_R struct which refers to gen_bx_excret_final_code of qemu 3.0.0 --- src/target-arm/translate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/target-arm/translate.c b/src/target-arm/translate.c index 55f963f..cc5c8fe 100644 --- a/src/target-arm/translate.c +++ b/src/target-arm/translate.c @@ -9709,7 +9709,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { gen_bx(s, tmp); } else { gen_bx(s, tmp); - if (env->v7m.exception != 0 && IS_M(env) && env->regs[14] >= 0xfffffff0) { + if (env->v7m.exception != 0 && IS_M(env)) { //gen_exception(EXCP_EXCEPTION_EXIT); //s->is_jmp = DISAS_UPDATE; s->is_jmp = DISAS_BX_EXCRET; From b4d2444d8b18c3e02f20aa46501bc3fd05a91ba2 Mon Sep 17 00:00:00 2001 From: chaojixx Date: Fri, 14 Feb 2020 16:04:54 -0500 Subject: [PATCH 38/59] target-arm/helper: set the LSB of the return pc to zero Signed-off-by: chaojixx --- src/target-arm/helper.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c index 2c663ad..d22f022 100644 --- a/src/target-arm/helper.c +++ b/src/target-arm/helper.c @@ -745,6 +745,8 @@ static void do_v7m_exception_exit(CPUARMState *env) { WR_cpu(env,regs[12],v7m_pop(env)); WR_cpu(env,regs[14],v7m_pop(env)); env->regs[15] = v7m_pop(env); + // Thumb-2: clear the LSB of the popped PC + env->regs[15] = env->regs[15] & ~1; xpsr = v7m_pop(env); xpsr_write(env, xpsr, 0xfffffdff); /* Undo stack alignment. */ From 8aec36aa27f8892519bbd2ec7f4ad22952a292d4 Mon Sep 17 00:00:00 2001 From: chaojixx Date: Mon, 17 Feb 2020 20:56:40 -0500 Subject: [PATCH 39/59] fixup! softmmu_template.h: writing symbolic values to IO causes unexpected state forks --- src/softmmu_template.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/softmmu_template.h b/src/softmmu_template.h index 751aa4e..af57033 100644 --- a/src/softmmu_template.h +++ b/src/softmmu_template.h @@ -406,9 +406,9 @@ void glue(glue(io_write, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_phys_addr #if defined(CONFIG_SYMBEX) && defined(CONFIG_SYMBEX_MP) // XXX: avoid switch to symbolic mode here, not needed for writes - //if (unlikely(tcg_is_dyngen_addr(retaddr) && g_sqi.mem.is_mmio_symbolic(addr, DATA_SIZE))) { - // g_sqi.exec.switch_to_symbolic(retaddr); - //} + if (unlikely(tcg_is_dyngen_addr(retaddr) && g_sqi.mem.is_mmio_symbolic(addr, DATA_SIZE))) { + g_sqi.exec.switch_to_symbolic(retaddr); + } if (unlikely(is_notdirty_ops(ops))) { CPUTLBEntry *e = env->se_tlb_current; @@ -491,7 +491,6 @@ void glue(glue(io_write_chk, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_phys_ glue(glue(io_write, SUFFIX), MMUSUFFIX)(env, origaddr, val, addr, retaddr); } - end: tcg_llvm_trace_mmio_access(addr, val, DATA_SIZE, 1); SE_SET_MEM_IO_VADDR(env, 0, 1); From 490cf472c2bb92bf48014b9928a7ffe0670d047b Mon Sep 17 00:00:00 2001 From: chaojixx Date: Mon, 17 Feb 2020 21:01:44 -0500 Subject: [PATCH 40/59] helper: move helper instructions to op_helper for symbolic execution Signed-off-by: chaojixx --- src/target-arm/helper.c | 46 -------------------------------- src/target-arm/op_helper.c | 46 ++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 46 deletions(-) diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c index d22f022..70303d0 100644 --- a/src/target-arm/helper.c +++ b/src/target-arm/helper.c @@ -584,52 +584,6 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) {
env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); } -/* Sign/zero extend */ -uint32_t HELPER(sxtb16)(uint32_t x) { - uint32_t res; - res = (uint16_t)(int8_t) x; - res |= (uint32_t)(int8_t)(x >> 16) << 16; - return res; -} - -uint32_t HELPER(uxtb16)(uint32_t x) { - uint32_t res; - res = (uint16_t)(uint8_t) x; - res |= (uint32_t)(uint8_t)(x >> 16) << 16; - return res; -} - -uint32_t HELPER(clz)(uint32_t x) { - uint32_t res; - res = (uint32_t) clz32(x); - return res; -} - -int32_t HELPER(sdiv)(int32_t num, int32_t den) { - if (den == 0) - return 0; - if (num == INT_MIN && den == -1) - return INT_MIN; - return num / den; -} - -uint32_t HELPER(udiv)(uint32_t num, uint32_t den) { - if (den == 0) - return 0; - return num / den; -} - -uint32_t HELPER(rbit)(uint32_t x) { - x = ((x & 0xff000000) >> 24) | ((x & 0x00ff0000) >> 8) | ((x & 0x0000ff00) << 8) | ((x & 0x000000ff) << 24); - x = ((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4); - x = ((x & 0x88888888) >> 3) | ((x & 0x44444444) >> 1) | ((x & 0x22222222) << 1) | ((x & 0x11111111) << 3); - return x; -} - -uint32_t HELPER(abs)(uint32_t x) { - return ((int32_t) x < 0) ? -x : x; -} - /* Map CPU modes onto saved register banks. */ int bank_number(CPUARMState *env, int mode) { switch (mode) { diff --git a/src/target-arm/op_helper.c b/src/target-arm/op_helper.c index 2b1cff7..714e444 100644 --- a/src/target-arm/op_helper.c +++ b/src/target-arm/op_helper.c @@ -263,6 +263,52 @@ uint32_t HELPER(usat16)(uint32_t x, uint32_t shift) { return res; } +/* Sign/zero extend */ +uint32_t HELPER(sxtb16)(uint32_t x) { + uint32_t res; + res = (uint16_t)(int8_t) x; + res |= (uint32_t)(int8_t)(x >> 16) << 16; + return res; +} + +uint32_t HELPER(uxtb16)(uint32_t x) { + uint32_t res; + res = (uint16_t)(uint8_t) x; + res |= (uint32_t)(uint8_t)(x >> 16) << 16; + return res; +} + +uint32_t HELPER(clz)(uint32_t x) { + uint32_t res; + res = (uint32_t) clz32(x); + return res; +} + +int32_t HELPER(sdiv)(int32_t num, int32_t den) { + if (den == 0) + return 0; + if (num == INT_MIN && den == -1) + return INT_MIN; + return num / den; +} + +uint32_t HELPER(udiv)(uint32_t num, uint32_t den) { + if (den == 0) + return 0; + return num / den; +} + +uint32_t HELPER(rbit)(uint32_t x) { + x = ((x & 0xff000000) >> 24) | ((x & 0x00ff0000) >> 8) | ((x & 0x0000ff00) << 8) | ((x & 0x000000ff) << 24); + x = ((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4); + x = ((x & 0x88888888) >> 3) | ((x & 0x44444444) >> 1) | ((x & 0x22222222) << 1) | ((x & 0x11111111) << 3); + return x; +} + +uint32_t HELPER(abs)(uint32_t x) { + return ((int32_t) x < 0) ? -x : x; +} + void HELPER(wfi)(void) { env->exception_index = EXCP_HLT; env->halted = 1; From 4940c84650b19ea479930f100f204a1e3a4acbf6 Mon Sep 17 00:00:00 2001 From: weizhou-chaojixx Date: Fri, 27 Mar 2020 18:50:34 -0400 Subject: [PATCH 41/59] fixup! 
target-arm/translate: fix cortex-m interrupt return regs in env struct may not be precise, so it should be replaced with cpu_R struct which refers to gen_bx_excret_final_code of qemu 3.0.0 --- src/target-arm/translate.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/target-arm/translate.c b/src/target-arm/translate.c index cc5c8fe..0023a8e 100644 --- a/src/target-arm/translate.c +++ b/src/target-arm/translate.c @@ -8694,6 +8694,9 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw tmp = gen_ld32(addr, IS_USER(s)); if (i == 15) { gen_bx(s, tmp); + if (IS_M(env)) { + s->is_jmp = DISAS_BX_EXCRET; + } } else if (i == rn) { loaded_var = tmp; loaded_base = 1; @@ -9709,9 +9712,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { gen_bx(s, tmp); } else { gen_bx(s, tmp); - if (env->v7m.exception != 0 && IS_M(env)) { - //gen_exception(EXCP_EXCEPTION_EXIT); - //s->is_jmp = DISAS_UPDATE; + if (IS_M(env)) { s->is_jmp = DISAS_BX_EXCRET; } } @@ -10086,7 +10087,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { store_reg_from_load(env, s, 15, tmp); // To find how many other regs pop with pc TPRINTF("pc = 0x%x sp = 0x%x \n", env->regs[15], env->regs[13]); - if (env->v7m.exception != 0 && IS_M(env)) { + if (IS_M(env)) { s->is_jmp = DISAS_BX_EXCRET; } } From 6b8b118fea073032f71ea839c16e29e938d687c7 Mon Sep 17 00:00:00 2001 From: weizhou-chaojixx Date: Sat, 28 Mar 2020 00:27:50 -0400 Subject: [PATCH 42/59] target-arm: add interrupt_flag in env to make sure the execution will not be frequently disrupted by interrupts Signed-off-by: weizhou-chaojixx --- include/cpu/arm/cpu.h | 1 + include/cpu/se_libcpu.h | 1 + src/cpu-exec.c | 3 ++- src/target-arm/helper.c | 3 +++ 4 files changed, 7 insertions(+), 1 deletion(-) diff --git a/include/cpu/arm/cpu.h b/include/cpu/arm/cpu.h index 903e346..bf38b63 100644 --- a/include/cpu/arm/cpu.h +++ b/include/cpu/arm/cpu.h @@ -210,6 +210,7 @@ typedef struct CPUARMState { int kvm_request_interrupt_window; int kvm_irq; uint8_t timer_interrupt_disabled; + uint64_t interrupt_flag; //indicate in interrupt or not } CPUARMState; CPUARMState *cpu_arm_init(const char *cpu_model); diff --git a/include/cpu/se_libcpu.h b/include/cpu/se_libcpu.h index 0e1c253..32bb258 100644 --- a/include/cpu/se_libcpu.h +++ b/include/cpu/se_libcpu.h @@ -79,6 +79,7 @@ struct se_libcpu_interface_t { const int *allow_custom_instructions; const int *concretize_io_writes; const int *concretize_io_addresses; + const int *allow_interrupt; } mode; struct exec { diff --git a/src/cpu-exec.c b/src/cpu-exec.c index 82b79e3..93b66f4 100644 --- a/src/cpu-exec.c +++ b/src/cpu-exec.c @@ -451,7 +451,8 @@ static bool process_interrupt_request(CPUArchState *env) { if ((interrupt_request & CPU_INTERRUPT_HARD) && ((IS_M(env) && env->regs[15] < 0xfffffff0) || !(env->uncached_cpsr & CPSR_I))) { if ((armv7m_nvic_can_take_pending_exception(env->nvic))) { - if (likely(*g_sqi.mode.fast_concrete_invocation && **g_sqi.mode.running_concrete)) { + if (likely(*g_sqi.mode.fast_concrete_invocation && **g_sqi.mode.running_concrete && *g_sqi.mode.allow_interrupt) + || unlikely(*g_sqi.mode.allow_interrupt == 2)) { env->exception_index = EXCP_IRQ; do_interrupt(env); has_interrupt = true; diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c index 70303d0..5915bbd 100644 --- a/src/target-arm/helper.c +++ b/src/target-arm/helper.c @@ -711,6 +711,7 @@ static void do_v7m_exception_exit(CPUARMState *env) { HPRINTF(" R3=0x%x R4=0x%x R5=0x%x 
R6=0x%x R7=0x%x R8=0x%x R9=0x%x R10=0x%x R11=0x%x R12=0x%x R2=0x%x R1=0x%x R0=0x%x\n", RR_cpu(env,regs[3]), RR_cpu(env,regs[4]), RR_cpu(env,regs[5]), RR_cpu(env,regs[6]), RR_cpu(env,regs[7]), RR_cpu(env,regs[8]), RR_cpu(env,regs[9]), RR_cpu(env,regs[10]), RR_cpu(env,regs[11]), RR_cpu(env,regs[12]), RR_cpu(env,regs[2]), RR_cpu(env,regs[1]), RR_cpu(env,regs[0])); + env->interrupt_flag = 0; /* ??? The exception return type specifies Thread/Handler mode. However this is also implied by the xPSR value. Not sure what to do if there is a mismatch. */ @@ -746,6 +747,7 @@ void do_interrupt_v7m(CPUARMState *env) { case EXCP_SWI: /* The PC already points to the next instruction. */ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, false); + env->interrupt_flag = 2; return; case EXCP_PREFETCH_ABORT: case EXCP_DATA_ABORT: @@ -769,6 +771,7 @@ void do_interrupt_v7m(CPUARMState *env) { armv7m_nvic_acknowledge_irq(env->nvic); env->v7m.exception = exc; *exception = exc; + env->interrupt_flag = 1; break; case EXCP_EXCEPTION_EXIT: do_v7m_exception_exit(env); From c2a8b08e890893551d13aab5c5bad65521077a0e Mon Sep 17 00:00:00 2001 From: weizhou-chaojixx Date: Sat, 28 Mar 2020 00:31:27 -0400 Subject: [PATCH 43/59] se_libcpu: add external irqs control interfaces Signed-off-by: weizhou-chaojixx --- include/cpu/se_libcpu.h | 9 +++++++++ src/target-arm/helper.c | 15 +++++++++++++++ src/target-arm/op_helper.c | 16 ++++++++++++++++ 3 files changed, 40 insertions(+) diff --git a/include/cpu/se_libcpu.h b/include/cpu/se_libcpu.h index 32bb258..dd47320 100644 --- a/include/cpu/se_libcpu.h +++ b/include/cpu/se_libcpu.h @@ -54,7 +54,16 @@ typedef void (*se_do_interrupt_arm_t)(void); void se_do_interrupt_all(int intno, int is_int, int error_code, target_ulong next_eip, int is_hw); #elif defined(TARGET_ARM) void se_do_interrupt_arm(void); +void se_set_armv7m_external_irq(int irq_num); +void se_enable_all_armv7m_external_irq(int serial); +void se_enable_systick_irq(int mode); +uint32_t se_get_active_armv7m_external_irq(int serial); + void se_helper_do_interrupt_arm(CPUArchState *env); +void se_helper_set_armv7m_external_irq(CPUArchState *env, int irq_num); +void se_helper_enable_all_armv7m_external_irq(CPUArchState *env, int serial); +void se_helper_enable_systick_irq(CPUArchState *env, int mode); +uint32_t se_helper_get_active_armv7m_external_irq(CPUArchState *env, int serial); #else #error Unsupported target architecture #endif diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c index 5915bbd..e8c431f 100644 --- a/src/target-arm/helper.c +++ b/src/target-arm/helper.c @@ -821,6 +821,21 @@ void do_interrupt_v7m(CPUARMState *env) { #ifdef CONFIG_SYMBEX #include +uint32_t se_helper_get_active_armv7m_external_irq(CPUArchState *env, int serial) { + return armv7m_nvic_get_active_external_irq(env->nvic, serial); +} + +void se_helper_set_armv7m_external_irq(CPUARMState *env, int irq_num) { + armv7m_nvic_set_external_peripheral_irq(env->nvic, irq_num, 1); +} + +void se_helper_enable_all_armv7m_external_irq(CPUARMState *env, int serial) { + armv7m_nvic_enable_all_external_irq(env->nvic, serial, 1); +} + +void se_helper_enable_systick_irq(CPUARMState *env, int mode) { + armv7m_nvic_enable_systick(env->nvic, mode); +} /* This will be called from S2EExecutor if running concretely; It will in turn call the real ARM IRQ handler with current CPUARMState.*/ void do_interrupt(CPUARMState *env) { diff --git a/src/target-arm/op_helper.c b/src/target-arm/op_helper.c index 714e444..8b9f2ea 100644 --- a/src/target-arm/op_helper.c +++ 
b/src/target-arm/op_helper.c @@ -94,6 +94,22 @@ uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, uint32_t rn, uint32_t max /* This will be called from S2EExecutor if running concretely; It will in turn call the real ARM IRQ handler with current CPUARMState.*/ +uint32_t se_get_active_armv7m_external_irq(int serial) { + return se_helper_get_active_armv7m_external_irq(env, serial); +} + +void se_set_armv7m_external_irq(int irq_num) { + se_helper_set_armv7m_external_irq(env, irq_num); + cpu_exit(env);//exit cpu loop to invoke the interrupt immediately +} + +void se_enable_all_armv7m_external_irq(int serial) { + se_helper_enable_all_armv7m_external_irq(env, serial); +} + +void se_enable_systick_irq(int mode) { + se_helper_enable_systick_irq(env, mode); +} void se_do_interrupt_arm(void) { se_helper_do_interrupt_arm(env); From 6d2572a6b796cfa984907d49388c619e85413e0e Mon Sep 17 00:00:00 2001 From: weizhou-chaojixx Date: Tue, 5 May 2020 00:38:12 -0400 Subject: [PATCH 44/59] fixup! target-arm: add interrupt_flag in env to make sure the execution will not be frequently disrupted by interrupts --- include/cpu/arm/cpu.h | 2 +- src/cpu-exec.c | 7 ++++--- src/target-arm/helper.c | 12 +++++++----- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/include/cpu/arm/cpu.h b/include/cpu/arm/cpu.h index bf38b63..4d4950e 100644 --- a/include/cpu/arm/cpu.h +++ b/include/cpu/arm/cpu.h @@ -210,7 +210,7 @@ typedef struct CPUARMState { int kvm_request_interrupt_window; int kvm_irq; uint8_t timer_interrupt_disabled; - uint64_t interrupt_flag; //indicate in interrupt or not + int interrupt_flag; //indicate in interrupt or not } CPUARMState; CPUARMState *cpu_arm_init(const char *cpu_model); diff --git a/src/cpu-exec.c b/src/cpu-exec.c index 93b66f4..1106035 100644 --- a/src/cpu-exec.c +++ b/src/cpu-exec.c @@ -450,15 +450,16 @@ static bool process_interrupt_request(CPUArchState *env) { // in case basepri has not been synced so add exit code condition if ((interrupt_request & CPU_INTERRUPT_HARD) && ((IS_M(env) && env->regs[15] < 0xfffffff0) || !(env->uncached_cpsr & CPSR_I))) { - if ((armv7m_nvic_can_take_pending_exception(env->nvic))) { + int irq_num; + if ((armv7m_nvic_can_take_pending_exception(env->nvic, &irq_num))) { if (likely(*g_sqi.mode.fast_concrete_invocation && **g_sqi.mode.running_concrete && *g_sqi.mode.allow_interrupt) - || unlikely(*g_sqi.mode.allow_interrupt == 2)) { + || unlikely(*g_sqi.mode.allow_interrupt == 2) || unlikely(irq_num != 15)) { env->exception_index = EXCP_IRQ; do_interrupt(env); has_interrupt = true; } } else { - DPRINTF("cpu basepri = %d take_exc = %d\n", env->v7m.basepri, armv7m_nvic_can_take_pending_exception(env->nvic)); + DPRINTF("cpu basepri = %d take_exc = \n", env->v7m.basepri); } } #endif diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c index e8c431f..b9e9e97 100644 --- a/src/target-arm/helper.c +++ b/src/target-arm/helper.c @@ -707,11 +707,12 @@ static void do_v7m_exception_exit(CPUARMState *env) { if (xpsr & 0x200) WR_cpu(env,regs[13],(RR_cpu(env,regs[13]) | 4)); - HPRINTF(" interrupt exit r13 = 0x%x r15 = 0x%x\n", env->regs[13], env->regs[15]); HPRINTF(" R3=0x%x R4=0x%x R5=0x%x R6=0x%x R7=0x%x R8=0x%x R9=0x%x R10=0x%x R11=0x%x R12=0x%x R2=0x%x R1=0x%x R0=0x%x\n", RR_cpu(env,regs[3]), RR_cpu(env,regs[4]), RR_cpu(env,regs[5]), RR_cpu(env,regs[6]), RR_cpu(env,regs[7]), RR_cpu(env,regs[8]), RR_cpu(env,regs[9]), RR_cpu(env,regs[10]), RR_cpu(env,regs[11]), RR_cpu(env,regs[12]), RR_cpu(env,regs[2]), RR_cpu(env,regs[1]), RR_cpu(env,regs[0])); - 
env->interrupt_flag = 0; + if (env->interrupt_flag > 0) + env->interrupt_flag -= 1; + HPRINTF(" interrupt exit r13 = 0x%x r15 = 0x%x flag = %d\n", env->regs[13], env->regs[15], env->interrupt_flag); /* ??? The exception return type specifies Thread/Handler mode. However this is also implied by the xPSR value. Not sure what to do if there is a mismatch. */ @@ -747,7 +748,7 @@ void do_interrupt_v7m(CPUARMState *env) { case EXCP_SWI: /* The PC already points to the next instruction. */ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, false); - env->interrupt_flag = 2; + //env->interrupt_flag = 2; return; case EXCP_PREFETCH_ABORT: case EXCP_DATA_ABORT: @@ -771,7 +772,7 @@ void do_interrupt_v7m(CPUARMState *env) { armv7m_nvic_acknowledge_irq(env->nvic); env->v7m.exception = exc; *exception = exc; - env->interrupt_flag = 1; + env->interrupt_flag += 1; break; case EXCP_EXCEPTION_EXIT: do_v7m_exception_exit(env); @@ -815,7 +816,8 @@ void do_interrupt_v7m(CPUARMState *env) { WR_cpu(env,regs[14],lr); addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4); env->regs[15] = addr & 0xfffffffe; - HPRINTF("addr = %x vecbase = %d exce = %d\n", addr, env->v7m.vecbase, env->v7m.exception); + HPRINTF("addr = %x vecbase = %d exce = %d flag = %d\n", addr, env->v7m.vecbase, + env->v7m.exception, env->interrupt_flag); env->thumb = addr & 1; } From b8b4d5ff0a81f9cb884f876577f9706729beb4cc Mon Sep 17 00:00:00 2001 From: weizhou-chaojixx Date: Tue, 19 May 2020 14:41:04 -0400 Subject: [PATCH 45/59] comment debug log Signed-off-by: weizhou-chaojixx --- src/cpu-exec.c | 2 +- src/target-arm/helper.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cpu-exec.c b/src/cpu-exec.c index 1106035..7780a4a 100644 --- a/src/cpu-exec.c +++ b/src/cpu-exec.c @@ -28,7 +28,7 @@ #define barrier() asm volatile("" ::: "memory") -#define DEBUG_EXEC +// #define DEBUG_EXEC // #define TRACE_EXEC #ifdef DEBUG_EXEC diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c index b9e9e97..fc27d92 100644 --- a/src/target-arm/helper.c +++ b/src/target-arm/helper.c @@ -25,7 +25,7 @@ #include #include -#define DEBUG_HELPER +//#define DEBUG_HELPER #ifdef DEBUG_HELPER #define HPRINTF(...) printf(__VA_ARGS__) From ddc30aa90c99d25b58f5af9b3cf6248d1b54e513 Mon Sep 17 00:00:00 2001 From: weizhou-chaojixx Date: Thu, 21 May 2020 20:17:11 -0400 Subject: [PATCH 46/59] fixup! 
softmmu_template.h: no need for original mmio rw for symbolic mmio ports --- src/softmmu_template.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/softmmu_template.h b/src/softmmu_template.h index af57033..5915f78 100644 --- a/src/softmmu_template.h +++ b/src/softmmu_template.h @@ -426,7 +426,9 @@ void glue(glue(io_write, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_phys_addr SE_SET_MEM_IO_VADDR(env, addr, 0); env->mem_io_pc = (uintptr_t) retaddr; +#ifdef CONFIG_SYMBEX if (likely(!g_sqi.mem.is_mmio_symbolic(addr, DATA_SIZE))) { +#endif #if SHIFT <= 2 ops->write(physaddr, val, 1 << SHIFT); #else @@ -438,7 +440,9 @@ void glue(glue(io_write, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_phys_addr ops->write(physaddr + 4, val >> 32, 4); #endif #endif /* SHIFT > 2 */ +#ifdef CONFIG_SYMBEX } +#endif } void glue(glue(io_write_chk, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_phys_addr_t physaddr, DATA_TYPE val, From c2dccf88758cd41b399c613257694963a9882fce Mon Sep 17 00:00:00 2001 From: weizhou-chaojixx Date: Thu, 21 May 2020 21:51:14 -0400 Subject: [PATCH 47/59] check env S2EDIR; Signed-off-by: weizhou-chaojixx --- src/CMakeLists.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 88df9ae..421acc7 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -4,6 +4,10 @@ # Make sure we include any source files in the target-specific directory file(GLOB TARGET_SRC_FILES ${TARGET_DIR}/*.c) +if(NOT DEFINED ENV{S2EDIR}) + message(FATAL_ERROR "not defined environment variable:S2EDIR") +endif() + add_library(cpu cpu-exec.c cpus.c exec.c From df5a3d1e35b54f1dc528998bb7c162faed15e3df Mon Sep 17 00:00:00 2001 From: weizhou-chaojixx Date: Thu, 21 May 2020 22:26:37 -0400 Subject: [PATCH 48/59] fixup! check env S2EDIR; --- src/CMakeLists.txt | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 421acc7..a84b01d 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -4,10 +4,6 @@ # Make sure we include any source files in the target-specific directory file(GLOB TARGET_SRC_FILES ${TARGET_DIR}/*.c) -if(NOT DEFINED ENV{S2EDIR}) - message(FATAL_ERROR "not defined environment variable:S2EDIR") -endif() - add_library(cpu cpu-exec.c cpus.c exec.c @@ -32,7 +28,7 @@ target_include_directories(cpu PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_SOURCE_DIR}/include ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET_DIR} ${CMAKE_BINARY_DIR}/include - $ENV{S2EDIR}/qemu/include/nvic) + ${CMAKE_SOURCE_DIR}/../qemu/include/nvic) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D__STDC_FORMAT_MACROS -D_GNU_SOURCE -DNEED_CPU_H -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -DTARGET_PHYS_ADDR_BITS=64") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -fPIC -fno-omit-frame-pointer") From c0bb0f51fde0f2044e21bb01162e77151f295e9a Mon Sep 17 00:00:00 2001 From: weizhou-chaojixx Date: Thu, 21 May 2020 22:46:40 -0400 Subject: [PATCH 49/59] fixup! 
target-arm: add interrupt_flag in env to make sure the execution will not be frequently disrupted by interrupts --- src/cpu-exec.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/cpu-exec.c b/src/cpu-exec.c index 7780a4a..79472af 100644 --- a/src/cpu-exec.c +++ b/src/cpu-exec.c @@ -452,12 +452,16 @@ static bool process_interrupt_request(CPUArchState *env) { ((IS_M(env) && env->regs[15] < 0xfffffff0) || !(env->uncached_cpsr & CPSR_I))) { int irq_num; if ((armv7m_nvic_can_take_pending_exception(env->nvic, &irq_num))) { +#ifdef CONFIG_SYMBEX if (likely(*g_sqi.mode.fast_concrete_invocation && **g_sqi.mode.running_concrete && *g_sqi.mode.allow_interrupt) || unlikely(*g_sqi.mode.allow_interrupt == 2) || unlikely(irq_num != 15)) { +#endif env->exception_index = EXCP_IRQ; do_interrupt(env); has_interrupt = true; +#ifdef CONFIG_SYMBEX } +#endif } else { DPRINTF("cpu basepri = %d take_exc = \n", env->v7m.basepri); } From e673a65c75a3ef3ee8907195ceb6cea3934ddd51 Mon Sep 17 00:00:00 2001 From: weizhou-chaojixx Date: Sun, 24 May 2020 21:22:40 -0400 Subject: [PATCH 50/59] comment out DEBUG_TS Signed-off-by: weizhou-chaojixx --- src/target-arm/translate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/target-arm/translate.c b/src/target-arm/translate.c index 0023a8e..35d5728 100644 --- a/src/target-arm/translate.c +++ b/src/target-arm/translate.c @@ -35,7 +35,7 @@ #include #endif -#define DEBUG_TS +//#define DEBUG_TS #ifdef DEBUG_TS #define TPRINTF(...) printf(__VA_ARGS__) From 2f5a1b2fb500d881e082ae220ff7b11d772eaaa3 Mon Sep 17 00:00:00 2001 From: weizhou-chaojixx Date: Sun, 24 May 2020 21:24:12 -0400 Subject: [PATCH 51/59] helper: change armv7m_nvic_acknowledge_irq prototype to ensure the triggered and activated IRQ numbers match Signed-off-by: weizhou-chaojixx --- src/target-arm/helper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c index fc27d92..1299f00 100644 --- a/src/target-arm/helper.c +++ b/src/target-arm/helper.c @@ -769,7 +769,7 @@ void do_interrupt_v7m(CPUARMState *env) { return; case EXCP_IRQ: armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure); - armv7m_nvic_acknowledge_irq(env->nvic); + armv7m_nvic_acknowledge_irq(env->nvic, exc); env->v7m.exception = exc; *exception = exc; env->interrupt_flag += 1; break; From dd147c01e4f211fe3862096b4f14f142f1ee4100 Mon Sep 17 00:00:00 2001 From: weizhou-chaojixx Date: Sat, 20 Jun 2020 16:13:18 -0400 Subject: [PATCH 52/59] fixup!
target-arm: add interrupt_flag in env to make sure the execution will not be frequently disrupted by interrupts --- src/cpu-exec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cpu-exec.c b/src/cpu-exec.c index 79472af..d3a6866 100644 --- a/src/cpu-exec.c +++ b/src/cpu-exec.c @@ -463,7 +463,7 @@ static bool process_interrupt_request(CPUArchState *env) { } #endif } else { - DPRINTF("cpu basepri = %d take_exc = \n", env->v7m.basepri); + DPRINTF("cpu basepri = %d irq num = %d\n", env->v7m.basepri, irq_num); } } #endif From 82409e6806af027c1c192ac720f761dd3b03c99c Mon Sep 17 00:00:00 2001 From: weizhou-chaojixx Date: Sat, 18 Jul 2020 11:58:53 -0400 Subject: [PATCH 53/59] target-arm/translate.c: add TB type Signed-off-by: weizhou-chaojixx --- src/target-arm/translate.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/target-arm/translate.c b/src/target-arm/translate.c index 35d5728..a28b5e9 100644 --- a/src/target-arm/translate.c +++ b/src/target-arm/translate.c @@ -835,7 +835,7 @@ static const uint8_t table_logic_cc[16] = { /* Set PC and Thumb state from an immediate address. */ static inline void gen_bx_im(DisasContext *s, uint32_t addr) { TCGv tmp; - + SET_TB_TYPE(TB_CALL_IND); s->is_jmp = DISAS_UPDATE; if (s->thumb != (addr & 1)) { tmp = tcg_temp_new_i32(); @@ -7290,6 +7290,7 @@ static void disas_arm_insn(CPUARMState *env, DisasContext *s) { s->condlabel = gen_new_label(); gen_test_cc(cond ^ 1, s->condlabel); s->condjmp = 1; + SET_TB_TYPE(TB_COND_JMP); } if ((insn & 0x0f900000) == 0x03000000) { if ((insn & (1 << 21)) == 0) { @@ -8350,6 +8351,7 @@ static void disas_arm_insn(CPUARMState *env, DisasContext *s) { } offset = (((int32_t) insn << 8) >> 8); val += (offset << 2) + 4; + SET_TB_TYPE(TB_JMP); gen_jmp(s, val); } break; case 0xc: @@ -8693,6 +8695,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw /* Load. 
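A load that targets r15 acts as a return: the TB is typed TB_RET, and on M-profile the loaded value may be an EXC_RETURN magic word, so the TB ends with DISAS_BX_EXCRET and the PC is range-checked at run time.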
*/ tmp = gen_ld32(addr, IS_USER(s)); if (i == 15) { + SET_TB_TYPE(TB_RET); gen_bx(s, tmp); if (IS_M(env)) { s->is_jmp = DISAS_BX_EXCRET; @@ -9096,6 +9099,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw offset += s->pc; if (insn & (1 << 12)) { /* b/bl */ + SET_TB_TYPE(TB_CALL); gen_jmp(s, offset); } else { /* blx */ @@ -9218,6 +9222,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw s->condlabel = gen_new_label(); gen_test_cc(op ^ 1, s->condlabel); s->condjmp = 1; + SET_TB_TYPE(TB_COND_JMP); /* offset[11:1] = insn[10:0] */ offset = (insn & 0x7ff) << 1; @@ -9231,6 +9236,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw offset |= (insn & (1 << 11)) << 8; /* jump to the offset */ + SET_TB_TYPE(TB_JMP); gen_jmp(s, s->pc + offset); } } else { @@ -9575,6 +9581,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { s->condlabel = gen_new_label(); gen_test_cc(cond ^ 1, s->condlabel); s->condjmp = 1; + SET_TB_TYPE(TB_COND_JMP); } } @@ -9706,6 +9713,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { if (insn & (1 << 7)) { ARCH(5); val = (uint32_t) s->pc | 1; + SET_TB_TYPE(TB_CALL_IND); tmp2 = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp2, val); store_reg(s, 14, tmp2); @@ -10084,8 +10092,8 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { store_reg(s, 13, addr); /* set the new PC value */ if ((insn & 0x0900) == 0x0900) { + SET_TB_TYPE(TB_RET); store_reg_from_load(env, s, 15, tmp); - // To find how many other regs pop with pc TPRINTF("pc = 0x%x sp = 0x%x \n", env->regs[15], env->regs[13]); if (IS_M(env)) { s->is_jmp = DISAS_BX_EXCRET; @@ -10109,6 +10117,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3; val = (uint32_t) s->pc + 2; val += offset; + SET_TB_TYPE(TB_JMP); gen_jmp(s, val); break; @@ -10257,6 +10266,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { val = (uint32_t) s->pc + 2; offset = ((int32_t) insn << 24) >> 24; val += offset << 1; + SET_TB_TYPE(TB_JMP); gen_jmp(s, val); break; @@ -10270,6 +10280,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { val = (uint32_t) s->pc; offset = ((int32_t) insn << 21) >> 21; val += (offset << 1) + 2; + SET_TB_TYPE(TB_JMP); gen_jmp(s, val); break; From 5a69042971ce42d78d3f6bfcf2d61dc7e0eaa891 Mon Sep 17 00:00:00 2001 From: weizhou-chaojixx Date: Sat, 18 Jul 2020 12:03:52 -0400 Subject: [PATCH 54/59] target-arm/translate.c: do not sleep on WFI, continue execution for IRQs Signed-off-by: weizhou-chaojixx --- src/target-arm/translate.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/target-arm/translate.c b/src/target-arm/translate.c index a28b5e9..cbd5978 100644 --- a/src/target-arm/translate.c +++ b/src/target-arm/translate.c @@ -10585,7 +10585,9 @@ static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationB /* nothing more to generate */ break; case DISAS_WFI: - gen_helper_wfi(); + // do not sleep; fall through to the next instruction + gen_goto_tb(dc, 1, dc->pc); + //gen_helper_wfi(); break; case DISAS_SWI: gen_exception(EXCP_SWI); From afef1432d138a2c454f58c87aedcb9cb2e8da6da Mon Sep 17 00:00:00 2001 From: weizhou-chaojixx Date: Fri, 9 Oct 2020 19:20:22 -0400 Subject: [PATCH 55/59] fixup!
target-arm/translate.c: add TB type --- src/target-arm/translate.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/target-arm/translate.c b/src/target-arm/translate.c index cbd5978..c4aa518 100644 --- a/src/target-arm/translate.c +++ b/src/target-arm/translate.c @@ -9099,7 +9099,9 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw offset += s->pc; if (insn & (1 << 12)) { /* b/bl */ - SET_TB_TYPE(TB_CALL); + if (insn & (1 << 14)) { // exclude b.w caller + SET_TB_TYPE(TB_CALL); + } gen_jmp(s, offset); } else { /* blx */ From e18da50e5a46ff1580456baf79e7588a936dc576 Mon Sep 17 00:00:00 2001 From: weizhou-chaojixx Date: Fri, 18 Dec 2020 07:07:32 -0500 Subject: [PATCH 56/59] se_libcpu.h: add invalid_pc_access event Signed-off-by: weizhou-chaojixx --- include/cpu/se_libcpu.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/cpu/se_libcpu.h b/include/cpu/se_libcpu.h index dd47320..421984f 100644 --- a/include/cpu/se_libcpu.h +++ b/include/cpu/se_libcpu.h @@ -217,6 +217,7 @@ struct se_libcpu_interface_t { unsigned *on_privilege_change_signals_count; unsigned *on_page_directory_change_signals_count; unsigned *on_call_return_signals_count; + unsigned *on_invalid_pc_access_signals_count; void (*on_privilege_change)(unsigned previous, unsigned current); void (*on_page_directory_change)(uint64_t previous, uint64_t current); @@ -268,6 +269,8 @@ struct se_libcpu_interface_t { uint64_t writeMask, int isMemoryAccess); int (*on_call_return_translate)(uint64_t pc, int isCall); + + void (*on_invalid_pc_access)(uint64_t addr); } events; struct { From 7acb9742409cfb8993041291abc991cbc00a5619 Mon Sep 17 00:00:00 2001 From: weizhou-chaojixx Date: Fri, 18 Dec 2020 07:08:09 -0500 Subject: [PATCH 57/59] exec.c: trigger invalid_pc_access event Signed-off-by: weizhou-chaojixx --- src/exec.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/exec.c b/src/exec.c index 5cb9a3d..6b486bf 100644 --- a/src/exec.c +++ b/src/exec.c @@ -542,6 +542,9 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr) { if (!mem_desc_find(pd)) { #elif defined(TARGET_ARM) if (!mem_desc_find(addr)) { + if (unlikely(*g_sqi.events.on_invalid_pc_access_signals_count)) { + g_sqi.events.on_invalid_pc_access(addr); + } #else #error Unsupported target architecture #endif From 724800d5c1d3f0bb2ee4e966b669582ec5feb8b0 Mon Sep 17 00:00:00 2001 From: weizhou-chaojixx Date: Sat, 19 Dec 2020 04:16:40 -0500 Subject: [PATCH 58/59] se_libcpu.h: add on_armv7m_interrupt_exit event Signed-off-by: weizhou-chaojixx --- include/cpu/se_libcpu.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/cpu/se_libcpu.h b/include/cpu/se_libcpu.h index 421984f..5b5d3c6 100644 --- a/include/cpu/se_libcpu.h +++ b/include/cpu/se_libcpu.h @@ -211,6 +211,7 @@ struct se_libcpu_interface_t { unsigned *on_translate_instruction_end_signals_count; unsigned *on_translate_register_access_signals_count; unsigned *on_exception_signals_count; + unsigned *on_exception_exit_signals_count; unsigned *on_page_fault_signals_count; unsigned *on_tlb_miss_signals_count; unsigned *on_port_access_signals_count; @@ -271,6 +272,8 @@ struct se_libcpu_interface_t { int (*on_call_return_translate)(uint64_t pc, int isCall); void (*on_invalid_pc_access)(uint64_t addr); + + void (*on_armv7m_interrupt_exit)(uint64_t irqNo); } events; struct { From 5307ae5ffe90c72b0fdd2c732039b4144006a322 Mon Sep 17 00:00:00 2001 From: weizhou-chaojixx Date: Sat, 19 Dec 2020 04:17:11 -0500 Subject: [PATCH 59/59] helper.c: 
trigger on_armv7m_interrupt_exit event Signed-off-by: weizhou-chaojixx --- src/target-arm/helper.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/target-arm/helper.c b/src/target-arm/helper.c index 1299f00..5ce0e58 100644 --- a/src/target-arm/helper.c +++ b/src/target-arm/helper.c @@ -684,8 +684,10 @@ static void do_v7m_exception_exit(CPUARMState *env) { uint32_t type; uint32_t xpsr; + uint32_t irq_no; type = env->regs[15]; + irq_no = env->v7m.exception; if (env->v7m.exception != 0) armv7m_nvic_complete_irq(env->nvic, env->v7m.exception, false); @@ -713,6 +715,9 @@ static void do_v7m_exception_exit(CPUARMState *env) { if (env->interrupt_flag > 0) env->interrupt_flag -= 1; HPRINTF(" interrupt exit r13 = 0x%x r15 = 0x%x flag = %d\n", env->regs[13], env->regs[15], env->interrupt_flag); + if (unlikely(*g_sqi.events.on_exception_exit_signals_count)) { + g_sqi.events.on_armv7m_interrupt_exit(irq_no); + } /* ??? The exception return type specifies Thread/Handler mode. However this is also implied by the xPSR value. Not sure what to do if there is a mismatch. */
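Closing note on the series: patch 58 introduced on_exception_exit_signals_count alongside on_armv7m_interrupt_exit, and that counter is what the guard above tests before dispatching the event (the original hunk tested the on_invalid_pc_access counter, which looks like a copy-paste slip). A minimal sketch of how an embedder might wire the two interface members (the wiring function and callback names are hypothetical; only the se_libcpu_interface_t fields from patches 56-58 are real):

    /* Hypothetical embedder-side wiring for the interrupt-exit event. */
    static unsigned s_exception_exit_listeners;

    static void my_on_armv7m_interrupt_exit(uint64_t irqNo) {
        /* e.g. record which IRQ handler just returned */
    }

    void wire_events(struct se_libcpu_interface_t *sqi) {
        sqi->events.on_exception_exit_signals_count = &s_exception_exit_listeners;
        sqi->events.on_armv7m_interrupt_exit = my_on_armv7m_interrupt_exit;
        s_exception_exit_listeners = 1; /* non-zero enables delivery */
    }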