fix some oss-fuzz (#1258)

* fix oss-fuzz 22107.

* fix oss-fuzz 22112.

* clean up build target.

* fix oss-fuzz 22226.

* fix oss-fuzz 22227.

* fix oss-fuzz 11640.

* fix oss-fuzz 20772.
Chen Huitao
2020-05-16 10:38:16 +08:00
committed by GitHub
parent 94c94cdff0
commit 08240d5453
4 changed files with 28 additions and 22 deletions

View File

@@ -124,7 +124,7 @@ static int32 roundAndPackInt32( flag zSign, uint64_t absZ STATUS_PARAM)
         roundIncrement = zSign ? 0x7f : 0;
         break;
     default:
-        abort();
+        float_raise(float_flag_invalid STATUS_VAR);
     }
     roundBits = absZ & 0x7F;
     absZ = ( absZ + roundIncrement )>>7;
@@ -175,7 +175,7 @@ static int64 roundAndPackInt64( flag zSign, uint64_t absZ0, uint64_t absZ1 STATU
         increment = zSign && absZ1;
         break;
     default:
-        abort();
+        float_raise(float_flag_invalid STATUS_VAR);
     }
     if ( increment ) {
         ++absZ0;
@@ -229,7 +229,7 @@ static int64 roundAndPackUint64(flag zSign, uint64_t absZ0,
         increment = zSign && absZ1;
         break;
     default:
-        abort();
+        float_raise(float_flag_invalid STATUS_VAR);
     }
     if (increment) {
         ++absZ0;
@@ -382,7 +382,7 @@ static float32 roundAndPackFloat32(flag zSign, int_fast16_t zExp, uint32_t zSig
         roundIncrement = zSign ? 0x7f : 0;
         break;
     default:
-        abort();
+        float_raise(float_flag_invalid STATUS_VAR);
         break;
     }
     roundBits = zSig & 0x7F;
@@ -567,7 +567,7 @@ static float64 roundAndPackFloat64(flag zSign, int_fast16_t zExp, uint64_t zSig
         roundIncrement = zSign ? 0x3ff : 0;
         break;
     default:
-        abort();
+        float_raise(float_flag_invalid STATUS_VAR);
     }
     roundBits = zSig & 0x3FF;
     if ( 0x7FD <= (uint16_t) zExp ) {
@@ -751,7 +751,7 @@ static floatx80
         roundIncrement = zSign ? roundMask : 0;
         break;
     default:
-        abort();
+        float_raise(float_flag_invalid STATUS_VAR);
     }
     roundBits = zSig0 & roundMask;
     if ( 0x7FFD <= (uint32_t) ( zExp - 1 ) ) {
@@ -813,7 +813,7 @@ static floatx80
         increment = zSign && zSig1;
         break;
     default:
-        abort();
+        float_raise(float_flag_invalid STATUS_VAR);
     }
     if ( 0x7FFD <= (uint32_t) ( zExp - 1 ) ) {
         if ( ( 0x7FFE < zExp )
@@ -858,7 +858,7 @@ static floatx80
         increment = zSign && zSig1;
         break;
     default:
-        abort();
+        float_raise(float_flag_invalid STATUS_VAR);
     }
     if ( increment ) {
         ++zSig0;
@@ -1073,7 +1073,7 @@ static float128
         increment = zSign && zSig2;
         break;
     default:
-        abort();
+        float_raise(float_flag_invalid STATUS_VAR);
     }
     if ( 0x7FFD <= (uint32_t) zExp ) {
         if ( ( 0x7FFD < zExp )
@@ -1136,7 +1136,7 @@ static float128
                 increment = zSign && zSig2;
                 break;
             default:
-                abort();
+                float_raise(float_flag_invalid STATUS_VAR);
             }
         }
     }
@@ -1856,7 +1856,7 @@ float32 float32_round_to_int( float32 a STATUS_PARAM)
         }
         break;
     default:
-        abort();
+        float_raise(float_flag_invalid STATUS_VAR);
     }
     z &= ~ roundBitsMask;
     if ( z != float32_val(a) ) STATUS(float_exception_flags) |= float_flag_inexact;
@@ -3588,7 +3588,7 @@ float64 float64_round_to_int( float64 a STATUS_PARAM )
         }
         break;
     default:
-        abort();
+        float_raise(float_flag_invalid STATUS_VAR);
     }
     z &= ~ roundBitsMask;
     if ( z != float64_val(a) )
@@ -4936,7 +4936,7 @@ floatx80 floatx80_round_to_int( floatx80 a STATUS_PARAM )
         }
         break;
     default:
-        abort();
+        float_raise(float_flag_invalid STATUS_VAR);
     }
     z.low &= ~ roundBitsMask;
     if ( z.low == 0 ) {
@@ -6057,7 +6057,7 @@ float128 float128_round_to_int( float128 a STATUS_PARAM )
             }
             break;
         default:
-            abort();
+            float_raise(float_flag_invalid STATUS_VAR);
         }
         z.low &= ~ roundBitsMask;
     }
@@ -6121,7 +6121,7 @@ float128 float128_round_to_int( float128 a STATUS_PARAM )
             }
            break;
         default:
-            abort();
+            float_raise(float_flag_invalid STATUS_VAR);
        }
        z.high &= ~ roundBitsMask;
    }
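
Note on the softfloat hunks above: all fifteen are the same fix. Each rounding-mode switch used to call abort() in its default arm, so a corrupted or out-of-range rounding mode reaching these helpers from fuzzed guest state killed the whole process. The patch raises the invalid flag through float_raise() instead and lets the helper carry on with its default increment. (float_raise(float_flag_invalid STATUS_VAR) has no comma before STATUS_VAR because, in this softfloat port, the STATUS_* macros appear to carry the leading ", status" themselves.) A minimal standalone model of the pattern, with stand-in types and names rather than the real softfloat machinery:

    #include <stdio.h>

    /* Stand-ins for the real softfloat machinery (illustrative only). */
    enum { float_round_nearest_even, float_round_to_zero,
           float_round_up, float_round_down };
    enum { float_flag_invalid = 1 };
    typedef struct { int exception_flags; } float_status;

    static void float_raise(int flags, float_status *status)
    {
        status->exception_flags |= flags;   /* record the exception, don't die */
    }

    static int round_increment(int rounding_mode, int sign, float_status *status)
    {
        switch (rounding_mode) {
        case float_round_nearest_even:
            return 0x40;
        case float_round_to_zero:
            return 0;
        case float_round_up:
            return sign ? 0 : 0x7f;
        case float_round_down:
            return sign ? 0x7f : 0;
        default:
            /* Old behaviour: abort();  New behaviour: flag and keep going. */
            float_raise(float_flag_invalid, status);
            return 0;
        }
    }

    int main(void)
    {
        float_status st = { 0 };
        /* 99 models a garbage rounding mode coming from fuzzed guest state. */
        printf("increment=%d flags=%d\n", round_increment(99, 0, &st),
               st.exception_flags);
        return 0;
    }

Built with -fsanitize=undefined this keeps running on the bogus mode, which is the crash-avoidance behaviour the commit is after.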

View File

@@ -884,7 +884,7 @@ NEON_VOP_ENV(qshl_s32, neon_s32, 1)
 uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
 {
     int8_t shift = (uint8_t)shiftop;
-    int64_t val = valop;
+    uint64_t val = valop;
     if (shift >= 64) {
         if (val) {
             SET_QC();
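
The neon_qshl_s64 change swaps the local from int64_t to uint64_t. The saturation logic below this hunk left-shifts val, and left-shifting a signed 64-bit value past its range is undefined behaviour, which is presumably what the sanitizer-instrumented fuzz build reported; on an unsigned type the same shift is defined to wrap modulo 2^64. A small illustration of the difference (names are mine, not the helper's):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t valop = 0x4000000000000000ULL;
        int8_t shift = 2;

        /* int64_t val = (int64_t)valop;   // val << shift overflows INT64_MAX: UB */
        uint64_t val = valop;              /* the commit's choice */
        uint64_t shifted = val << shift;   /* well defined: wraps, here to 0 */

        printf("%llu\n", (unsigned long long)shifted);
        return 0;
    }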

View File

@@ -365,7 +365,7 @@ static inline int satsw(int x)
 #define FMULLW(a, b) ((int64_t)(a) * (b))
 #define FMULHRW(a, b) (((int16_t)(a) * (int16_t)(b) + 0x8000) >> 16)
-#define FMULHUW(a, b) ((a) * (b) >> 16)
+#define FMULHUW(a, b) ((int64_t)(a) * (b) >> 16)
 #define FMULHW(a, b) ((int16_t)(a) * (int16_t)(b) >> 16)
 #define FAVG(a, b) (((a) + (b) + 1) >> 1)
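
The FMULHUW fix widens the multiply. Its 16-bit operands are promoted to int, and 0xffff * 0xffff = 0xfffe0001 does not fit in a 32-bit signed int, so the unwidened product is signed overflow (undefined behaviour, and the likely source of the sanitizer report). Casting one operand to int64_t makes the product exact before >> 16 extracts the high half. A self-contained check of the patched shape (mulhuw is my name for it, not the macro's):

    #include <stdint.h>
    #include <stdio.h>

    /* Patched shape of FMULHUW: widen before multiplying, then take the high half. */
    static uint16_t mulhuw(uint16_t a, uint16_t b)
    {
        return (uint16_t)(((int64_t)a * b) >> 16);
    }

    int main(void)
    {
        /* Worst case: 0xffff * 0xffff would overflow a 32-bit signed product. */
        printf("0x%04x\n", mulhuw(0xffff, 0xffff));   /* prints 0xfffe */
        return 0;
    }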

View File

@@ -2394,7 +2394,7 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
     if (allocate_args) {
         /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
            preallocate call stack */
-        tcg_abort();
+        return -1;
     }

     stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
@@ -2420,7 +2420,7 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
                 tcg_out_movi(s, ts->type, reg, ts->val);
                 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
             } else {
-                tcg_abort();
+                return -1;
             }
         }
 #ifndef TCG_TARGET_STACK_GROWSUP
@@ -2446,7 +2446,7 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
                 /* XXX: sign extend ? */
                 tcg_out_movi(s, ts->type, reg, ts->val);
             } else {
-                tcg_abort();
+                return -1;
             }
             tcg_regset_set_reg(allocated_regs, reg);
         }
@@ -2530,6 +2530,7 @@ static inline int tcg_gen_code_common(TCGContext *s,
     int op_index;
     const TCGOpDef *def;
     const TCGArg *args;
+    int ret;

 #ifdef DEBUG_DISAS
     if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
@@ -2622,16 +2623,21 @@ static inline int tcg_gen_code_common(TCGContext *s,
             tcg_out_label(s, args[0], s->code_ptr);
             break;
         case INDEX_op_call:
-            args += tcg_reg_alloc_call(s, def, opc, args,
+            ret = tcg_reg_alloc_call(s, def, opc, args,
                                        s->op_dead_args[op_index],
                                        s->op_sync_args[op_index]);
+            if (ret == -1) {
+                goto the_end;
+            } else {
+                args += ret;
+            }
             goto next;
         case INDEX_op_end:
             goto the_end;
         default:
             /* Sanity check that we've not introduced any unhandled opcodes. */
             if (def->flags & TCG_OPF_NOT_PRESENT) {
-                tcg_abort();
+                goto the_end;
             }
             /* Note: in order to speed up the code, it would be much
                faster to have specialized register allocator functions for
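
The tcg.c hunks turn hard aborts in the code generator into recoverable failures: tcg_reg_alloc_call() now returns -1 where it used to call tcg_abort(), and its caller tcg_gen_code_common() checks that sentinel (and the TCG_OPF_NOT_PRESENT sanity check) and jumps to the_end, so translation of the offending block simply stops instead of taking the process down. A generic sketch of that error-propagation shape, with illustrative names rather than the real TCG API:

    #include <stdio.h>

    /* Helper that used to abort() on an unexpected state now reports failure. */
    static int alloc_call_args(int needed, int available)
    {
        if (needed > available) {
            return -1;          /* was: tcg_abort(); */
        }
        return needed;          /* number of arguments consumed */
    }

    static int generate(int needed, int available)
    {
        int consumed = alloc_call_args(needed, available);
        if (consumed == -1) {
            goto the_end;       /* unwind instead of killing the process */
        }
        printf("consumed %d args\n", consumed);
        return 0;
    the_end:
        printf("stopped code generation early\n");
        return -1;
    }

    int main(void)
    {
        generate(2, 8);         /* normal path */
        generate(9, 8);         /* degenerate input from a fuzzed block */
        return 0;
    }

The trade-off is that a malformed block yields an early bail-out instead of a diagnostic abort, which fits the commit's goal of keeping the emulator alive on fuzzed input.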