Mirror of https://github.com/AFLplusplus/AFLplusplus.git, synced 2025-06-14 19:08:08 +00:00
gcc cmplog fix
@@ -27,8 +27,9 @@
   - added collision free caller instrumentation to LTO mode. activate with
     `AFL_LLVM_LTO_CALLER=1`. You can set a max depth to go through single
     block functions with `AFL_LLVM_LTO_CALLER_DEPTH` (default 0)
+  - fix for GCC_PLUGIN cmplog that broke on std::strings
   - afl-whatsup:
-    - Now also displays current average speed
+    - now also displays current average speed
     - small bugfixes
 - Minor edits to afl-persistent-config
 - Prevent temporary files being left behind on aborted afl-whatsup
@@ -207,7 +207,7 @@ static __maybe_unused __always_inline unsigned e2k_add64carry_first(
   return (unsigned)__builtin_e2k_addcd_c(base, addend, 0);

 }
 \

 #define add64carry_first(base, addend, sum) \
   e2k_add64carry_first(base, addend, sum)

@@ -218,7 +218,7 @@ static __maybe_unused __always_inline unsigned e2k_add64carry_next(
   return (unsigned)__builtin_e2k_addcd_c(base, addend, carry);

 }
 \

 #define add64carry_next(carry, base, addend, sum) \
   e2k_add64carry_next(carry, base, addend, sum)

@@ -230,7 +230,7 @@ static __maybe_unused __always_inline void e2k_add64carry_last(unsigned carry,
   *sum = __builtin_e2k_addcd(base, addend, carry);

 }
 \

 #define add64carry_last(carry, base, addend, sum) \
   e2k_add64carry_last(carry, base, addend, sum)
 #endif /* __iset__ >= 5 */

@@ -311,7 +311,7 @@ static __forceinline char msvc32_add64carry_first(uint64_t base,
   base_32h, addend_32h, sum32 + 1);

 }
 \

 #define add64carry_first(base, addend, sum) \
   msvc32_add64carry_first(base, addend, sum)

@@ -328,7 +328,7 @@ static __forceinline char msvc32_add64carry_next(char carry, uint64_t base,
   base_32h, addend_32h, sum32 + 1);

 }
 \

 #define add64carry_next(carry, base, addend, sum) \
   msvc32_add64carry_next(carry, base, addend, sum)

@@ -345,7 +345,7 @@ static __forceinline void msvc32_add64carry_last(char carry, uint64_t base,
   addend_32h, sum32 + 1);

 }
 \

 #define add64carry_last(carry, base, addend, sum) \
   msvc32_add64carry_last(carry, base, addend, sum)
 #endif /* _MSC_FULL_VER >= 190024231 */
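The e2k and MSVC hunks above are formatting-only changes to thin wrappers that all implement the same add-with-carry chain used by the hash: _first starts the chain and returns the carry, _next consumes a carry and returns the next one, _last consumes the final carry and returns nothing. A portable sketch of that contract, for orientation only (the generic_* helpers below are illustrative, not part of the header):

#include <stdint.h>

/* Portable sketch of the add64carry_{first,next,last} contract provided by
   the platform-specific wrappers above (illustrative only). */
static inline unsigned generic_add64carry_first(uint64_t base, uint64_t addend,
                                                uint64_t *sum) {

  *sum = base + addend;
  return *sum < addend; /* carry out of the 64-bit addition */

}

static inline unsigned generic_add64carry_next(unsigned carry, uint64_t base,
                                               uint64_t addend, uint64_t *sum) {

  *sum = base + addend + carry;
  return *sum < addend || (carry && *sum == addend); /* propagate the carry */

}

static inline void generic_add64carry_last(unsigned carry, uint64_t base,
                                           uint64_t addend, uint64_t *sum) {

  *sum = base + addend + carry; /* final limb: the carry out is discarded */

}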
@@ -454,7 +454,7 @@ typedef struct {
   uint64_t unaligned_64;

 } __attribute__((__packed__)) t1ha_unaligned_proxy;
 \

 #define read_unaligned(ptr, bits) \
   (((const t1ha_unaligned_proxy *)((const uint8_t *)(ptr)-offsetof( \
       t1ha_unaligned_proxy, unaligned_##bits))) \

@@ -539,6 +539,7 @@ static __always_inline const uint64_t *__attribute__((
   (void)(ptr); \
   \
 } while (0)

 #endif
 #endif /* prefetch */
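The read_unaligned macro shown above relies on a __packed__ proxy struct so the compiler emits loads that tolerate arbitrary alignment. A minimal single-width sketch of the same trick (the struct and function names below are illustrative simplifications, not the library's definitions):

#include <stddef.h>
#include <stdint.h>

/* Single-member stand-in for t1ha_unaligned_proxy (illustrative only). */
typedef struct {
  uint64_t unaligned_64;
} __attribute__((__packed__)) proxy64;

static inline uint64_t read_unaligned_64(const void *ptr) {

  /* Casting through a packed struct tells the compiler the access may be
     misaligned, so it generates an alignment-safe load. The offsetof term
     is 0 here but mirrors the macro, which supports several widths. */
  return ((const proxy64 *)((const uint8_t *)ptr -
                            offsetof(proxy64, unaligned_64)))
      ->unaligned_64;

}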
@@ -1734,7 +1734,7 @@ XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t *src);
  * These declarations should only be used with static linking.
  * Never use them in association with dynamic linking!
  *****************************************************************************
  */
  */

 /*
  * These definitions are only present to allow static allocation

@@ -2399,7 +2399,7 @@ XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecretandSeed(
 #define XXH_NO_STREAM
 #undef XXH_NO_STREAM /* don't actually */
 #endif /* XXH_DOXYGEN */
 /*!
 /*!
  * @}
  */

@@ -2614,6 +2614,7 @@ static void *XXH_memcpy(void *dest, const void *src, size_t size) {
   _Static_assert((c), m); \
   \
 } while (0)

 #elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */
 #define XXH_STATIC_ASSERT_WITH_MESSAGE(c, m) \
   do { \

@@ -2621,6 +2622,7 @@ static void *XXH_memcpy(void *dest, const void *src, size_t size) {
   static_assert((c), m); \
   \
 } while (0)

 #else
 #define XXH_STATIC_ASSERT_WITH_MESSAGE(c, m) \
   do { \

@@ -2632,6 +2634,7 @@ static void *XXH_memcpy(void *dest, const void *src, size_t size) {
   }; \
   \
 } while (0)

 #endif
 #define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c), #c)
 #endif

@@ -2850,7 +2853,7 @@ static int XXH_isLittleEndian(void) {
   return one.c[0];

 }
 \

 #define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
 #endif
 #endif

@@ -4679,6 +4682,7 @@ XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b) {
   acc = svadd_u64_x(mask, acc, mul); \
   \
 } while (0)

 #endif /* XXH_VECTOR == XXH_SVE */

 /* prefetch

@@ -4737,10 +4741,12 @@ static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {

 };

 static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL; /*!<
 static const xxh_u64 PRIME_MX1 =
     0x165667919E3779F9ULL; /*!<
     0b0001011001010110011001111001000110011110001101110111100111111001
     */
 static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL; /*!<
 static const xxh_u64 PRIME_MX2 =
     0x9FB21C651E98DF25ULL; /*!<
     0b1001111110110010000111000110010100011110100110001101111100100101
     */
@@ -180,19 +180,19 @@ struct afl_cmptrs_pass : afl_base_pass {
     c = DECL_CONTEXT(c);
     if (c && TREE_CODE(c) != TRANSLATION_UNIT_DECL) return false;

-    /* Check that the first nonstatic data member of the record type
+    /* Check that the first nonstatic named data member of the record type
        is named _M_dataplus. */
     for (c = TYPE_FIELDS(t); c; c = DECL_CHAIN(c))
-      if (TREE_CODE(c) == FIELD_DECL) break;
+      if (TREE_CODE(c) == FIELD_DECL && DECL_NAME(c)) break;
     if (!c || !integer_zerop(DECL_FIELD_BIT_OFFSET(c)) ||
         strcmp(IDENTIFIER_POINTER(DECL_NAME(c)), "_M_dataplus") != 0)
       return false;

-    /* Check that the second nonstatic data member of the record type
+    /* Check that the second nonstatic named data member of the record type
        is named _M_string_length. */
     tree f2;
     for (f2 = DECL_CHAIN(c); f2; f2 = DECL_CHAIN(f2))
-      if (TREE_CODE(f2) == FIELD_DECL) break;
+      if (TREE_CODE(f2) == FIELD_DECL && DECL_NAME(f2)) break;
     if (!f2 /* No need to check this field's offset. */
         || strcmp(IDENTIFIER_POINTER(DECL_NAME(f2)), "_M_string_length") != 0)
       return false;

@@ -208,9 +208,12 @@ struct afl_cmptrs_pass : afl_base_pass {
         strcmp(IDENTIFIER_POINTER(TYPE_IDENTIFIER(c)), "_Alloc_hider") != 0)
       return false;

-    /* And its first data member is named _M_p. */
+    /* And its first nonstatic named data member should be named _M_p.
+       There may be (unnamed) subobjects from empty base classes. We
+       skip the subobjects, then check the offset of the first data
+       member. */
     for (c = TYPE_FIELDS(c); c; c = DECL_CHAIN(c))
-      if (TREE_CODE(c) == FIELD_DECL) break;
+      if (TREE_CODE(c) == FIELD_DECL && DECL_NAME(c)) break;
     if (!c || !integer_zerop(DECL_FIELD_BIT_OFFSET(c)) ||
         strcmp(IDENTIFIER_POINTER(DECL_NAME(c)), "_M_p") != 0)
       return false;
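These checks pattern-match the record layout libstdc++ uses for std::string (std::__cxx11::basic_string); the fix skips unnamed FIELD_DECLs because empty base classes, such as the allocator inside _Alloc_hider, can show up as unnamed fields ahead of the members being tested. A simplified sketch of the assumed layout, for orientation only (this is a hand-written model, not the actual libstdc++ declaration):

#include <cstddef>

// Hand-written, simplified model of the std::string layout the pass matches;
// real libstdc++ uses templates, an allocator base class and more machinery.
struct sketch_string {

  struct _Alloc_hider { // the real _Alloc_hider inherits the (empty) allocator;
                        // GCC can expose that base as an unnamed FIELD_DECL
                        // sitting in front of _M_p
    char *_M_p;         // first *named* member: pointer to the character data
  } _M_dataplus;        // first named field of the string, at bit offset 0

  std::size_t _M_string_length; // second named field: the current length

  union {               // remaining storage, not inspected by the pass
    char        _M_local_buf[16];
    std::size_t _M_allocated_capacity;
  };

};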
@@ -828,7 +828,8 @@ static void instrument_mode_old_environ(aflcc_state_t *aflcc) {
   }

   if (getenv("AFL_LLVM_CTX")) aflcc->instrument_opt_mode |= INSTRUMENT_OPT_CTX;
-  if (getenv("AFL_LLVM_CALLER") || getenv("AFL_LLVM_LTO_CALLER") || getenv("AFL_LLVM_LTO_CTX"))
+  if (getenv("AFL_LLVM_CALLER") || getenv("AFL_LLVM_LTO_CALLER") ||
+      getenv("AFL_LLVM_LTO_CTX"))
     aflcc->instrument_opt_mode |= INSTRUMENT_OPT_CALLER;

   if (getenv("AFL_LLVM_NGRAM_SIZE")) {
@@ -2493,17 +2493,15 @@ int main(int argc, char **argv_orig, char **envp) {

     for (entry = 0; entry < afl->queued_items; ++entry)
       if (!afl->queue_buf[entry]->disabled)
-        if ((afl->queue_buf[entry]->exec_us/1000) > max_ms)
-          max_ms = afl->queue_buf[entry]->exec_us/1000;
+        if ((afl->queue_buf[entry]->exec_us / 1000) > max_ms)
+          max_ms = afl->queue_buf[entry]->exec_us / 1000;

     // Add 20% as a safety margin, capped to exec_tmout given in -t option
     max_ms *= 1.2;
-    if(max_ms > afl->fsrv.exec_tmout)
-      max_ms = afl->fsrv.exec_tmout;
+    if (max_ms > afl->fsrv.exec_tmout) max_ms = afl->fsrv.exec_tmout;

     // Ensure that there is a sensible timeout even for very fast binaries
-    if(max_ms < 5)
-      max_ms = 5;
+    if (max_ms < 5) max_ms = 5;

     afl->fsrv.exec_tmout = max_ms;
     afl->timeout_given = 1;
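For clarity, the reformatted block above derives an automatic timeout from the queue: take the slowest entry's execution time in milliseconds, add a 20% safety margin, cap it at the -t value, and never go below 5 ms. A minimal standalone sketch of that calculation (the function name and parameters are illustrative, not afl-fuzz's API):

#include <stdint.h>

// Illustrative restatement of the auto-timeout logic from the hunk above.
// slowest_exec_us: slowest queue entry in microseconds;
// exec_tmout_ms:   the cap supplied via the -t option, in milliseconds.
static uint32_t auto_timeout_ms(uint64_t slowest_exec_us,
                                uint32_t exec_tmout_ms) {

  uint32_t max_ms = (uint32_t)(slowest_exec_us / 1000);

  // Add 20% as a safety margin, capped to the -t timeout
  max_ms *= 1.2;
  if (max_ms > exec_tmout_ms) max_ms = exec_tmout_ms;

  // Ensure a sensible timeout even for very fast binaries
  if (max_ms < 5) max_ms = 5;

  return max_ms;

}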