mirror of https://github.com/AFLplusplus/AFLplusplus.git
fix #24 checking for validity of the requested block address
@@ -271,6 +271,25 @@ static void afl_request_tsl(target_ulong pc, target_ulong cb, uint32_t flags, ui
 }
 
 
+/* Check if an address is valid in the current mapping */
+
+static inline int is_valid_addr(target_ulong addr) {
+
+    int l, flags;
+    target_ulong page;
+    void * p;
+
+    page = addr & TARGET_PAGE_MASK;
+    l = (page + TARGET_PAGE_SIZE) - addr;
+
+    flags = page_get_flags(page);
+    if (!(flags & PAGE_VALID) || !(flags & PAGE_READ))
+        return 0;
+
+    return 1;
+}
+
+
 /* This is the other side of the same channel. Since timeouts are handled by
    afl-fuzz simply killing the child, we can just wait until the pipe breaks. */
 
@@ -282,6 +301,8 @@ static void afl_wait_tsl(CPUState *cpu, int fd) {
 
   while (1) {
 
+    u8 invalid_pc = 0;
+
     /* Broken pipe means it's time to return to the fork server routine. */
 
     if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl))
@@ -290,21 +311,36 @@ static void afl_wait_tsl(CPUState *cpu, int fd) {
     tb = tb_htable_lookup(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, t.tb.cf_mask);
 
     if(!tb) {
+
+      /* The child may request to translate a block of memory that is not
+         mapped in the parent (e.g. jitted code or dlopened code).
+         This causes a SIGSEGV in gen_intermediate_code() and associated
+         subroutines. We simply avoid caching of such blocks. */
+
+      if (is_valid_addr(t.tb.pc)) {
       mmap_lock();
       tb = tb_gen_code(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, 0);
       mmap_unlock();
+      } else {
+
+        invalid_pc = 1;
+
+      }
     }
 
     if (t.is_chain) {
 
       if (read(fd, &c, sizeof(struct afl_chain)) != sizeof(struct afl_chain))
         break;
 
+      if (!invalid_pc) {
+
       last_tb = tb_htable_lookup(cpu, c.last_tb.pc, c.last_tb.cs_base,
                                  c.last_tb.flags, c.cf_mask);
       if (last_tb) {
         tb_add_jump(last_tb, c.tb_exit, tb);
       }
+      }
     }
 
   }
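
For illustration only: the failure mode this commit addresses involves a child that runs code from a mapping the QEMU fork-server parent never sees, such as a dlopen()ed library or JIT-emitted code, exactly the case named in the comment added above. The sketch below is a hypothetical target of that kind; the file name libtarget.so and the symbol entry are placeholders and not part of this commit.

/* Hypothetical target: executes code from a dlopen()ed library, whose
   mapping exists only in the forked child, never in the fork-server
   parent that services the TSL pipe. Build e.g. with: gcc target.c -ldl */

#include <dlfcn.h>
#include <stdio.h>

int main(void) {

  void *h = dlopen("./libtarget.so", RTLD_NOW);            /* placeholder name */
  if (!h) { fprintf(stderr, "dlopen: %s\n", dlerror()); return 1; }

  /* placeholder symbol; its code lives at addresses unknown to the parent */
  void (*entry)(void) = (void (*)(void))dlsym(h, "entry");
  if (!entry) { fprintf(stderr, "dlsym: %s\n", dlerror()); return 1; }

  entry();

  dlclose(h);
  return 0;

}

With is_valid_addr() in place, the parent skips tb_gen_code() for such addresses and, via invalid_pc, also skips chaining to the missing block instead of crashing.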