autoformat with black
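This commit is the output of running the black formatter over the repository's Python helper scripts; the diffs below show quote normalization, call wrapping, and blank-line fixes. As a minimal sketch of the same normalization through black's Python API (assuming a black release where format_str and Mode are public; the exact version used for this commit is not recorded here):

    # Hypothetical demo, not part of the commit: format a snippet the way
    # `black <file>` would format it on disk.
    import black

    SRC = (
        "parser.add_argument('-f', '--file', dest='file', type=str,\n"
        "                    help='elf file name', required=True)\n"
    )
    print(black.format_str(SRC, mode=black.Mode()))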
@@ -1,32 +1,49 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
 import argparse
 from elftools.elf.elffile import ELFFile


 def process_file(file, section, base):
-    with open(file, 'rb') as f:
+    with open(file, "rb") as f:
         for sect in ELFFile(f).iter_sections():
-            if (sect.name == section):
-                start = base + sect.header['sh_offset']
-                end = start + sect.header['sh_size']
-                print ("0x%016x-0x%016x" % (start, end))
+            if sect.name == section:
+                start = base + sect.header["sh_offset"]
+                end = start + sect.header["sh_size"]
+                print("0x%016x-0x%016x" % (start, end))
                 return

-    print ("Section '%s' not found in '%s'" % (section, file))
+    print("Section '%s' not found in '%s'" % (section, file))


 def hex_value(x):
     return int(x, 16)


 def main():
-    parser = argparse.ArgumentParser(description='Process some integers.')
-    parser.add_argument('-f', '--file', dest='file', type=str,
-                        help='elf file name', required=True)
-    parser.add_argument('-s', '--section', dest='section', type=str,
-                        help='elf section name', required=True)
-    parser.add_argument('-b', '--base', dest='base', type=hex_value,
-                        help='elf base address', required=True)
+    parser = argparse.ArgumentParser(description="Process some integers.")
+    parser.add_argument(
+        "-f", "--file", dest="file", type=str, help="elf file name", required=True
+    )
+    parser.add_argument(
+        "-s",
+        "--section",
+        dest="section",
+        type=str,
+        help="elf section name",
+        required=True,
+    )
+    parser.add_argument(
+        "-b",
+        "--base",
+        dest="base",
+        type=hex_value,
+        help="elf base address",
+        required=True,
+    )

     args = parser.parse_args()
-    process_file (args.file, args.section, args.base)
+    process_file(args.file, args.section, args.base)


 if __name__ == "__main__":
-    main()
+    main()
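For orientation, the script above is a thin CLI around pyelftools: the printed range is base + sh_offset through base + sh_offset + sh_size for the requested section. A self-contained sketch of the same lookup (the binary path and section name below are illustrative, not from the commit):

    from elftools.elf.elffile import ELFFile  # pip install pyelftools

    def section_range(path, section, base=0):
        # Mirrors process_file() above, but returns the range instead of printing.
        with open(path, "rb") as f:
            for sect in ELFFile(f).iter_sections():
                if sect.name == section:
                    start = base + sect.header["sh_offset"]
                    return (start, start + sect.header["sh_size"])
        return None

    print(section_range("/bin/ls", ".text"))  # example inputs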
@@ -34,13 +34,11 @@ import ida_segment


 class ContextLoaderError(Exception):
-    """Base "catch all" exception for this script
-    """
+    """Base "catch all" exception for this script"""


 class ArchNotSupportedError(ContextLoaderError):
-    """Exception raised if the input file CPU architecture isn't supported fully
-    """
+    """Exception raised if the input file CPU architecture isn't supported fully"""


 def parse_mapping_index(filepath: str):
@@ -51,13 +49,16 @@ def parse_mapping_index(filepath: str):
     """

     if filepath is None:
-        raise ContextLoaderError('_index.json file was not selected')
+        raise ContextLoaderError("_index.json file was not selected")

     try:
-        with open(filepath, 'rb') as _file:
+        with open(filepath, "rb") as _file:
             return json.load(_file)
     except Exception as ex:
-        raise ContextLoaderError('Failed to parse json file {}'.format(filepath)) from ex
+        raise ContextLoaderError(
+            "Failed to parse json file {}".format(filepath)
+        ) from ex


 def get_input_name():
     """Get the name of the input file
@@ -68,19 +69,21 @@ def get_input_name():
     input_filepath = ida_nalt.get_input_file_path()
     return Path(input_filepath).name


 def write_segment_bytes(start: int, filepath: str):
-    """"Read data from context file and write it to the IDA segment
+    """ "Read data from context file and write it to the IDA segment

     :param start: Start address
     :param filepath: Path to context file
     """

-    with open(filepath, 'rb') as _file:
+    with open(filepath, "rb") as _file:
         data = _file.read()

     decompressed_data = zlib.decompress(data)
     ida_bytes.put_bytes(start, decompressed_data)


 def create_segment(context_dir: str, segment: dict, is_be: bool):
     """Create segment in IDA and map in the data from the file

@@ -90,23 +93,30 @@ def create_segment(context_dir: str, segment: dict, is_be: bool):
     """

     input_name = get_input_name()
-    if Path(segment['name']).name != input_name:
+    if Path(segment["name"]).name != input_name:
         ida_seg = idaapi.segment_t()
-        ida_seg.start_ea = segment['start']
-        ida_seg.end_ea = segment['end']
+        ida_seg.start_ea = segment["start"]
+        ida_seg.end_ea = segment["end"]
         ida_seg.bitness = 1 if is_be else 0
-        if segment['permissions']['r']:
+        if segment["permissions"]["r"]:
             ida_seg.perm |= ida_segment.SEGPERM_READ
-        if segment['permissions']['w']:
+        if segment["permissions"]["w"]:
             ida_seg.perm |= ida_segment.SEGPERM_WRITE
-        if segment['permissions']['x']:
+        if segment["permissions"]["x"]:
             ida_seg.perm |= ida_segment.SEGPERM_EXEC
-            idaapi.add_segm_ex(ida_seg, Path(segment['name']).name, 'CODE', idaapi.ADDSEG_OR_DIE)
+            idaapi.add_segm_ex(
+                ida_seg, Path(segment["name"]).name, "CODE", idaapi.ADDSEG_OR_DIE
+            )
         else:
-            idaapi.add_segm_ex(ida_seg, Path(segment['name']).name, 'DATA', idaapi.ADDSEG_OR_DIE)
+            idaapi.add_segm_ex(
+                ida_seg, Path(segment["name"]).name, "DATA", idaapi.ADDSEG_OR_DIE
+            )

-    if segment['content_file']:
-        write_segment_bytes(segment['start'], PurePath(context_dir, segment['content_file']))
+    if segment["content_file"]:
+        write_segment_bytes(
+            segment["start"], PurePath(context_dir, segment["content_file"])
+        )


 def create_segments(index: dict, context_dir: str):
     """Iterate segments in index JSON, create the segment in IDA, and map in the data from the file
@@ -117,9 +127,10 @@ def create_segments(index: dict, context_dir: str):

     info = idaapi.get_inf_structure()
     is_be = info.is_be()
-    for segment in index['segments']:
+    for segment in index["segments"]:
         create_segment(context_dir, segment, is_be)


 def rebase_program(index: dict):
     """Rebase the program to the offset specified in the context _index.json

@@ -128,20 +139,21 @@ def rebase_program(index: dict):

     input_name = get_input_name()
     new_base = None
-    for segment in index['segments']:
-        if not segment['name']:
+    for segment in index["segments"]:
+        if not segment["name"]:
             continue

-        segment_name = Path(segment['name']).name
+        segment_name = Path(segment["name"]).name
         if input_name == segment_name:
-            new_base = segment['start']
+            new_base = segment["start"]
             break

     if not new_base:
-        raise ContextLoaderError('Input file is not in _index.json')
+        raise ContextLoaderError("Input file is not in _index.json")

     current_base = idaapi.get_imagebase()
-    ida_segment.rebase_program(new_base-current_base, 8)
+    ida_segment.rebase_program(new_base - current_base, 8)


 def get_pc_by_arch(index: dict) -> int:
     """Queries the input file CPU architecture and attempts to lookup the address of the program
@@ -153,13 +165,14 @@ def get_pc_by_arch(index: dict) -> int:

     progctr = None
     info = idaapi.get_inf_structure()
-    if info.procname == 'metapc':
+    if info.procname == "metapc":
         if info.is_64bit():
-            progctr = index['regs']['rax']
+            progctr = index["regs"]["rax"]
         elif info.is_32bit():
-            progctr = index['regs']['eax']
+            progctr = index["regs"]["eax"]
     return progctr


 def write_reg_info(index: dict):
     """Write register info as line comment at instruction pointed to by the program counter and
     change focus to that location
@@ -167,17 +180,19 @@ def write_reg_info(index: dict):
     :param index: _index.json JSON data
     """

-    cmt = ''
-    for reg, val in index['regs'].items():
+    cmt = ""
+    for reg, val in index["regs"].items():
         cmt += f"{reg.ljust(6)} : {hex(val)}\n"

     progctr = get_pc_by_arch(index)
     if progctr is None:
         raise ArchNotSupportedError(
-            'Architecture not fully supported, skipping register status comment')
+            "Architecture not fully supported, skipping register status comment"
+        )
     ida_bytes.set_cmt(progctr, cmt, 0)
     ida_kernwin.jumpto(progctr)


 def main(filepath):
     """Main - parse _index.json input and map context files into the database

@@ -193,5 +208,6 @@ def main(filepath):
     except ContextLoaderError as ex:
         print(ex)

-if __name__ == '__main__':
-    main(ida_kernwin.ask_file(1, '*.json', 'Import file name'))
+
+if __name__ == "__main__":
+    main(ida_kernwin.ask_file(1, "*.json", "Import file name"))
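The loader reformatted above consumes a context dump: an _index.json describing segments, each optionally backed by a zlib-compressed content file next to it. A sketch of that read path runnable outside IDA (the directory name is a placeholder; the JSON keys are taken from the code above):

    import json
    import zlib
    from pathlib import Path

    def dump_context(index_path):
        # Walk segments the way create_segments() does, and decompress each
        # backing file the way write_segment_bytes() does.
        context_dir = Path(index_path).parent
        with open(index_path, "rb") as f:
            index = json.load(f)
        for segment in index["segments"]:
            if not segment["content_file"]:
                continue
            raw = (context_dir / segment["content_file"]).read_bytes()
            data = zlib.decompress(raw)
            print(f"{segment['name']}: {segment['start']:#x}-{segment['end']:#x} ({len(data)} bytes)")

    dump_context("context/_index.json")  # placeholder path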
@@ -11,7 +11,7 @@

 import os
 import string
-import binascii 
+import binascii
 import codecs
 import errno
 import struct
@@ -21,6 +21,7 @@ import subprocess

 from binascii import unhexlify

+
 def ensure_dir(dir):
     try:
         os.makedirs(dir)
@@ -28,109 +29,118 @@ def ensure_dir(dir):
     if e.errno != errno.EEXIST:
         raise


 def parse_args():
-    parser = argparse.ArgumentParser(description=(
-        "Helper - Specify input file analysis and output folder to save corpus for strings in the overall project --------------------------------------------------------------------------- Example usage : python2 thisfile.py outdir str.txt" ))
-
-    #parser.add_argument("tokenpath",
-        #help="Destination directory for tokens")
-    parser.add_argument("cur",
-        help = "Current Path")
-    parser.add_argument("db",
-        help = "CodeQL database Path")
-    parser.add_argument("tokenpath",
-        help="Destination directory for tokens")
+    parser = argparse.ArgumentParser(
+        description=(
+            "Helper - Specify input file analysis and output folder to save corpus for strings in the overall project --------------------------------------------------------------------------- Example usage : python2 thisfile.py outdir str.txt"
+        )
+    )
+
+    # parser.add_argument("tokenpath",
+    # help="Destination directory for tokens")
+    parser.add_argument("cur", help="Current Path")
+    parser.add_argument("db", help="CodeQL database Path")
+    parser.add_argument("tokenpath", help="Destination directory for tokens")

     return parser.parse_args()

-def static_analysis(file,file2,cur,db) :
-    with open(cur+"/"+file, "w") as f:
-        print(cur+"/"+file)
-        stream = os.popen("codeql query run " + cur +"/"+ file2 + " -d " + db )
+
+def static_analysis(file, file2, cur, db):
+    with open(cur + "/" + file, "w") as f:
+        print(cur + "/" + file)
+        stream = os.popen("codeql query run " + cur + "/" + file2 + " -d " + db)
         output = stream.read()
         f.write(output)
         f.close()

-def copy_tokens(cur, tokenpath) :
-    subprocess.call(["mv " + cur + "/" + "strcmp-strs/*" + " " + cur + "/" + tokenpath + "/."] ,shell=True)
-    subprocess.call(["mv " + cur + "/" + "strncmp-strs/*" + " " + cur + "/" + tokenpath + "/."] ,shell=True)
-    subprocess.call(["mv " + cur + "/" + "memcmp-strs/*" + " " + cur + "/" + tokenpath + "/."] ,shell=True)
-    subprocess.call(["mv " + cur + "/" + "lits/*" + " " + cur + "/" + tokenpath + "/."] ,shell=True)
-    subprocess.call(["mv " + cur + "/" + "strtool-strs/*" + " " + cur + "/" + tokenpath + "/."] ,shell=True)
-    subprocess.call(["rm -rf strcmp-strs memcmp-strs strncmp-strs lits strtool-strs"],shell=True)
-    subprocess.call(["rm *.out"],shell=True)
-    subprocess.call(["find "+tokenpath+" -size 0 -delete"],shell=True)
+
+def copy_tokens(cur, tokenpath):
+    subprocess.call(
+        ["mv " + cur + "/" + "strcmp-strs/*" + " " + cur + "/" + tokenpath + "/."],
+        shell=True,
+    )
+    subprocess.call(
+        ["mv " + cur + "/" + "strncmp-strs/*" + " " + cur + "/" + tokenpath + "/."],
+        shell=True,
+    )
+    subprocess.call(
+        ["mv " + cur + "/" + "memcmp-strs/*" + " " + cur + "/" + tokenpath + "/."],
+        shell=True,
+    )
+    subprocess.call(
+        ["mv " + cur + "/" + "lits/*" + " " + cur + "/" + tokenpath + "/."], shell=True
+    )
+    subprocess.call(
+        ["mv " + cur + "/" + "strtool-strs/*" + " " + cur + "/" + tokenpath + "/."],
+        shell=True,
+    )
+    subprocess.call(
+        ["rm -rf strcmp-strs memcmp-strs strncmp-strs lits strtool-strs"], shell=True
+    )
+    subprocess.call(["rm *.out"], shell=True)
+    subprocess.call(["find " + tokenpath + " -size 0 -delete"], shell=True)



-def codeql_analysis(cur, db) :
-    static_analysis("litout.out","litool.ql", cur, db)
-    static_analysis("strcmp-strings.out","strcmp-str.ql", cur, db)
-    static_analysis("strncmp-strings.out","strncmp-str.ql", cur, db)
-    static_analysis("memcmp-strings.out","memcmp-str.ql", cur, db)
-    static_analysis("strtool-strings.out","strtool.ql", cur, db)
-    start_autodict(0,cur)
+
+def codeql_analysis(cur, db):
+    static_analysis("litout.out", "litool.ql", cur, db)
+    static_analysis("strcmp-strings.out", "strcmp-str.ql", cur, db)
+    static_analysis("strncmp-strings.out", "strncmp-str.ql", cur, db)
+    static_analysis("memcmp-strings.out", "memcmp-str.ql", cur, db)
+    static_analysis("strtool-strings.out", "strtool.ql", cur, db)
+    start_autodict(0, cur)


 def start_autodict(tokenpath, cur):
-    command = [
-        'python3',
-        cur + '/litan.py',
-        cur+'/lits/',
-        cur+'/litout.out'
-    ]
+    command = ["python3", cur + "/litan.py", cur + "/lits/", cur + "/litout.out"]
     worker1 = subprocess.Popen(command)
     print(worker1.communicate())

     command1 = [
-        'python3',
-        cur + '/strcmp-strings.py',
-        cur + '/strcmp-strs/',
-        cur + '/strcmp-strings.out'
-    ]
+        "python3",
+        cur + "/strcmp-strings.py",
+        cur + "/strcmp-strs/",
+        cur + "/strcmp-strings.out",
+    ]
     worker2 = subprocess.Popen(command1)
     print(worker2.communicate())

     command2 = [
-        'python3',
-        cur + '/strncmp-strings.py',
-        cur + '/strncmp-strs/',
-        cur + '/strncmp-strings.out'
-    ]
+        "python3",
+        cur + "/strncmp-strings.py",
+        cur + "/strncmp-strs/",
+        cur + "/strncmp-strings.out",
+    ]
     worker3 = subprocess.Popen(command2)
     print(worker3.communicate())


     command5 = [
-        'python3',
-        cur + '/memcmp-strings.py',
-        cur + '/memcmp-strs/',
-        cur + '/memcmp-strings.out'
-    ]
+        "python3",
+        cur + "/memcmp-strings.py",
+        cur + "/memcmp-strs/",
+        cur + "/memcmp-strings.out",
+    ]
     worker6 = subprocess.Popen(command5)
     print(worker6.communicate())


     command8 = [
-        'python3',
-        cur + '/stan-strings.py',
-        cur + '/strtool-strs/',
-        cur + '/strtool-strings.out'
-    ]
+        "python3",
+        cur + "/stan-strings.py",
+        cur + "/strtool-strs/",
+        cur + "/strtool-strings.out",
+    ]
     worker9 = subprocess.Popen(command8)
     print(worker9.communicate())


 def main():
-    args = parse_args()
+    args = parse_args()
     ensure_dir(args.tokenpath)
-    #copy_tokens(args.cur, args.tokenpath)
+    # copy_tokens(args.cur, args.tokenpath)
    codeql_analysis(args.cur, args.db)
     copy_tokens(args.cur, args.tokenpath)
-    #start_autodict(args.tokenpath, args.cur)
-if __name__ == '__main__':
-    main()
+    # start_autodict(args.tokenpath, args.cur)
+
+
+if __name__ == "__main__":
+    main()
@@ -4,7 +4,7 @@
 # Author : Microsvuln - Arash.vre@gmail.com
 import string
 import os
-import binascii 
+import binascii
 import codecs
 import struct
 import errno
@@ -12,75 +12,101 @@ import argparse
 import re
 import base64
 from binascii import unhexlify


 def parse_args():
-    parser = argparse.ArgumentParser(description=(
-        "Helper - Specify input file to analysis and output folder to save corpdirus for constants in the overall project ------- Example usage : python2 thisfile.py outdir o.txt"))
-    parser.add_argument("corpdir",
-        help="The path to the corpus directory to generate files.")
-    parser.add_argument("infile",
-        help="Specify file output of codeql analysis - ex. ooo-hex.txt, analysis take place on this file, example : python2 thisfile.py outdir out.txt")
-    return parser.parse_args()
+    parser = argparse.ArgumentParser(
+        description=(
+            "Helper - Specify input file to analysis and output folder to save corpdirus for constants in the overall project ------- Example usage : python2 thisfile.py outdir o.txt"
+        )
+    )
+    parser.add_argument(
+        "corpdir", help="The path to the corpus directory to generate files."
+    )
+    parser.add_argument(
+        "infile",
+        help="Specify file output of codeql analysis - ex. ooo-hex.txt, analysis take place on this file, example : python2 thisfile.py outdir out.txt",
+    )
+    return parser.parse_args()


 def ensure_dir(dir):
     try:
         os.makedirs(dir)
     except OSError as e:
         if e.errno == errno.EEXIST:
-            #print "[-] Directory exists, specify another directory"
+            # print "[-] Directory exists, specify another directory"
             exit(1)


 def do_analysis1(corpdir, infile):
-    with open(infile, "rb") as f:
-        lines = f.readlines()[1:]
-        f.close()
+    with open(infile, "rb") as f:
+        lines = f.readlines()[1:]
+        f.close()
         new_lst = []
         n = 1
         for i, num in enumerate(lines):
             if i != 0:
-                new_lst.append(num)
+                new_lst.append(num)
                 str1 = str(num)
-                print ("num is " + str1)
-                str1 = str1.rstrip('\n\n')
-                #str1 = str1.replace("0x","");
-                str1 = str1.replace("|","")
-                str1 = str1.rstrip('\r\n')
-                str1 = str1.rstrip('\n')
-                str1 = str1.replace(" ","")
-                #str1 = str1.translate(None, string.punctuation)
-                translator=str.maketrans('','',string.punctuation)
-                str1=str1.translate(translator)
+                print("num is " + str1)
+                str1 = str1.rstrip("\n\n")
+                # str1 = str1.replace("0x","");
+                str1 = str1.replace("|", "")
+                str1 = str1.rstrip("\r\n")
+                str1 = str1.rstrip("\n")
+                str1 = str1.replace(" ", "")
+                # str1 = str1.translate(None, string.punctuation)
+                translator = str.maketrans("", "", string.punctuation)
+                str1 = str1.translate(translator)
                 str1 = str1[1:]
                 str1 = str1[:-1]
                 print("After cleanup : " + str1)
-                if (str1 != '0') and (str1 != 'ffffffff') and (str1 != 'fffffffe') or (len(str1) == 4) or (len(str1) == 8):
-                    print ("first : "+str1)
-                    if len(str1) > 8 :
+                if (
+                    (str1 != "0")
+                    and (str1 != "ffffffff")
+                    and (str1 != "fffffffe")
+                    or (len(str1) == 4)
+                    or (len(str1) == 8)
+                ):
+                    print("first : " + str1)
+                    if len(str1) > 8:
                         str1 = str1[:-1]
-                    elif (len(str1) == 5) :
+                    elif len(str1) == 5:
                         str1 = str1 = "0"
                     try:
-                        #str1 = str1.decode("hex")
-                        with open(corpdir+'/lit-seed{0}'.format(n), 'w') as file:
-                            str1 = str1.replace("0x","");
-                            print (str1)
-                            str1 = int(str1,base=16)
-                            str1 = str1.to_bytes(4, byteorder='little')
-                            file.write(str(str1))
-                            file.close()
-                            with open (corpdir+'/lit-seed{0}'.format(n), 'r') as q :
-                                a = q.readline()
-                                a = a[1:]
-                                print ("AFL++ Autodict-QL by Microsvuln : Writing Token :" + str(a))
-                                q.close()
-                                with open (corpdir+'/lit-seed{0}'.format(n), 'w') as w1 :
-                                    w1.write(str(a))
-                                    print ("Done!")
-                                    w1.close()
-                    except:
-                        print("Error!")
-                    n = n+1
+                        # str1 = str1.decode("hex")
+                        with open(corpdir + "/lit-seed{0}".format(n), "w") as file:
+                            str1 = str1.replace("0x", "")
+                            print(str1)
+                            str1 = int(str1, base=16)
+                            str1 = str1.to_bytes(4, byteorder="little")
+                            file.write(str(str1))
+                            file.close()
+                            with open(corpdir + "/lit-seed{0}".format(n), "r") as q:
+                                a = q.readline()
+                                a = a[1:]
+                                print(
+                                    "AFL++ Autodict-QL by Microsvuln : Writing Token :"
+                                    + str(a)
+                                )
+                                q.close()
+                                with open(
+                                    corpdir + "/lit-seed{0}".format(n), "w"
+                                ) as w1:
+                                    w1.write(str(a))
+                                    print("Done!")
+                                    w1.close()
+                    except:
+                        print("Error!")
+                    n = n + 1


 def main():
-    args = parse_args()
+    args = parse_args()
     ensure_dir(args.corpdir)
     do_analysis1(args.corpdir, args.infile)
-if __name__ == '__main__':
-    main()
+
+
+if __name__ == "__main__":
+    main()
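The core of litan.py above is the literal-to-seed conversion: strip the 0x prefix, parse as hex, emit four little-endian bytes. A quick standalone demonstration (the constant is an example, not from the commit's data):

    str1 = "0x41424344".replace("0x", "")      # example constant
    val = int(str1, base=16)
    print(val.to_bytes(4, byteorder="little"))  # b'DCBA'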
@@ -5,7 +5,7 @@

 import os
 import string
-import binascii 
+import binascii
 import codecs
 import errno
 import struct
@@ -13,6 +13,7 @@ import argparse
 import re
 from binascii import unhexlify

+
 def ensure_dir(dir):
     try:
         os.makedirs(dir)
@@ -20,44 +21,63 @@ def ensure_dir(dir):
     if e.errno != errno.EEXIST:
         raise


 def parse_args():
-    parser = argparse.ArgumentParser(description=(
-        "Helper - Specify input file analysis and output folder to save corpus for strings in the overall project --------------------------------------------------------------------------- Example usage : python2 thisfile.py outdir str.txt" ))
-    parser.add_argument("corpdir",
-        help="The path to the corpus directory to generate strings.")
-    parser.add_argument("infile",
-        help="Specify file output of codeql analysis - ex. ooo-atr.txt, analysis take place on this file, example : python2 thisfile.py outdir strings.txt")
+    parser = argparse.ArgumentParser(
+        description=(
+            "Helper - Specify input file analysis and output folder to save corpus for strings in the overall project --------------------------------------------------------------------------- Example usage : python2 thisfile.py outdir str.txt"
+        )
+    )
+    parser.add_argument(
+        "corpdir", help="The path to the corpus directory to generate strings."
+    )
+    parser.add_argument(
+        "infile",
+        help="Specify file output of codeql analysis - ex. ooo-atr.txt, analysis take place on this file, example : python2 thisfile.py outdir strings.txt",
+    )

     return parser.parse_args()


 def do_string_analysis(corpdir, infile1):
-    with open(infile1, "r") as f1:
-        lines = f1.readlines()[1:]
-        f1.close()
+    with open(infile1, "r") as f1:
+        lines = f1.readlines()[1:]
+        f1.close()
         new_lst1 = []
         n = 1
         for i, num1 in enumerate(lines):
             if i != 0:
                 new_lst1.append(num1)
-                #print("num : %s" % num1)
+                # print("num : %s" % num1)
                 str11 = str(num1)
-                str11 = str11.replace("|","")
-                str11 = str11.replace("\n","")
+                str11 = str11.replace("|", "")
+                str11 = str11.replace("\n", "")
                 str11 = str11.lstrip()
                 str11 = str11.rstrip()
                 str11 = str(str11)
-                if ((" " in str11 ) or (")" in str11) or ("(" in str11) or ("<" in str11) or (">" in str11)) :
+                if (
+                    (" " in str11)
+                    or (")" in str11)
+                    or ("(" in str11)
+                    or ("<" in str11)
+                    or (">" in str11)
+                ):
                     print("Space / Paranthesis String : %s" % str11)
-                else :
-                    with open(corpdir+'/memcmp-str{0}'.format(n), 'w') as file:
-                        file.write(str11)
-                        print("AFL++ Autodict-QL by Microsvuln : Writing Token : %s" % str11)
-                        n=n+1
+                else:
+                    with open(corpdir + "/memcmp-str{0}".format(n), "w") as file:
+                        file.write(str11)
+                        print(
+                            "AFL++ Autodict-QL by Microsvuln : Writing Token : %s"
+                            % str11
+                        )
+                        n = n + 1


 def main():
-    args = parse_args()
+    args = parse_args()
     ensure_dir(args.corpdir)
     do_string_analysis(args.corpdir, args.infile)
-if __name__ == '__main__':
-    main()
+
+
+if __name__ == "__main__":
+    main()
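The three sibling diffs that follow are near-identical to this one; only the seed file prefix changes (seed-str, strcmp-str, strncmp-str here versus memcmp-str above). The shared filter is easiest to see in isolation (sample tokens invented for the demo):

    # Tokens containing spaces or brackets are logged and skipped;
    # everything else becomes a one-token seed file.
    tokens = ["magic", "GET /", "init()", "<html>", "deadbeef"]
    for str11 in tokens:
        if (" " in str11) or (")" in str11) or ("(" in str11) or ("<" in str11) or (">" in str11):
            print("Space / Paranthesis String : %s" % str11)
        else:
            print("AFL++ Autodict-QL by Microsvuln : Writing Token : %s" % str11)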
@@ -5,7 +5,7 @@

 import os
 import string
-import binascii 
+import binascii
 import codecs
 import errno
 import struct
@@ -13,6 +13,7 @@ import argparse
 import re
 from binascii import unhexlify

+
 def ensure_dir(dir):
     try:
         os.makedirs(dir)
@@ -20,44 +21,63 @@ def ensure_dir(dir):
     if e.errno != errno.EEXIST:
         raise


 def parse_args():
-    parser = argparse.ArgumentParser(description=(
-        "Helper - Specify input file analysis and output folder to save corpus for strings in the overall project --------------------------------------------------------------------------- Example usage : python2 thisfile.py outdir str.txt" ))
-    parser.add_argument("corpdir",
-        help="The path to the corpus directory to generate strings.")
-    parser.add_argument("infile",
-        help="Specify file output of codeql analysis - ex. ooo-atr.txt, analysis take place on this file, example : python2 thisfile.py outdir strings.txt")
+    parser = argparse.ArgumentParser(
+        description=(
+            "Helper - Specify input file analysis and output folder to save corpus for strings in the overall project --------------------------------------------------------------------------- Example usage : python2 thisfile.py outdir str.txt"
+        )
+    )
+    parser.add_argument(
+        "corpdir", help="The path to the corpus directory to generate strings."
+    )
+    parser.add_argument(
+        "infile",
+        help="Specify file output of codeql analysis - ex. ooo-atr.txt, analysis take place on this file, example : python2 thisfile.py outdir strings.txt",
+    )

     return parser.parse_args()


 def do_string_analysis(corpdir, infile1):
-    with open(infile1, "r") as f1:
-        lines = f1.readlines()[1:]
-        f1.close()
+    with open(infile1, "r") as f1:
+        lines = f1.readlines()[1:]
+        f1.close()
         new_lst1 = []
         n = 1
         for i, num1 in enumerate(lines):
             if i != 0:
                 new_lst1.append(num1)
-                #print("num : %s" % num1)
+                # print("num : %s" % num1)
                 str11 = str(num1)
-                str11 = str11.replace("|","")
-                str11 = str11.replace("\n","")
+                str11 = str11.replace("|", "")
+                str11 = str11.replace("\n", "")
                 str11 = str11.lstrip()
                 str11 = str11.rstrip()
                 str11 = str(str11)
-                if ((" " in str11 ) or (")" in str11) or ("(" in str11) or ("<" in str11) or (">" in str11)) :
+                if (
+                    (" " in str11)
+                    or (")" in str11)
+                    or ("(" in str11)
+                    or ("<" in str11)
+                    or (">" in str11)
+                ):
                     print("Space / Paranthesis String : %s" % str11)
-                else :
-                    with open(corpdir+'/seed-str{0}'.format(n), 'w') as file:
-                        file.write(str11)
-                        print("AFL++ Autodict-QL by Microsvuln : Writing Token : %s" % str11)
-                        n=n+1
+                else:
+                    with open(corpdir + "/seed-str{0}".format(n), "w") as file:
+                        file.write(str11)
+                        print(
+                            "AFL++ Autodict-QL by Microsvuln : Writing Token : %s"
+                            % str11
+                        )
+                        n = n + 1


 def main():
-    args = parse_args()
+    args = parse_args()
     ensure_dir(args.corpdir)
     do_string_analysis(args.corpdir, args.infile)
-if __name__ == '__main__':
-    main()
+
+
+if __name__ == "__main__":
+    main()
@@ -5,7 +5,7 @@

 import os
 import string
-import binascii 
+import binascii
 import codecs
 import errno
 import struct
@@ -13,6 +13,7 @@ import argparse
 import re
 from binascii import unhexlify

+
 def ensure_dir(dir):
     try:
         os.makedirs(dir)
@@ -20,44 +21,63 @@ def ensure_dir(dir):
     if e.errno != errno.EEXIST:
         raise


 def parse_args():
-    parser = argparse.ArgumentParser(description=(
-        "Helper - Specify input file analysis and output folder to save corpus for strings in the overall project --------------------------------------------------------------------------- Example usage : python2 thisfile.py outdir str.txt" ))
-    parser.add_argument("corpdir",
-        help="The path to the corpus directory to generate strings.")
-    parser.add_argument("infile",
-        help="Specify file output of codeql analysis - ex. ooo-atr.txt, analysis take place on this file, example : python2 thisfile.py outdir strings.txt")
+    parser = argparse.ArgumentParser(
+        description=(
+            "Helper - Specify input file analysis and output folder to save corpus for strings in the overall project --------------------------------------------------------------------------- Example usage : python2 thisfile.py outdir str.txt"
+        )
+    )
+    parser.add_argument(
+        "corpdir", help="The path to the corpus directory to generate strings."
+    )
+    parser.add_argument(
+        "infile",
+        help="Specify file output of codeql analysis - ex. ooo-atr.txt, analysis take place on this file, example : python2 thisfile.py outdir strings.txt",
+    )

     return parser.parse_args()


 def do_string_analysis(corpdir, infile1):
-    with open(infile1, "r") as f1:
-        lines = f1.readlines()[1:]
-        f1.close()
+    with open(infile1, "r") as f1:
+        lines = f1.readlines()[1:]
+        f1.close()
         new_lst1 = []
         n = 1
         for i, num1 in enumerate(lines):
             if i != 0:
                 new_lst1.append(num1)
-                #print("num : %s" % num1)
+                # print("num : %s" % num1)
                 str11 = str(num1)
-                str11 = str11.replace("|","")
-                str11 = str11.replace("\n","")
+                str11 = str11.replace("|", "")
+                str11 = str11.replace("\n", "")
                 str11 = str11.lstrip()
                 str11 = str11.rstrip()
                 str11 = str(str11)
-                if ((" " in str11 ) or (")" in str11) or ("(" in str11) or ("<" in str11) or (">" in str11)) :
+                if (
+                    (" " in str11)
+                    or (")" in str11)
+                    or ("(" in str11)
+                    or ("<" in str11)
+                    or (">" in str11)
+                ):
                     print("Space / Paranthesis String : %s" % str11)
-                else :
-                    with open(corpdir+'/strcmp-str{0}'.format(n), 'w') as file:
-                        file.write(str11)
-                        print("AFL++ Autodict-QL by Microsvuln : Writing Token : %s" % str11)
-                        n=n+1
+                else:
+                    with open(corpdir + "/strcmp-str{0}".format(n), "w") as file:
+                        file.write(str11)
+                        print(
+                            "AFL++ Autodict-QL by Microsvuln : Writing Token : %s"
+                            % str11
+                        )
+                        n = n + 1


 def main():
-    args = parse_args()
+    args = parse_args()
     ensure_dir(args.corpdir)
     do_string_analysis(args.corpdir, args.infile)
-if __name__ == '__main__':
-    main()
+
+
+if __name__ == "__main__":
+    main()
@@ -5,7 +5,7 @@

 import os
 import string
-import binascii 
+import binascii
 import codecs
 import errno
 import struct
@@ -13,6 +13,7 @@ import argparse
 import re
 from binascii import unhexlify

+
 def ensure_dir(dir):
     try:
         os.makedirs(dir)
@@ -20,44 +21,63 @@ def ensure_dir(dir):
     if e.errno != errno.EEXIST:
         raise


 def parse_args():
-    parser = argparse.ArgumentParser(description=(
-        "Helper - Specify input file analysis and output folder to save corpus for strings in the overall project --------------------------------------------------------------------------- Example usage : python2 thisfile.py outdir str.txt" ))
-    parser.add_argument("corpdir",
-        help="The path to the corpus directory to generate strings.")
-    parser.add_argument("infile",
-        help="Specify file output of codeql analysis - ex. ooo-atr.txt, analysis take place on this file, example : python2 thisfile.py outdir strings.txt")
+    parser = argparse.ArgumentParser(
+        description=(
+            "Helper - Specify input file analysis and output folder to save corpus for strings in the overall project --------------------------------------------------------------------------- Example usage : python2 thisfile.py outdir str.txt"
+        )
+    )
+    parser.add_argument(
+        "corpdir", help="The path to the corpus directory to generate strings."
+    )
+    parser.add_argument(
+        "infile",
+        help="Specify file output of codeql analysis - ex. ooo-atr.txt, analysis take place on this file, example : python2 thisfile.py outdir strings.txt",
+    )

     return parser.parse_args()


 def do_string_analysis(corpdir, infile1):
-    with open(infile1, "r") as f1:
-        lines = f1.readlines()[1:]
-        f1.close()
+    with open(infile1, "r") as f1:
+        lines = f1.readlines()[1:]
+        f1.close()
         new_lst1 = []
         n = 1
         for i, num1 in enumerate(lines):
             if i != 0:
                 new_lst1.append(num1)
-                #print("num : %s" % num1)
+                # print("num : %s" % num1)
                 str11 = str(num1)
-                str11 = str11.replace("|","")
-                str11 = str11.replace("\n","")
+                str11 = str11.replace("|", "")
+                str11 = str11.replace("\n", "")
                 str11 = str11.lstrip()
                 str11 = str11.rstrip()
                 str11 = str(str11)
-                if ((" " in str11 ) or (")" in str11) or ("(" in str11) or ("<" in str11) or (">" in str11)) :
+                if (
+                    (" " in str11)
+                    or (")" in str11)
+                    or ("(" in str11)
+                    or ("<" in str11)
+                    or (">" in str11)
+                ):
                     print("Space / Paranthesis String : %s" % str11)
-                else :
-                    with open(corpdir+'/strncmp-str{0}'.format(n), 'w') as file:
-                        file.write(str11)
-                        print("AFL++ Autodict-QL by Microsvuln : Writing Token : %s" % str11)
-                        n=n+1
+                else:
+                    with open(corpdir + "/strncmp-str{0}".format(n), "w") as file:
+                        file.write(str11)
+                        print(
+                            "AFL++ Autodict-QL by Microsvuln : Writing Token : %s"
+                            % str11
+                        )
+                        n = n + 1


 def main():
-    args = parse_args()
+    args = parse_args()
     ensure_dir(args.corpdir)
     do_string_analysis(args.corpdir, args.infile)
-if __name__ == '__main__':
-    main()
+
+
+if __name__ == "__main__":
+    main()