Clean up build_sections and build_mem_region code
Modify build_mem_region to support an unaligned starting address. This makes it symmetrical with regard to starting and ending partial pages; previously, build_mem_region only supported partial pages at the end of a section (the caller was responsible for handling the first page). Remove the edge-case handling of the first page from build_sections now that build_mem_region does not have alignment restrictions.

Change the section_info_t parameter in build_mem_region to a const reference to eliminate any need to check for a null pointer. Make the raw_data pointer in section_info_t const, as the source file's data should never be modified. The const qualifier is currently cast away via GET_PTR in build_pages when calling into add_enclave_page; the add_enclave_page flow can be modified by a future commit to retain the const modifier.

Add two utilities, is_relocation_page and build_partial_page, to reduce copy-paste code. Add a PAGE_OFFSET macro to calculate the offset within a page. Assert on address/size alignment in build_pages and build_context to document the expected alignment and catch any related bugs.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
This commit is contained in:
parent 85947caa12
commit 14c19edaaf
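
Not part of the commit, but as a reading aid: the reworked build_mem_region walks a possibly unaligned region one page at a time, taking a leading partial page first and then whole pages. The standalone C++ sketch below mirrors that arithmetic using the SE_PAGE_SIZE value and the macros shown in the diff; walk_region() and the 4KiB page size are assumptions used only for illustration.

// Minimal sketch (not from the commit) of the page-walk arithmetic used by
// the new build_mem_region. Assumes 4KiB pages; walk_region() is hypothetical.
#include <algorithm>
#include <cstdint>
#include <cstdio>

#define SE_PAGE_SIZE        0x1000
#define ROUND_TO(x, align)  (((x) + ((align)-1)) & ~((align)-1))
#define ROUND_TO_PAGE(x)    ROUND_TO(x, SE_PAGE_SIZE)
#define TRIM_TO_PAGE(x)     ((x) & ~(SE_PAGE_SIZE-1))
#define PAGE_OFFSET(x)      ((x) & (SE_PAGE_SIZE-1))

// Walk [rva, rva + size) the way build_mem_region now does: the first chunk
// may be a partial page if rva is unaligned, every later chunk starts on a
// page boundary, and only the first iteration can advance by less than a page.
static void walk_region(uint64_t rva, uint64_t size)
{
    uint64_t offset = 0;
    while (offset < size)
    {
        uint64_t page_rva = rva + offset;
        uint64_t chunk = std::min<uint64_t>(SE_PAGE_SIZE - PAGE_OFFSET(page_rva),
                                            size - offset);
        std::printf("add 0x%llx bytes at rva 0x%llx (page base 0x%llx)\n",
                    (unsigned long long)chunk, (unsigned long long)page_rva,
                    (unsigned long long)TRIM_TO_PAGE(page_rva));
        offset += (offset == 0) ? chunk : SE_PAGE_SIZE;
    }
}

int main()
{
    walk_region(0x1a80, 0x2200);   // unaligned start and unaligned end
    return 0;
}

With these inputs the first chunk is 0x580 bytes (up to the next page boundary), the second is a full page, and the last is the 0xc80-byte tail, i.e. the leading/whole/trailing split the commit message describes.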
@@ -58,6 +58,7 @@
 #define ROUND_TO(x, align) (((x) + ((align)-1)) & ~((align)-1))
 #define ROUND_TO_PAGE(x) ROUND_TO(x, SE_PAGE_SIZE)
 #define TRIM_TO_PAGE(x) ((x) & ~(SE_PAGE_SIZE-1))
+#define PAGE_OFFSET(x) ((x) & (SE_PAGE_SIZE -1))
 #ifdef __cplusplus
 #define PAGE_ALIGN(t, x) reinterpret_cast<t*>((reinterpret_cast<size_t>(x)+(SE_PAGE_SIZE-1)) & (~(SE_PAGE_SIZE-1)))
 #else
@@ -101,76 +101,50 @@ void* CLoader::get_symbol_address(const char * const symbol)
     return GET_PTR(void, m_start_addr, rva);
 }
 
-int CLoader::build_mem_region(const section_info_t * const sec_info)
+int CLoader::build_mem_region(const section_info_t &sec_info)
 {
     int ret = SGX_SUCCESS;
-    uint8_t added_page[SE_PAGE_SIZE];
     uint64_t offset = 0;
-    uint8_t *raw_ptr = NULL;
-    uint64_t rva = 0;
     sec_info_t sinfo;
     memset(&sinfo, 0, sizeof(sinfo));
 
-    rva = sec_info->rva + offset;
-    while(offset < TRIM_TO_PAGE(sec_info->raw_data_size))
+    // Build pages of the section that are contain initialized data. Each page
+    // needs to be added individually as the page may hold relocation data, in
+    // which case the page needs to be marked writable.
+    while(offset < sec_info.raw_data_size)
     {
-        raw_ptr = sec_info->raw_data + offset;
-        sinfo.flags = sec_info->flag;
+        uint64_t rva = sec_info.rva + offset;
+        uint64_t size = MIN((SE_PAGE_SIZE - PAGE_OFFSET(rva)), (sec_info.raw_data_size - offset));
 
-        //check if the page is writable.
-        if(sec_info->bitmap && sec_info->bitmap->size())
-        {
-            uint64_t page_frame = rva >> SE_PAGE_SHIFT;
+        sinfo.flags = sec_info.flag;
+        if (is_relocation_page(rva, sec_info.bitmap))
+            sinfo.flags = sec_info.flag | SI_FLAG_W;
 
-            //NOTE:
-            // Current enclave size is not beyond 64G, so the type-casting from (uint64>>15) to (size_t) is OK.
-            // In the future, if the max enclave size is extended to beyond (1<<49), this type-casting will not work.
-            // It only impacts the enclave signing process. (32bit signing tool to sign 64 bit enclaves)
-            if((*sec_info->bitmap)[(size_t)(page_frame / 8)] & (1 << (page_frame % 8)))
-                sinfo.flags = sec_info->flag | SI_FLAG_W;
-        }
-        //call driver API to add page; raw_ptr needn't be page align, driver will handle page align;
-        if(SGX_SUCCESS != (ret = get_enclave_creator()->add_enclave_page(ENCLAVE_ID_IOCTL, raw_ptr, rva, sinfo, ADD_EXTEND_PAGE)))
-        {
-            //if add page failed , we should remove enclave somewhere;
+        if (size == SE_PAGE_SIZE)
+            ret = build_pages(rva, size, sec_info.raw_data + offset, sinfo, ADD_EXTEND_PAGE);
+        else
+            ret = build_partial_page(rva, size, sec_info.raw_data + offset, sinfo, ADD_EXTEND_PAGE);
+        if(SGX_SUCCESS != ret)
             return ret;
-        }
-        offset += SE_PAGE_SIZE;
-        rva = sec_info->rva + offset;
+
+        // The only time we aren't guaranteed to advance the offset by a full
+        // page is when the rva to be added starts in the middle of a page, as
+        // offset is always advanced to the next page boundary. The only case
+        // where the rva can start in the middle of the page is for the initial
+        // rva, i.e. offset == 0.
+        offset += (offset == 0) ? size : SE_PAGE_SIZE;
     }
-    //add the remaider of last page of raw data
-    if(!IS_PAGE_ALIGNED(sec_info->raw_data_size))
+
+    // Add any remaining uninitialized data. We can call build_pages directly
+    // even if there are partial pages since the source is null, i.e. everything
+    // is filled with '0'. Uninitialied data cannot be a relocation table, ergo
+    // there is no need to check the relocation bitmap.
+    if(sec_info.virtual_size > offset)
     {
-        sinfo.flags = sec_info->flag;
-        //the padding be 0
-        memset(added_page, 0, SE_PAGE_SIZE);
-        raw_ptr = sec_info->raw_data + offset;
-        rva = sec_info->rva + offset;
-        memcpy_s(added_page, SE_PAGE_SIZE, raw_ptr, sec_info->raw_data_size & (SE_PAGE_SIZE-1));
-        //check if the page is writable.
-        if(sec_info->bitmap && sec_info->bitmap->size())
-        {
-            uint64_t page_frame = rva >> SE_PAGE_SHIFT;
-            //NOTE:
-            // Current enclave size is not beyond 64G, so the type-casting from (uint64>>15) to (size_t) is OK.
-            // In the future, if the max enclave size is extended to beyond (1<<49), this type-casting will not work.
-            // It only impacts the enclave signing process. (32bit signing tool to sign 64 bit enclaves)
-            if((*sec_info->bitmap)[(size_t)(page_frame / 8)] & (1 << (page_frame % 8)))
-                sinfo.flags = sec_info->flag | SI_FLAG_W;
-        }
-        //call driver to add page;
-        if(SGX_SUCCESS != (ret = get_enclave_creator()->add_enclave_page(ENCLAVE_ID_IOCTL, added_page, rva, sinfo, ADD_EXTEND_PAGE)))
-        {
-            //if add page failed , we should remove enclave somewhere;
-            return ret;
-        }
-        rva += SE_PAGE_SIZE;
-    }
-    //add unintialized page.If the section have no raw data, the offset should be 0.
-    if(ROUND_TO_PAGE(sec_info->virtual_size) > ROUND_TO_PAGE(sec_info->raw_data_size))
-    {
-        size_t size = (size_t)(ROUND_TO_PAGE(sec_info->virtual_size) - ROUND_TO_PAGE(sec_info->raw_data_size));
-        sinfo.flags = sec_info->flag;
+        uint64_t rva = sec_info.rva + offset;
+        size_t size = (size_t)(ROUND_TO_PAGE(sec_info.virtual_size - offset));
+
+        sinfo.flags = sec_info.flag;
         if(SGX_SUCCESS != (ret = build_pages(rva, size, 0, sinfo, ADD_EXTEND_PAGE)))
             return ret;
     }
@@ -187,9 +161,6 @@ int CLoader::build_sections(vector<uint8_t> *bitmap)
 
     for(unsigned int i = 0; i < sections.size() ; i++)
     {
-
-
-
         if((last_section != NULL) &&
            (ROUND_TO_PAGE(last_section->virtual_size() + last_section->get_rva()) < ROUND_TO_PAGE(ROUND_TO_PAGE(last_section->virtual_size()) + last_section->get_rva())) &&
            (ROUND_TO_PAGE(last_section->get_rva() + last_section->virtual_size()) < (sections[i]->get_rva() & (~(SE_PAGE_SIZE - 1)))))
@@ -208,45 +179,12 @@ int CLoader::build_sections(vector<uint8_t> *bitmap)
             max_rva = sections[i]->get_rva();
             last_section = sections[i];
         }
-        //since build_mem_region require the sec_info.rva be page aligned, we need handle the first page.
-        //build the first page;
-        uint64_t offset = (sections[i]->get_rva() & (SE_PAGE_SIZE -1));
-        uint64_t size = SE_PAGE_SIZE - offset;
-        uint8_t first_page[SE_PAGE_SIZE];
-        //the raw data may be smaller than the size, we get the min of them
-        if(sections[i]->raw_data_size() < size)
-            size = sections[i]->raw_data_size();
-        //the padding is '0'
-        memset(first_page, 0, SE_PAGE_SIZE);
-        memcpy_s(&first_page[offset], (size_t)size, sections[i]->raw_data(), (size_t)size);
-        section_info_t sec_info = { first_page, SE_PAGE_SIZE, sections[i]->get_rva() & (~(SE_PAGE_SIZE - 1)), SE_PAGE_SIZE, sections[i]->get_si_flags(), bitmap };
-
-        if(SGX_SUCCESS != (ret = build_mem_region(&sec_info)))
-        {
+        section_info_t sec_info = { sections[i]->raw_data(), sections[i]->raw_data_size(), sections[i]->get_rva(), sections[i]->virtual_size(), sections[i]->get_si_flags(), bitmap };
+        if(SGX_SUCCESS != (ret = build_mem_region(sec_info)))
             return ret;
-        }
-        //if there is more pages, then build the next paged aligned pages
-        if((sections[i]->virtual_size() + offset) > SE_PAGE_SIZE)
-        {
-            sec_info.raw_data = GET_PTR(uint8_t, sections[i]->raw_data(), size);
-            sec_info.raw_data_size = sections[i]->raw_data_size() - size;
-            sec_info.rva = sections[i]->get_rva() + (SE_PAGE_SIZE - offset);
-            assert(0 == (sec_info.rva & (SE_PAGE_SIZE - 1)));
-            //we need use (SE_PAGE_SIZE - offset), because (SE_PAGE_SIZE - offset) may larger than size
-            sec_info.virtual_size = sections[i]->virtual_size() - (SE_PAGE_SIZE - offset);
-            sec_info.flag = sections[i]->get_si_flags();
-            sec_info.bitmap = bitmap;
-            if(SGX_SUCCESS != (ret = build_mem_region(&sec_info)))
-            {
-                return ret;
-            }
-        }
 
     }
-
-
-
 
     if((last_section != NULL) &&
        (ROUND_TO_PAGE(last_section->virtual_size() + last_section->get_rva()) < ROUND_TO_PAGE(ROUND_TO_PAGE(last_section->virtual_size()) + last_section->get_rva())))
     {
@@ -262,16 +200,37 @@ int CLoader::build_sections(vector<uint8_t> *bitmap)
     return SGX_SUCCESS;
 }
 
-int CLoader::build_pages(const uint64_t start_rva, const uint64_t size, void *source, const sec_info_t &sinfo, const uint32_t attr)
+int CLoader::build_partial_page(const uint64_t rva, const uint64_t size, const void *source, const sec_info_t &sinfo, const uint32_t attr)
 {
+    // RVA may or may not be aligned.
+    uint64_t offset = PAGE_OFFSET(rva);
+
+    // Initialize the page with '0', this serves as both the padding at the start
+    // of the page (if it's not aligned) as well as the fill for any unitilized
+    // bytes at the end of the page, e.g. .bss data.
+    uint8_t page_data[SE_PAGE_SIZE];
+    memset(page_data, 0, SE_PAGE_SIZE);
+
+    // The amount of raw data may be less than the number of bytes on the page,
+    // but that portion of page_data has already been filled (see above).
+    memcpy_s(&page_data[offset], (size_t)(SE_PAGE_SIZE - offset), source, (size_t)size);
+
+    // Add the page, trimming the start address to make it page aligned.
+    return build_pages(TRIM_TO_PAGE(rva), SE_PAGE_SIZE, page_data, sinfo, attr);
+}
+
+int CLoader::build_pages(const uint64_t start_rva, const uint64_t size, const void *source, const sec_info_t &sinfo, const uint32_t attr)
+{
     int ret = SGX_SUCCESS;
     uint64_t offset = 0;
     uint64_t rva = start_rva;
 
+    assert(IS_PAGE_ALIGNED(start_rva) && IS_PAGE_ALIGNED(size));
+
     while(offset < size)
     {
         //call driver to add page;
-        if(SGX_SUCCESS != (ret = get_enclave_creator()->add_enclave_page(ENCLAVE_ID_IOCTL, source, rva, sinfo, attr)))
+        if(SGX_SUCCESS != (ret = get_enclave_creator()->add_enclave_page(ENCLAVE_ID_IOCTL, GET_PTR(void, source, 0), rva, sinfo, attr)))
         {
             //if add page failed , we should remove enclave somewhere;
             return ret;
@@ -290,6 +249,8 @@ int CLoader::build_context(const uint64_t start_rva, layout_entry_t *layout)
     memset(&sinfo, 0, sizeof(sinfo));
     uint64_t rva = start_rva + layout->rva;
 
+    assert(IS_PAGE_ALIGNED(rva));
+
     if (layout->content_offset)
     {
         // assume TCS is only 1 page
@@ -312,7 +273,7 @@ int CLoader::build_context(const uint64_t start_rva, layout_entry_t *layout)
     else // guard page should not have content_offset != 0
     {
         section_info_t sec_info = {GET_PTR(uint8_t, m_metadata, layout->content_offset), layout->content_size, rva, layout->page_count << SE_PAGE_SHIFT, layout->si_flags, NULL};
-        if(SGX_SUCCESS != (ret = build_mem_region(&sec_info)))
+        if(SGX_SUCCESS != (ret = build_mem_region(sec_info)))
         {
             return ret;
         }
@@ -468,6 +429,22 @@ bool CLoader::is_enclave_buffer(uint64_t offset, uint64_t size)
     }
     return true;
 }
+
+// is_relocation_page returns true if the specified RVA is a writable relocation page based on the bitmap.
+bool CLoader::is_relocation_page(const uint64_t rva, vector<uint8_t> *bitmap)
+{
+    if(bitmap && bitmap->size())
+    {
+        uint64_t page_frame = rva >> SE_PAGE_SHIFT;
+
+        //NOTE:
+        // Current enclave size is not beyond 64G, so the type-casting from (uint64>>15) to (size_t) is OK.
+        // In the future, if the max enclave size is extended to beyond (1<<49), this type-casting will not work.
+        // It only impacts the enclave signing process. (32bit signing tool to sign 64 bit enclaves)
+        return ((*bitmap)[(size_t)(page_frame / 8)] & (1 << (page_frame % 8)));
+    }
+    return false;
+}
 int CLoader::validate_layout_table()
 {
     layout_t *layout_start = GET_PTR(layout_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset);
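
The relocation bitmap that is_relocation_page consults packs one bit per enclave page: byte page_frame / 8, bit page_frame % 8. A minimal standalone sketch of that indexing (page_is_reloc() and the 4KiB page shift are assumptions for illustration, not code from the commit):

#include <cstdint>
#include <vector>

#define SE_PAGE_SHIFT 12   // assumed 4KiB pages, matching SE_PAGE_SIZE = 0x1000

// Mirror of the bit math in is_relocation_page: one bit per page of the image.
static bool page_is_reloc(const std::vector<uint8_t> &bitmap, uint64_t rva)
{
    uint64_t page_frame = rva >> SE_PAGE_SHIFT;              // page index of the RVA
    size_t   byte_index = (size_t)(page_frame / 8);          // byte that holds the bit
    uint8_t  bit_mask   = (uint8_t)(1 << (page_frame % 8));  // bit within that byte
    if (byte_index >= bitmap.size())
        return false;                                        // past the end of the bitmap
    return (bitmap[byte_index] & bit_mask) != 0;
}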
@@ -68,12 +68,14 @@ public:
     int set_memory_protection();
 
 private:
-    int build_mem_region(const section_info_t * const sec_info);
+    int build_mem_region(const section_info_t &sec_info);
     int build_image(SGXLaunchToken * const lc, sgx_attributes_t * const secs_attr, le_prd_css_file_t *prd_css_file, sgx_misc_attribute_t * const misc_attr);
     int build_secs(sgx_attributes_t * const secs_attr, sgx_misc_attribute_t * const misc_attr);
     int build_context(const uint64_t start_rva, layout_entry_t *layout);
     int build_contexts(layout_t *layout_start, layout_t *layout_end, uint64_t delta);
-    int build_pages(const uint64_t start_rva, const uint64_t size, void *source, const sec_info_t &sinfo, const uint32_t attr);
+    int build_partial_page(const uint64_t rva, const uint64_t size, const void *source, const sec_info_t &sinfo, const uint32_t attr);
+    int build_pages(const uint64_t start_rva, const uint64_t size, const void *source, const sec_info_t &sinfo, const uint32_t attr);
+    bool is_relocation_page(const uint64_t rva, vector<uint8_t> *bitmap);
 
     bool is_ae(const enclave_css_t *enclave_css);
     bool is_metadata_buffer(uint32_t offset, uint32_t size);
@@ -40,7 +40,7 @@ using namespace std;
 
 typedef struct _section_info_t
 {
-    uint8_t *raw_data;          //The file pointer to the first page of the section.
+    const uint8_t *raw_data;    //The file pointer to the first page of the section.
     uint64_t raw_data_size;     //The size of the section or the size of the initialized section on disk.
     uint64_t rva;               //The address of the first byte of the section relative to the image base when section is loaded into memory.
     uint64_t virtual_size;      //The total size of the section when loaded into memory.
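
The commit message notes that the new const on raw_data is still cast away via GET_PTR when build_pages hands the source to add_enclave_page (visible in the GET_PTR(void, source, 0) call above). GET_PTR's definition is not part of this diff; the sketch below uses an assumed cast-and-offset shape purely to illustrate why the qualifier is lost:

#include <cstddef>

// Assumed shape of a cast-and-offset macro; the real GET_PTR definition is not
// shown in this diff.
#define GET_PTR(t, p, offset) \
    reinterpret_cast<t *>(reinterpret_cast<size_t>(p) + static_cast<size_t>(offset))

static void add_page(void *src) { (void)src; }   // stand-in for add_enclave_page's source argument

static void example(const void *source)
{
    // Going through size_t produces a non-const pointer, so the const on
    // 'source' is silently dropped; keeping it would require changing the
    // add_enclave_page flow, as the commit message says a future commit could.
    add_page(GET_PTR(void, source, 0));
}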