lx_emul: manage page structs per buffer range

The management of Linux page structs is now tied to the lifetime of DMA
buffers. Thus, page structs are created when a buffer is allocated and
deallocated only when the buffer is freed - not on lx_emul_mem_free(),
because DMA buffers are cached. Page-struct refcounting was entirely
reworked in lx_emul/shadow/mm/page_alloc.c.

Fixes #4809
Christian Helmuth 2023-08-21 14:28:22 +02:00 committed by Norman Feske
parent 73bf682b62
commit 1e7116fcc1
25 changed files with 310 additions and 335 deletions
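
For orientation, the resulting lifecycle in lx_emul terms - a minimal sketch,
not part of the diff; buffer size and pointer names are illustrative:

    /* allocation registers page structs for the whole buffer range */
    void *buf = lx_emul_mem_alloc_aligned(8 * PAGE_SIZE, PAGE_SIZE);
    /* alloc() invoked lx_emul_add_page_range(buf, ...): page structs for
       the range now exist and are contiguous, as in Linux */

    struct page *p = lx_emul_virt_to_page((char *)buf + PAGE_SIZE);
    /* pure lookup - page structs are no longer created lazily here */

    lx_emul_mem_free(buf);
    /* page structs stay registered because the DMA buffer is only cached;
       lx_emul_remove_page_range() runs when the buffer is really freed */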

View File

@@ -146,7 +146,7 @@ CC_C_OPT += -Wno-restrict -Wno-maybe-uninitialized -Werror=date-time
CC_C_OPT += -Wno-alloc-size-larger-than -Wimplicit-fallthrough=5 -Werror=date-time
CC_C_OPT += -Werror=incompatible-pointer-types -Werror=designated-init
CC_C_OPT += -Wno-packed-not-aligned -Wno-unused-but-set-variable
CC_C_OPT += -Wno-discarded-qualifiers
CC_C_OPT += -Wno-discarded-qualifiers -Wno-unused-function
# avoid build errors with CC_OLEVEL = -O0/-Og - not supported by Linux
override CC_OLEVEL := -O2

View File

@@ -1,6 +1,7 @@
/*
* \brief Lx_emul support for page-struct management
* \author Norman Feske
* \author Christian Helmuth
* \date 2021-07-01
*/
@@ -27,29 +28,27 @@ struct page;
void lx_emul_associate_page_with_virt_addr(struct page *, void const *virt);
void lx_emul_disassociate_page_from_virt_addr(void const *virt);
struct page *lx_emul_associated_page(void const *virt, unsigned long size);
struct page *lx_emul_associated_page(void const *virt);
/**
* Return page struct for the page at a given virtual address
* Return page struct for the page at a given virtual address (virt_to_page.c)
*
* If no page struct exists for the virtual address, it is created.
* As in Linux, page structs of contiguous pages of attached DMA/RAM buffers
* (i.e., page ranges) are contiguous too.
*/
struct page *lx_emul_virt_to_pages(void const *virt, unsigned long num);
struct page *lx_emul_virt_to_page(void const *virt);
/**
* Release page structs for specified virtual-address range
*
* \param size size of range in bytes
* Release page structs for specified virtual-address range (virt_to_page.c)
*/
void lx_emul_forget_pages(void const *virt, unsigned long size);
void lx_emul_remove_page_range(void const *virt_addr, unsigned long size);
/**
* Perform unit test
* Initialize page structs for specified virtual-address range (virt_to_page.c)
*/
void lx_emul_associate_page_selftest(void);
void lx_emul_add_page_range(void const *virt_addr, unsigned long size);
#ifdef __cplusplus

View File

@@ -50,7 +50,7 @@ typedef struct page *pgtable_t;
#define page_to_phys(p) __pa((p)->virtual)
#define page_to_virt(p) ((p)->virtual)
static inline struct page *virt_to_page(void const *v) { return lx_emul_virt_to_pages(v, 1U); }
static inline struct page *virt_to_page(void const *v) { return lx_emul_virt_to_page(v); }
/* needed by mm/internal.h */
#define pfn_valid(pfn) (pfn != 0UL)

View File

@@ -53,7 +53,7 @@ extern u64 vabits_actual;
#define page_to_phys(p) __pa((p)->virtual)
#define page_to_virt(p) ((p)->virtual)
static inline struct page *virt_to_page(void const *v) { return lx_emul_virt_to_pages(v, 1U); }
static inline struct page *virt_to_page(void const *v) { return lx_emul_virt_to_page(v); }
#define pfn_to_page(pfn) ( (struct page *)(__va(pfn << PAGE_SHIFT)) )
#define page_to_pfn(page) ( page_to_phys(page) >> PAGE_SHIFT )

View File

@@ -61,7 +61,7 @@ typedef struct page *pgtable_t;
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
static inline struct page *virt_to_page(void const *v) { return lx_emul_virt_to_pages(v, 1U); }
static inline struct page *virt_to_page(void const *v) { return lx_emul_virt_to_page(v); }
#define page_to_virt(p) ((p)->virtual)
#define virt_addr_valid(kaddr) ((unsigned long)kaddr != 0UL)

View File

@@ -25,6 +25,8 @@ lx_emul_shared_dma_buffer_allocate(unsigned long size);
void lx_emul_shared_dma_buffer_free(struct genode_shared_dataspace * ds);
void * lx_emul_shared_dma_buffer_virt_addr(struct genode_shared_dataspace * ds);
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,153 @@
/*
* \brief Shared-memory file utility
* \author Christian Helmuth
* \author Josef Söntgen
* \date 2023-12-04
*
* This utility implements limited shared-memory file semantics as required by
* Linux graphics drivers (e.g., intel_fb and lima_gpu_drv)
*/
/*
* Copyright (C) 2023 Genode Labs GmbH
*
* This file is distributed under the terms of the GNU General Public License
* version 2.
*/
#include <lx_emul/shared_dma_buffer.h>
#include <linux/shmem_fs.h>
struct shmem_file_buffer
{
struct genode_shared_dataspace *dataspace;
void *addr;
struct page *pages;
};
struct file *shmem_file_setup(char const *name, loff_t size,
unsigned long flags)
{
struct file *f;
struct inode *inode;
struct address_space *mapping;
struct shmem_file_buffer *private_data;
loff_t const nrpages = DIV_ROUND_UP(size, PAGE_SIZE);
if (!size)
return (struct file*)ERR_PTR(-EINVAL);
f = kzalloc(sizeof (struct file), 0);
if (!f) {
return (struct file*)ERR_PTR(-ENOMEM);
}
inode = kzalloc(sizeof (struct inode), 0);
if (!inode) {
goto err_inode;
}
mapping = kzalloc(sizeof (struct address_space), 0);
if (!mapping) {
goto err_mapping;
}
private_data = kzalloc(sizeof (struct shmem_file_buffer), 0);
if (!private_data) {
goto err_private_data;
}
private_data->dataspace = lx_emul_shared_dma_buffer_allocate(nrpages * PAGE_SIZE);
if (!private_data->dataspace)
goto err_private_data_addr;
private_data->addr = lx_emul_shared_dma_buffer_virt_addr(private_data->dataspace);
private_data->pages = lx_emul_virt_to_page(private_data->addr);
mapping->private_data = private_data;
mapping->nrpages = nrpages;
inode->i_mapping = mapping;
atomic_long_set(&f->f_count, 1);
f->f_inode = inode;
f->f_mapping = mapping;
f->f_flags = flags;
f->f_mode = OPEN_FMODE(flags);
f->f_mode |= FMODE_OPENED;
return f;
err_private_data_addr:
kfree(private_data);
err_private_data:
kfree(mapping);
err_mapping:
kfree(inode);
err_inode:
kfree(f);
return (struct file*)ERR_PTR(-ENOMEM);
}
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp)
{
struct page *p;
struct shmem_file_buffer *private_data;
if (index >= mapping->nrpages)
return NULL;
private_data = mapping->private_data;
p = private_data->pages;
return (p + index);
}
#include <linux/pagevec.h>
void __pagevec_release(struct pagevec * pvec)
{
/* XXX check if we have to call release_pages */
pagevec_reinit(pvec);
}
#include <linux/file.h>
static void _free_file(struct file *file)
{
struct inode *inode;
struct address_space *mapping;
struct shmem_file_buffer *private_data;
mapping = file->f_mapping;
inode = file->f_inode;
if (mapping) {
private_data = mapping->private_data;
lx_emul_shared_dma_buffer_free(private_data->dataspace);
kfree(private_data);
kfree(mapping);
}
kfree(inode);
kfree(file->f_path.dentry);
kfree(file);
}
void fput(struct file *file)
{
if (!file)
return;
if (atomic_long_sub_and_test(1, &file->f_count)) {
_free_file(file);
}
}
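
For orientation, a sketch of how a driver consumes this emulated interface -
the function name and sizes are illustrative, not part of the commit:

    /* illustration only */
    static int example_use(void)
    {
        struct file *f = shmem_file_setup("drm-gem", 4 * PAGE_SIZE, 0);
        struct page *p;

        if (IS_ERR(f))
            return PTR_ERR(f);

        /* pages of the backing buffer are contiguous - the index is a
           plain offset from the first page struct */
        p = shmem_read_mapping_page_gfp(f->f_mapping, 2, GFP_KERNEL);
        (void)p;

        fput(f); /* drops the last reference, frees the DMA buffer */
        return 0;
    }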

View File

@@ -1,6 +1,7 @@
/*
* \brief Lx_kit memory allocation backend
* \author Stefan Kalkowski
* \author Christian Helmuth
* \date 2021-03-25
*/
@@ -99,7 +100,8 @@ class Lx_kit::Mem_allocator
void free_buffer(void *addr);
Dataspace_capability attached_dataspace_cap(void *addr);
void * alloc(size_t size, size_t align);
void * alloc(size_t size, size_t align,
void (*new_range_cb)(void const *virt_addr, unsigned long size));
addr_t dma_addr(void * addr);
addr_t virt_addr(void * dma_addr);
addr_t virt_region_start(void * virt_addr);

View File

@@ -21,8 +21,7 @@
extern "C" void * lx_emul_mem_alloc_aligned(unsigned long size, unsigned long align)
{
void * const ptr = Lx_kit::env().memory.alloc(size, align);
lx_emul_forget_pages(ptr, size);
void * const ptr = Lx_kit::env().memory.alloc(size, align, &lx_emul_add_page_range);
return ptr;
};
@@ -30,8 +29,7 @@ extern "C" void * lx_emul_mem_alloc_aligned(unsigned long size, unsigned long al
extern "C" void * lx_emul_mem_alloc_aligned_uncached(unsigned long size,
unsigned long align)
{
void * const ptr = Lx_kit::env().uncached_memory.alloc(size, align);
lx_emul_forget_pages(ptr, size);
void * const ptr = Lx_kit::env().uncached_memory.alloc(size, align, &lx_emul_add_page_range);
return ptr;
};
@@ -53,7 +51,7 @@ extern "C" unsigned long lx_emul_mem_virt_addr(void * dma_addr)
if (ret)
return ret;
if (!(ret = Lx_kit::env().uncached_memory.virt_addr(dma_addr)))
Genode::error(__func__, " called with invalid addr ", dma_addr);
Genode::error(__func__, " called with invalid dma_addr ", dma_addr);
return ret;
}

View File

@@ -1,6 +1,7 @@
/*
* \brief Lx_emul backend for page-struct management
* \author Norman Feske
* \author Christian Helmuth
* \date 2021-07-01
*/
@@ -35,28 +36,22 @@ struct Lx_emul::Page_info
return key.virt > other_key.virt;
}
struct Query_virt_range
struct Query_virt_addr
{
addr_t virt;
size_t size;
bool matches(Page_info const &page_info) const
{
size_t const page_size = 4096;
Lx_kit::Byte_range page_range { page_info.key.virt, page_size };
Lx_kit::Byte_range virt_range { virt, size };
Lx_kit::Byte_range virt_range { virt, 1 };
return page_range.intersects(virt_range);
}
Key key() const { return Key { virt }; }
};
struct Query_virt_addr : Query_virt_range
{
Query_virt_addr(void const *virt) : Query_virt_range{(addr_t)virt, 1} { }
};
};
@@ -75,13 +70,13 @@ extern "C" void lx_emul_associate_page_with_virt_addr(struct page *page, void co
void lx_emul_disassociate_page_from_virt_addr(void const *virt)
{
page_registry().remove(Lx_emul::Page_info::Query_virt_addr(virt));
page_registry().remove(Lx_emul::Page_info::Query_virt_addr { (addr_t)virt });
}
struct page *lx_emul_associated_page(void const *virt, unsigned long size)
struct page *lx_emul_associated_page(void const *virt)
{
Lx_emul::Page_info::Query_virt_range query { .virt = (addr_t)virt, .size = size };
Lx_emul::Page_info::Query_virt_addr query { (addr_t)virt };
struct page *page_ptr = nullptr;
page_registry().apply(query, [&] (Lx_emul::Page_info const &page_info) {

View File

@@ -1,6 +1,7 @@
/*
* \brief Replaces mm/page_alloc.c
* \author Stefan Kalkowski
* \author Christian Helmuth
* \date 2021-06-03
*/
@@ -27,7 +28,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp,int preferred_nid,
{
void const *ptr = lx_emul_mem_alloc_aligned(PAGE_SIZE*nr_pages, PAGE_SIZE);
struct page *page = lx_emul_virt_to_pages(ptr, nr_pages);
struct page *page = lx_emul_virt_to_page(ptr);
int i;
for (i = 0; i < nr_pages; i++) {
@@ -42,40 +43,38 @@
return nr_pages;
}
static void lx_free_pages(struct page *page, unsigned const num_pages)
static void lx_free_pages(struct page *page, bool force)
{
unsigned i;
void * const virt_addr = page->virtual;
void * const virt_addr = page_address(page);
if (atomic_read(&page->_refcount) && !atomic_dec_and_test(&page->_refcount))
if (force)
set_page_count(page, 0);
else if (!put_page_testzero(page))
return;
for (i = 0; i < num_pages; i++)
lx_emul_disassociate_page_from_virt_addr(page[i].virtual);
lx_emul_mem_free(virt_addr);
lx_emul_mem_free(page);
}
void __free_pages(struct page * page, unsigned int order)
{
lx_free_pages(page, (1u << order));
lx_free_pages(page, false);
}
void free_pages_exact(void *virt_addr, size_t size)
void free_pages(unsigned long addr,unsigned int order)
{
lx_free_pages(virt_to_page(virt_addr), PAGE_ALIGN(size) / PAGE_SIZE);
if (addr != 0ul)
__free_pages(virt_to_page((void *)addr), order);
}
static struct page * lx_alloc_pages(unsigned const nr_pages)
{
void const *ptr = lx_emul_mem_alloc_aligned(PAGE_SIZE*nr_pages, PAGE_SIZE);
struct page *page = lx_emul_virt_to_pages(ptr, nr_pages);
struct page *page = lx_emul_virt_to_page(ptr);
atomic_set(&page->_refcount, 1);
init_page_count(page);
return page;
}
@@ -84,26 +83,16 @@ static struct page * lx_alloc_pages(unsigned const nr_pages)
/*
* In earlier kernel versions, '__alloc_pages' was an inline function.
*/
#if LINUX_VERSION_CODE > KERNEL_VERSION(5,12,0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,13,0)
struct page * __alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
nodemask_t * nodemask)
#else
struct page * __alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
nodemask_t * nodemask)
#endif
{
return lx_alloc_pages(1u << order);
}
#endif
void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
return lx_alloc_pages(PAGE_ALIGN(size) / PAGE_SIZE)->virtual;
}
void free_pages(unsigned long addr,unsigned int order)
{
if (addr != 0ul)
__free_pages(virt_to_page((void *)addr), order);
}
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
@@ -115,3 +104,49 @@ unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
return (unsigned long)page_address(page);
}
/*
* Exact page allocation
*
* This implementation supports only alloc-free pairs that use the same
* size and does not set the page_count of pages beyond the head page. It is
* currently not possible to allocate individual but contiguous pages, which is
* required to satisfy Linux semantics.
*/
void free_pages_exact(void *virt_addr, size_t size)
{
struct page *page = lx_emul_virt_to_page(virt_addr);
if (!page)
return;
lx_free_pages(page, false);
}
void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
size_t const nr_pages = DIV_ROUND_UP(size, PAGE_SIZE);
struct page *page = lx_alloc_pages(nr_pages);
if (!page)
return NULL;
return page_address(page);
}
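
Given this constraint, callers must pass the identical size to the alloc and
free calls - a minimal illustration (not from the commit):

    void *buf = alloc_pages_exact(3 * PAGE_SIZE + 100, GFP_KERNEL);
    if (buf)
        free_pages_exact(buf, 3 * PAGE_SIZE + 100); /* same size */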
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6,0,0)
void __folio_put(struct folio * folio)
{
struct page *page = folio_page(folio, 0);
/* should only be called if refcount is 0 */
if (page_count(page) != 0)
printk("%s: page refocunt not 0 for page=%px\n", __func__, page);
lx_free_pages(&folio->page, true);
}
#endif
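
The reworked refcounting defers the actual release to the last reference - a
hedged illustration of the intended semantics (not part of the commit):

    struct page *page = alloc_pages(GFP_KERNEL, 2); /* head count == 1 */
    get_page(page);                                 /* count == 2 */
    __free_pages(page, 2); /* put_page_testzero() fails - memory kept */
    __free_pages(page, 2); /* count reaches 0 - lx_emul_mem_free() runs */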

View File

@@ -1,6 +1,7 @@
/*
* \brief Lx_emul backend for shared dma buffers
* \author Stefan Kalkowski
* \author Christian Helmuth
* \date 2022-03-02
*/
@@ -24,11 +25,8 @@ lx_emul_shared_dma_buffer_allocate(unsigned long size)
{
Lx_kit::Mem_allocator::Buffer & b = Lx_kit::env().memory.alloc_buffer(size);
/*
* We have to call virt_to_pages eagerly here,
* to get contingous page objects registered
*/
lx_emul_virt_to_pages((void*)b.virt_addr(), size >> 12);
lx_emul_add_page_range((void *)b.virt_addr(), b.size());
return static_cast<genode_shared_dataspace*>(&b);
}
@@ -36,10 +34,16 @@ lx_emul_shared_dma_buffer_allocate(unsigned long size)
extern "C" void
lx_emul_shared_dma_buffer_free(struct genode_shared_dataspace * ds)
{
lx_emul_forget_pages((void*)ds->virt_addr(), ds->size());
lx_emul_remove_page_range((void *)ds->virt_addr(), ds->size());
Lx_kit::env().memory.free_buffer((void*)ds->virt_addr());
}
extern "C" void *
lx_emul_shared_dma_buffer_virt_addr(struct genode_shared_dataspace * ds)
{
return (void *)ds->virt_addr();
}
Genode::addr_t
genode_shared_dataspace_local_address(struct genode_shared_dataspace * ds)

View File

@@ -50,6 +50,9 @@ static int kernel_init(void * args)
struct task_struct *tsk = current;
set_task_comm(tsk, "init");
/* setup page struct for zero page in BSS */
lx_emul_add_page_range(empty_zero_page, PAGE_SIZE);
wait_for_completion(&kthreadd_done);
workqueue_init();
@@ -119,9 +122,6 @@ int lx_emul_init_task_function(void * dtb)
static struct pt_regs regs;
set_irq_regs(&regs);
/* Run emulation library self-tests before starting kernel */
lx_emul_associate_page_selftest();
/**
* Here we do the minimum normally done in start_kernel() of init/main.c
*/

View File

@@ -1,6 +1,7 @@
/*
* \brief Linux DDE virt-to-page implementation
* \author Norman Feske
* \author Christian Helmuth
* \date 2021-07-02
*/
@@ -17,78 +18,49 @@
#include <lx_emul/page_virt.h>
struct page *lx_emul_virt_to_pages(void const *virt, unsigned long count)
struct page *lx_emul_virt_to_page(void const *virt)
{
/* sanitize argument */
void * const page_aligned_virt = (void *)((uintptr_t)virt & PAGE_MASK);
struct page *page = lx_emul_associated_page(page_aligned_virt, 1);
if (!page) {
unsigned long i;
struct page * p = kzalloc(sizeof(struct page)*count, 0);
page = p;
for (i = 0; i < count; i++, p++) {
p->virtual = (void*)((uintptr_t)page_aligned_virt + i*PAGE_SIZE);
init_page_count(p);
lx_emul_associate_page_with_virt_addr(p, p->virtual);
}
}
/* consistency check */
if (page_aligned_virt != page->virtual)
BUG();
struct page * const page = lx_emul_associated_page(page_aligned_virt);
return page;
}
void lx_emul_forget_pages(void const *virt, unsigned long size)
void lx_emul_remove_page_range(void const *virt_addr, unsigned long size)
{
for (;;) {
struct page *page = lx_emul_associated_page(virt, size);
if (!page)
return;
unsigned i;
struct page *p;
lx_emul_disassociate_page_from_virt_addr(page->virtual);
kfree(page);
unsigned const nr_pages = DIV_ROUND_UP(size, PAGE_SIZE);
/* sanitize argument */
void * const page_aligned_virt = (void *)((uintptr_t)virt_addr & PAGE_MASK);
struct page * const page = lx_emul_associated_page(page_aligned_virt);
for (i = 0, p = page; i < nr_pages; i++, p++)
lx_emul_disassociate_page_from_virt_addr(p->virtual);
lx_emul_heap_free(page);
}
void lx_emul_add_page_range(void const *virt_addr, unsigned long size)
{
unsigned i;
struct page *p;
/* range may comprise a partial page at the end that needs a page struct too */
unsigned const nr_pages = DIV_ROUND_UP(size, PAGE_SIZE);
unsigned const space = sizeof(struct page)*nr_pages;
struct page * const page = lx_emul_heap_alloc(space);
for (i = 0, p = page; i < nr_pages; i++, p++) {
p->virtual = (void *)((uintptr_t)virt_addr + i*PAGE_SIZE);
set_page_count(p, 0);
lx_emul_associate_page_with_virt_addr(p, p->virtual);
}
}
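To make the rounding concrete (illustrative numbers, not from the commit):
registering a 10 KiB range yields DIV_ROUND_UP(10240, 4096) = 3 page structs,
where the third one covers the partial trailing page.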
#define LX_EMUL_ASSERT(cond) { if (!(cond)) {\
printk("assertion failed at line %d: %s\n", __LINE__, #cond); \
lx_emul_trace_and_stop("abort"); } }
void lx_emul_associate_page_selftest()
{
struct page *p1 = (struct page *)1;
struct page *p2 = (struct page *)2;
struct page *p3 = (struct page *)3;
void *v1 = (void *)0x11000;
void *v2 = (void *)0x12000;
void *v3 = (void *)0x13000;
lx_emul_associate_page_with_virt_addr(p1, v1);
lx_emul_associate_page_with_virt_addr(p2, v2);
lx_emul_associate_page_with_virt_addr(p3, v3);
LX_EMUL_ASSERT(lx_emul_associated_page(v1, 1) == p1);
LX_EMUL_ASSERT(lx_emul_associated_page(v2, 1) == p2);
LX_EMUL_ASSERT(lx_emul_associated_page(v3, 1) == p3);
LX_EMUL_ASSERT(lx_emul_associated_page((void *)((uintptr_t)v1 + 4095), 1) == p1);
LX_EMUL_ASSERT(lx_emul_associated_page((void *)((uintptr_t)v1 - 1), 1) == NULL);
LX_EMUL_ASSERT(lx_emul_associated_page((void *)((uintptr_t)v2 & PAGE_MASK), 1) == p2);
LX_EMUL_ASSERT(lx_emul_associated_page((void *)0x10000, 0x10000) == p2);
lx_emul_disassociate_page_from_virt_addr(v2);
LX_EMUL_ASSERT(lx_emul_associated_page((void *)0x10000, 0x10000) == p3);
lx_emul_disassociate_page_from_virt_addr(v3);
LX_EMUL_ASSERT(lx_emul_associated_page((void *)0x10000, 0x10000) == p1);
lx_emul_disassociate_page_from_virt_addr(v1);
LX_EMUL_ASSERT(lx_emul_associated_page((void *)0x10000, 0x10000) == NULL);
}

View File

@@ -1,6 +1,7 @@
/*
* \brief Lx_kit memory allocation backend
* \author Stefan Kalkowski
* \author Christian Helmuth
* \date 2021-03-25
*/
@@ -57,7 +58,8 @@ Genode::Dataspace_capability Lx_kit::Mem_allocator::attached_dataspace_cap(void
}
void * Lx_kit::Mem_allocator::alloc(size_t const size, size_t const align)
void * Lx_kit::Mem_allocator::alloc(size_t const size, size_t const align,
void (*new_range_cb)(void const *, unsigned long))
{
if (!size)
return nullptr;
@@ -93,7 +95,7 @@ void * Lx_kit::Mem_allocator::alloc(size_t const size, size_t const align)
_mem.add_range(buffer.virt_addr(), buffer.size() - 1);
/* re-try allocation */
return _mem.alloc_aligned(size, (unsigned)log2(align)).convert<void *>(
void * const virt_addr = _mem.alloc_aligned(size, (unsigned)log2(align)).convert<void *>(
[&] (void *ptr) { return cleared_allocation(ptr, size); },
@@ -101,6 +103,11 @@ void * Lx_kit::Mem_allocator::alloc(size_t const size, size_t const align)
error("memory allocation failed for ", size, " align ", align);
return nullptr; }
);
if (virt_addr)
new_range_cb((void *)buffer.virt_addr(), buffer.size() - 1);
return virt_addr;
}
);
}

View File

@@ -27,7 +27,9 @@ class Non_dma_buffer : Attached_ram_dataspace,
using Attached_ram_dataspace::Attached_ram_dataspace;
size_t dma_addr() const override { return 0UL; }
/* emulate idempotent virt-dma mapping */
size_t dma_addr() const override {
return (size_t) Attached_ram_dataspace::local_addr<void>(); }
size_t size() const override {
return Attached_ram_dataspace::size(); }

View File

@@ -382,25 +382,6 @@ asmlinkage __visible void dump_stack(void)
}
#include <linux/mm.h>
void __put_page(struct page * page)
{
__free_pages(page, 0);
}
#if LINUX_VERSION_CODE > KERNEL_VERSION(6,1,0)
#include <linux/mm.h>
void __folio_put(struct folio * folio)
{
__free_pages(&folio->page, 0);
kfree(folio);
}
#endif
#include <linux/prandom.h>
void prandom_bytes(void *buf, size_t bytes)
@@ -437,7 +418,7 @@ void *page_frag_alloc_align(struct page_frag_cache *nc,
printk("%s: alloc might leak memory: fragsz: %u PAGE_SIZE: %lu "
"order: %u page: %p addr: %p\n", __func__, fragsz, PAGE_SIZE, order, page, page->virtual);
return page->virtual;
return page_address(page);
}
@@ -445,7 +426,7 @@
void page_frag_free(void * addr)
{
struct page *page = lx_emul_virt_to_pages(addr, 1ul);
struct page *page = virt_to_page(addr);
if (!page) {
printk("BUG %s: page for addr: %p not found\n", __func__, addr);
lx_emul_backtrace();

View File

@@ -18,22 +18,6 @@
#include "lx_emul.h"
void *emul_alloc_shmem_file_buffer(unsigned long size)
{
if (!size)
return nullptr;
auto &buffer = Lx_kit::env().memory.alloc_buffer(size);
return reinterpret_cast<void *>(buffer.virt_addr());
}
void emul_free_shmem_file_buffer(void *addr)
{
Lx_kit::env().memory.free_buffer(addr);
}
unsigned short emul_intel_gmch_control_reg()
{
using namespace Genode;

View File

@@ -28,14 +28,6 @@ int __cond_resched_lock(spinlock_t * lock)
struct cpumask __cpu_active_mask;
#include <linux/mm.h>
void __folio_put(struct folio * folio)
{
lx_emul_trace_and_stop(__func__);
}
extern void __i915_gpu_coredump_free(struct kref * error_ref);
void __i915_gpu_coredump_free(struct kref * error_ref)
{
@@ -51,14 +43,6 @@ struct sock * __netlink_kernel_create(struct net * net,int unit,struct module *
}
#include <linux/pagevec.h>
void __pagevec_release(struct pagevec * pvec)
{
lx_emul_trace_and_stop(__func__);
}
#include <linux/printk.h>
void __printk_safe_enter(void)

View File

@@ -14,6 +14,7 @@
#include <lx_emul.h>
#include <lx_emul/io_mem.h>
#include <lx_emul/page_virt.h>
#include <lx_emul/shmem_file.h>
#include <linux/dma-fence.h>
#include <linux/fs.h>
@@ -77,132 +78,6 @@ pgprot_t pgprot_writecombine(pgprot_t prot)
}
/*
* shmem handling as done by Josef etnaviv
*/
#include <linux/shmem_fs.h>
struct shmem_file_buffer
{
void *addr;
struct page *pages;
};
struct file *shmem_file_setup(char const *name, loff_t size,
unsigned long flags)
{
struct file *f;
struct inode *inode;
struct address_space *mapping;
struct shmem_file_buffer *private_data;
loff_t const nrpages = (size / PAGE_SIZE) + ((size % (PAGE_SIZE)) ? 1 : 0);
if (!size)
return (struct file*)ERR_PTR(-EINVAL);
f = kzalloc(sizeof (struct file), 0);
if (!f) {
return (struct file*)ERR_PTR(-ENOMEM);
}
inode = kzalloc(sizeof (struct inode), 0);
if (!inode) {
goto err_inode;
}
mapping = kzalloc(sizeof (struct address_space), 0);
if (!mapping) {
goto err_mapping;
}
private_data = kzalloc(sizeof (struct shmem_file_buffer), 0);
if (!private_data) {
goto err_private_data;
}
private_data->addr = emul_alloc_shmem_file_buffer(nrpages * PAGE_SIZE);
if (!private_data->addr)
goto err_private_data_addr;
/*
* We call virt_to_pages eagerly here, to get continuous page
* objects registered in case one wants to use them immediately.
*/
private_data->pages =
lx_emul_virt_to_pages(private_data->addr, nrpages);
mapping->private_data = private_data;
mapping->nrpages = nrpages;
inode->i_mapping = mapping;
atomic_long_set(&f->f_count, 1);
f->f_inode = inode;
f->f_mapping = mapping;
f->f_flags = flags;
f->f_mode = OPEN_FMODE(flags);
f->f_mode |= FMODE_OPENED;
return f;
err_private_data_addr:
kfree(private_data);
err_private_data:
kfree(mapping);
err_mapping:
kfree(inode);
err_inode:
kfree(f);
return (struct file*)ERR_PTR(-ENOMEM);
}
static void _free_file(struct file *file)
{
struct inode *inode;
struct address_space *mapping;
struct shmem_file_buffer *private_data;
mapping = file->f_mapping;
inode = file->f_inode;
private_data = mapping->private_data;
lx_emul_forget_pages(private_data->addr, mapping->nrpages << 12);
emul_free_shmem_file_buffer(private_data->addr);
kfree(private_data);
kfree(mapping);
kfree(inode);
kfree(file->f_path.dentry);
kfree(file);
}
void fput(struct file *file)
{
if (atomic_long_sub_and_test(1, &file->f_count)) {
_free_file(file);
}
}
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp)
{
struct page *p;
struct shmem_file_buffer *private_data;
if (index > mapping->nrpages)
return NULL;
private_data = mapping->private_data;
p = private_data->pages;
return (p + index);
}
extern int intel_root_gt_init_early(struct drm_i915_private * i915);
int intel_root_gt_init_early(struct drm_i915_private * i915)
{

View File

@@ -26,9 +26,6 @@ extern "C" {
struct dma_fence_work;
struct dma_fence_work_ops;
void *emul_alloc_shmem_file_buffer(unsigned long);
void emul_free_shmem_file_buffer(void *);
void * intel_io_mem_map(unsigned long offset, unsigned long size);
#include "lx_i915.h"

View File

@@ -10,6 +10,7 @@ INC_DIR += $(REL_PRG_DIR)
SRC_CC += main.cc
SRC_CC += emul.cc
SRC_CC += opregion_io_mem.cc
SRC_CC += lx_emul/shared_dma_buffer.cc
SRC_C += dummies.c
SRC_C += pci.c
SRC_C += lx_emul.c

View File

@@ -63,7 +63,7 @@ void * page_frag_alloc_align(struct page_frag_cache *nc,
void page_frag_free(void * addr)
{
struct page *page = lx_emul_virt_to_pages(addr, 1ul);
struct page *page = lx_emul_virt_to_page(addr);
if (!page) {
printk("BUG %s: page for addr: %p not found\n", __func__, addr);
lx_emul_backtrace();

View File

@@ -28,14 +28,6 @@ int __ethtool_get_link_ksettings(struct net_device * dev,struct ethtool_link_kse
}
#include <linux/mm.h>
void __folio_put(struct folio * folio)
{
lx_emul_trace_and_stop(__func__);
}
#include <net/netlink.h>
int __nla_parse(struct nlattr ** tb,int maxtype,const struct nlattr * head,int len,const struct nla_policy * policy,unsigned int validate,struct netlink_ext_ack * extack)

View File

@@ -1,7 +1,7 @@
/*
* \brief Dummy definitions of Linux Kernel functions
* \author Automatically generated file - do not edit
* \date 2023-11-02
* \date 2023-11-21
*/
#include <lx_emul.h>
@@ -28,14 +28,6 @@ int __ethtool_get_link_ksettings(struct net_device * dev,struct ethtool_link_kse
}
#include <linux/mm.h>
void __folio_put(struct folio * folio)
{
lx_emul_trace_and_stop(__func__);
}
#include <net/netlink.h>
int __nla_parse(struct nlattr ** tb,int maxtype,const struct nlattr * head,int len,const struct nla_policy * policy,unsigned int validate,struct netlink_ext_ack * extack)