lx_emul: restrict allocations to minimal alignment

* Use the architecture-dependent minimal alignment for all allocations;
  on ARM, for instance, cacheline-aligned allocations are necessary for
  DMA (see the sketch below)
* Remove the allocation functions without alignment from the generic API
* Fix a compiler warning (krealloc returned a const-qualified pointer as
  plain void *)

Fix #4268
Stefan Kalkowski 2021-09-23 16:46:45 +02:00 committed by Norman Feske
parent 2ac8620f44
commit 6ae55d490b
6 changed files with 11 additions and 24 deletions
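
The first point is the heart of the change. As a minimal sketch of why
sub-cache-line allocations are unsafe for DMA on ARM (illustrative only,
not part of the commit; a 64-byte L1 line size is assumed):

    /* Assumes a 64-byte L1 cache line, as on many ARM SoCs. */
    enum { CACHE_LINE = 64 };

    struct dma_unsafe {
        char buffer[16]; /* device writes here via DMA              */
        int  counter;    /* CPU-owned data in the same 64-byte line */
    };

    /*
     * On a non-coherent ARM system, invalidating the line to observe
     * the device's data throws away a dirty 'counter' update, while
     * writing the line back clobbers the device's data. Aligning (and
     * padding) DMA buffers to whole cache lines avoids the sharing:
     */
    struct dma_safe {
        char buffer[CACHE_LINE] __attribute__((aligned(CACHE_LINE)));
    };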


@@ -18,8 +18,6 @@
 extern "C" {
 #endif

-void * lx_emul_mem_alloc(unsigned long size);
-void * lx_emul_mem_alloc_uncached(unsigned long size);
 void * lx_emul_mem_alloc_aligned(unsigned long size, unsigned long align);
 void * lx_emul_mem_alloc_aligned_uncached(unsigned long size, unsigned long align);
 unsigned long lx_emul_mem_dma_addr(void * addr);
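
With the unaligned variants gone, every call site states its alignment
explicitly. A hypothetical caller (the helper name and the choice of
ARCH_KMALLOC_MINALIGN are assumptions, not part of the commit):

    #include <linux/slab.h>    /* provides ARCH_KMALLOC_MINALIGN */
    #include <lx_emul/alloc.h>

    /* hypothetical helper: allocate a buffer safe to hand to a device */
    static void * alloc_dma_buffer(unsigned long size)
    {
        return lx_emul_mem_alloc_aligned(size, ARCH_KMALLOC_MINALIGN);
    }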


@@ -25,28 +25,9 @@ extern "C" void * lx_emul_mem_alloc_aligned(unsigned long size, unsigned long align)
 };


-extern "C" void * lx_emul_mem_alloc(unsigned long size)
-{
-	/* always align memory objects to 32 bytes, like malloc, heap etc. */
-	void * const ptr = Lx_kit::env().memory.alloc(size, 32);
-	lx_emul_forget_pages(ptr, size);
-	return ptr;
-};
-
-
-extern "C" void * lx_emul_mem_alloc_uncached(unsigned long size)
-{
-	/* always align memory objects to 32 bytes, like malloc, heap etc. */
-	void * const ptr = Lx_kit::env().uncached_memory.alloc(size, 32);
-	lx_emul_forget_pages(ptr, size);
-	return ptr;
-};
-
-
 extern "C" void * lx_emul_mem_alloc_aligned_uncached(unsigned long size,
                                                      unsigned long align)
 {
-	/* always align memory objects to 32 bytes, like malloc, heap etc. */
 	void * const ptr = Lx_kit::env().uncached_memory.alloc(size, align);
 	lx_emul_forget_pages(ptr, size);
 	return ptr;
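
The hard-wired 32-byte alignment vanishes, together with a stale copy of
its comment in the aligned-uncached variant. As a hedged sketch (not in
the commit), a wrapper that double-checks a power-of-two alignment
request could look like this:

    #include <stdint.h>
    #include <assert.h>
    #include <lx_emul/alloc.h>

    /* hypothetical self-check around the remaining aligned allocator */
    static void * checked_alloc(unsigned long size, unsigned long align)
    {
        void * const ptr = lx_emul_mem_alloc_aligned(size, align);

        /* this check holds only for power-of-two alignments */
        assert(((uintptr_t)ptr & (align - 1)) == 0);
        return ptr;
    }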


@@ -11,6 +11,7 @@
  * version 2.
  */

+#include <linux/slab.h>
 #include <linux/memblock.h>

 #include <lx_emul/alloc.h>

@@ -20,5 +21,6 @@ void * __init memblock_alloc_try_nid(phys_addr_t size,
                                      phys_addr_t max_addr,
                                      int nid)
 {
+	align = max(align, (phys_addr_t)KMALLOC_MIN_SIZE);
 	return lx_emul_mem_alloc_aligned(size, align);
 }
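
KMALLOC_MIN_SIZE comes from linux/slab.h, hence the new include; max()
only ever raises the caller's alignment, never lowers it. A standalone
sketch of the clamping idiom (the 64-byte minimum is an assumed example
value; the real KMALLOC_MIN_SIZE depends on architecture and kernel
configuration):

    #define MIN_ALIGN 64UL                    /* stand-in for KMALLOC_MIN_SIZE */
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    static unsigned long clamp_align(unsigned long requested)
    {
        return MAX(requested, MIN_ALIGN);
    }

    /* clamp_align(8)   -> 64   (raised to the minimum)    */
    /* clamp_align(128) -> 128  (larger requests are kept) */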


@@ -12,10 +12,12 @@
  */

 #include <linux/percpu.h>
+#include <linux/slab.h>

 #include <lx_emul/alloc.h>

 void __percpu * __alloc_percpu(size_t size, size_t align)
 {
+	align = max(align, (size_t)KMALLOC_MIN_SIZE);
 	return lx_emul_mem_alloc_aligned(size, align);
 }
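
The same clamp is applied to per-CPU allocations, whose natural
alignment request is typically far below a cache line. A hypothetical
caller (names are made up):

    #include <linux/percpu.h>
    #include <linux/errno.h>

    static int __percpu *hit_counter;

    /* __alignof__(int) is 4; the shim above raises it to KMALLOC_MIN_SIZE */
    static int stats_init(void)
    {
        hit_counter = __alloc_percpu(sizeof(int), __alignof__(int));
        return hit_counter ? 0 : -ENOMEM;
    }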


@@ -32,7 +32,7 @@ void * krealloc(const void * p,size_t new_size,gfp_t flags)
 	void *ret;

 	if (new_size <= old_size)
-		return p;
+		return (void*) p;

 	ret = kmalloc(new_size, flags);
 	memcpy(ret, p, old_size);
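
This is the warning fix from the commit message: p is declared
const void *, so returning it from a function returning plain void *
discards the qualifier, which gcc reports (e.g. via
-Wdiscarded-qualifiers); the cast makes the intentional drop explicit.
A minimal reproduction (not from the commit):

    void * give_back(const void *p)
    {
        return p;             /* warning: return discards 'const' qualifier */
        /* return (void *)p;    no warning: the drop is explicit            */
    }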


@@ -40,7 +40,7 @@ void * __kmalloc(size_t size, gfp_t flags)
 	if (flags & GFP_DMA)
 		lx_emul_trace_and_stop(__func__);

-	return lx_emul_mem_alloc(size);
+	return lx_emul_mem_alloc_aligned(size, ARCH_KMALLOC_MINALIGN);
 }
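
ARCH_KMALLOC_MINALIGN is the same constant Linux uses as its kmalloc
alignment floor; on ARM it derives from the DMA alignment, i.e. the L1
cache-line size. Paraphrased from include/linux/slab.h of that kernel
generation (exact definitions vary by version and configuration):

    #ifdef ARCH_DMA_MINALIGN                 /* on ARM: L1_CACHE_BYTES */
    #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
    #else
    #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
    #endif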
@@ -106,9 +106,13 @@ void __init kmem_cache_init(void)


 void * kmem_cache_alloc(struct kmem_cache * s, gfp_t flags)
 {
+	unsigned long align;
+
 	if (!s)
 		lx_emul_trace_and_stop(__func__);

-	return lx_emul_mem_alloc_aligned(s->size, s->align ? s->align : 32);
+	align = max(s->align, (unsigned int)ARCH_KMALLOC_MINALIGN);
+
+	return lx_emul_mem_alloc_aligned(s->size, align);
 }
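
The previous fallback of 32 bytes gives way to the same floor, so even a
cache created with a tiny alignment yields DMA-safe objects on ARM. A
hypothetical caller (cache name and sizes are made up):

    #include <linux/slab.h>

    /* the 4-byte alignment request is raised to ARCH_KMALLOC_MINALIGN
       by the shim above */
    static void * demo_object(void)
    {
        struct kmem_cache *cache =
            kmem_cache_create("demo", 24, 4, 0, NULL);

        return cache ? kmem_cache_alloc(cache, GFP_KERNEL) : NULL;
    }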