split asm files by architecture (x86 -> i386,x86_64 ; arm -> arm,arm64)

This commit is contained in:
Joshua Warner 2014-12-10 14:41:46 -07:00
parent ba4f4a0faa
commit f8b9fcf198
10 changed files with 774 additions and 261 deletions

View File

@ -1206,7 +1206,7 @@ vm-sources = \
$(src)/jnienv.cpp \
$(src)/process.cpp
vm-asm-sources = $(src)/$(asm).$(asm-format)
vm-asm-sources = $(src)/$(arch).$(asm-format)
target-asm = $(asm)
@ -1257,7 +1257,7 @@ ifeq ($(process),compile)
endif
endif
vm-asm-sources += $(src)/compile-$(asm).$(asm-format)
vm-asm-sources += $(src)/compile-$(arch).$(asm-format)
endif
cflags += -DAVIAN_PROCESS_$(process)
ifeq ($(aot-only),true)

130
src/arm.S
View File

@ -21,134 +21,6 @@
# define GLOBAL(x) x
#endif
#ifdef __aarch64__
//----------------------------------------------------------------------
// AArch64 section of the combined arm.S (moved verbatim to arm64.S by
// this commit).
//
// vmNativeCall — trampoline for calling a native function whose
// arguments were marshalled into three tables (stack words, GPRs,
// VFP registers); returns the callee's result in x0.
// Signature inferred from the register comments below — confirm
// against the C++ caller.
//----------------------------------------------------------------------
.globl GLOBAL(vmNativeCall)
.align 2
GLOBAL(vmNativeCall):
// arguments:
// x0 -> x19 : function
// w1 -> w20 : stackTotal
// x2 : memoryTable
// w3 : memoryCount
// x4 -> x21 : gprTable
// x5 -> x22 : vfpTable
// w6 -> w23 : returnType
// allocate frame
stp x29, x30, [sp,#-64]!
// save callee-saved register values so we can clobber them
stp x19, x20, [sp,#16]
stp x21, x22, [sp,#32]
str x23, [sp,#48]
// move arguments into callee-saved registers
mov x19, x0
mov w20, w1
mov x21, x4
mov x22, x5
mov w23, w6
// setup stack arguments if necessary
sub sp, sp, w20, uxtw // allocate stack
mov x9, sp
// copy memoryTable onto the outgoing stack, 8 bytes per iteration.
// NOTE(review): w3 is decremented by 8 per copy, so memoryCount is a
// byte count, not an element count — confirm against the caller.
LOCAL(loop):
cmp w3, wzr
b.eq LOCAL(populateGPRs)
ldr x0, [x2], #8
str x0, [x9], #8
sub w3, w3, #8
b LOCAL(loop)
// if a GPR table was supplied, load all eight integer argument
// registers (x0-x7) from it unconditionally
LOCAL(populateGPRs):
cmp x21, xzr
b.eq LOCAL(populateVFPs)
ldp x0, x1, [x21]
ldp x2, x3, [x21,#16]
ldp x4, x5, [x21,#32]
ldp x6, x7, [x21,#48]
// likewise, load all eight FP argument registers (d0-d7) if present
LOCAL(populateVFPs):
cmp x22, xzr
b.eq LOCAL(doCall)
ldp d0, d1, [x22]
ldp d2, d3, [x22,#16]
ldp d4, d5, [x22,#32]
ldp d6, d7, [x22,#48]
LOCAL(doCall):
blr x19 // call function
add sp, sp, w20, uxtw // deallocate stack
// normalize float/double results from v0 into x0 so the caller reads a
// single integer register; FLOAT_TYPE/DOUBLE_TYPE come from avian/types.h
cmp w23,#FLOAT_TYPE
b.ne LOCAL(double)
fmov w0,s0
b LOCAL(exit)
LOCAL(double):
cmp w23,#DOUBLE_TYPE
b.ne LOCAL(exit)
fmov x0,d0
LOCAL(exit):
// restore callee-saved registers and the frame, then return
ldp x19, x20, [sp,#16]
ldp x21, x22, [sp,#32]
ldr x23, [sp,#48]
ldp x29, x30, [sp],#64
ret
//----------------------------------------------------------------------
// vmJump — longjmp-style transfer; does not return.  Installs a new
// target (x0), return values (x4/x5 -> x0/x1), stack pointer (x2) and
// thread register x19 (x3), then branches.  Argument meanings inferred
// from the moves — confirm against the C++ caller.
//----------------------------------------------------------------------
.globl GLOBAL(vmJump)
.align 2
GLOBAL(vmJump):
mov x30, x0
mov x0, x4
mov x1, x5
mov sp, x2
mov x19, x3
br x30
// byte offsets into the checkpoint structure passed to vmRun.
// NOTE(review): must match the C++ layout — verify if that changes.
#define CHECKPOINT_THREAD 8
#define CHECKPOINT_STACK 48
//----------------------------------------------------------------------
// vmRun — record sp in checkpoint->stack, then call
// function(checkpoint->thread, arguments); vmRun_returnAddress restores
// the callee-saved registers and returns.
//----------------------------------------------------------------------
.globl GLOBAL(vmRun)
.align 2
GLOBAL(vmRun):
// x0: function
// x1: arguments
// x2: checkpoint
// allocate frame
stp x29, x30, [sp,#-96]!
// save callee-saved register values
stp x19, x20, [sp,#16]
stp x21, x22, [sp,#32]
stp x23, x24, [sp,#48]
stp x25, x26, [sp,#64]
stp x27, x28, [sp,#80]
// record the current stack pointer in checkpoint->stack
mov x19, sp
str x19, [x2, #CHECKPOINT_STACK]
// call function(checkpoint->thread, arguments); x1 already holds arguments
mov x19, x0
ldr x0, [x2, #CHECKPOINT_THREAD]
blr x19
// NOTE(review): presumably also the target vmJump unwinds to — confirm
.globl GLOBAL(vmRun_returnAddress)
.align 2
GLOBAL(vmRun_returnAddress):
ldp x19, x20, [sp,#16]
ldp x21, x22, [sp,#32]
ldp x23, x24, [sp,#48]
ldp x25, x26, [sp,#64]
ldp x27, x28, [sp,#80]
ldp x29, x30, [sp],#96
br x30
#elif defined __arm__
.globl GLOBAL(vmNativeCall)
.align 2
GLOBAL(vmNativeCall):
@ -258,5 +130,3 @@ GLOBAL(vmRun_returnAddress):
add sp, sp, #12
ldmfd sp!, {r4-r11, lr}
bx lr
#endif // __arm__

146
src/arm64.S Normal file
View File

@ -0,0 +1,146 @@
/* arm64.S: JNI gluecode for ARM64 (AArch64)
Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "avian/types.h"
.text
#define LOCAL(x) .L##x
#ifdef __APPLE__
# define GLOBAL(x) _##x
#else
# define GLOBAL(x) x
#endif
//----------------------------------------------------------------------
// vmNativeCall — trampoline used to invoke a native (JNI) function
// whose arguments were marshalled into three tables: raw stack words
// (memoryTable), integer registers (gprTable) and FP registers
// (vfpTable).  Returns the callee's result in x0, converting
// float/double results from v0.
// Roughly:
//   uint64_t vmNativeCall(void* function, unsigned stackTotal,
//                         void* memoryTable, unsigned memoryCount,
//                         uint64_t* gprTable, uint64_t* vfpTable,
//                         unsigned returnType)
// (signature inferred from the register comments below — confirm
// against the C++ caller).
//----------------------------------------------------------------------
.globl GLOBAL(vmNativeCall)
.align 2
GLOBAL(vmNativeCall):
// arguments:
// x0 -> x19 : function
// w1 -> w20 : stackTotal
// x2 : memoryTable
// w3 : memoryCount
// x4 -> x21 : gprTable
// x5 -> x22 : vfpTable
// w6 -> w23 : returnType
// allocate frame
stp x29, x30, [sp,#-64]!
// save callee-saved register values so we can clobber them
stp x19, x20, [sp,#16]
stp x21, x22, [sp,#32]
str x23, [sp,#48]
// move arguments into callee-saved registers
mov x19, x0
mov w20, w1
mov x21, x4
mov x22, x5
mov w23, w6
// setup stack arguments if necessary
sub sp, sp, w20, uxtw // allocate stack
mov x9, sp
// copy memoryTable onto the outgoing stack, 8 bytes per iteration.
// NOTE(review): w3 is decremented by 8 per copy, so memoryCount is a
// byte count, not an element count — confirm against the caller.
LOCAL(loop):
cmp w3, wzr
b.eq LOCAL(populateGPRs)
ldr x0, [x2], #8
str x0, [x9], #8
sub w3, w3, #8
b LOCAL(loop)
// if a GPR table was supplied, load all eight integer argument
// registers (x0-x7) from it unconditionally
LOCAL(populateGPRs):
cmp x21, xzr
b.eq LOCAL(populateVFPs)
ldp x0, x1, [x21]
ldp x2, x3, [x21,#16]
ldp x4, x5, [x21,#32]
ldp x6, x7, [x21,#48]
// likewise, load all eight FP argument registers (d0-d7) if present
LOCAL(populateVFPs):
cmp x22, xzr
b.eq LOCAL(doCall)
ldp d0, d1, [x22]
ldp d2, d3, [x22,#16]
ldp d4, d5, [x22,#32]
ldp d6, d7, [x22,#48]
LOCAL(doCall):
blr x19 // call function
add sp, sp, w20, uxtw // deallocate stack
// normalize the return value: move float/double results from v0 into
// x0 so the C++ caller can always read a single integer register.
// FLOAT_TYPE / DOUBLE_TYPE come from avian/types.h.
cmp w23,#FLOAT_TYPE
b.ne LOCAL(double)
fmov w0,s0
b LOCAL(exit)
LOCAL(double):
cmp w23,#DOUBLE_TYPE
b.ne LOCAL(exit)
fmov x0,d0
LOCAL(exit):
// restore callee-saved registers and the frame, then return
ldp x19, x20, [sp,#16]
ldp x21, x22, [sp,#32]
ldr x23, [sp,#48]
ldp x29, x30, [sp],#64
ret
//----------------------------------------------------------------------
// vmJump — non-local control transfer (longjmp-style); does not
// return.  Installs a new branch target (x0), return values
// (x4/x5 -> x0/x1), stack pointer (x2) and thread register x19 (x3),
// then branches.  Argument meanings inferred from the moves below —
// confirm against the C++ caller.
//----------------------------------------------------------------------
.globl GLOBAL(vmJump)
.align 2
GLOBAL(vmJump):
mov x30, x0
mov x0, x4
mov x1, x5
mov sp, x2
mov x19, x3
br x30
// byte offsets into the checkpoint structure passed to vmRun.
// NOTE(review): these must stay in sync with the C++ layout — verify
// against the checkpoint class if that structure ever changes.
#define CHECKPOINT_THREAD 8
#define CHECKPOINT_STACK 48
//----------------------------------------------------------------------
// vmRun — record the current stack pointer in checkpoint->stack, then
// call function(checkpoint->thread, arguments).  Saving sp first lets
// the VM unwind back to vmRun_returnAddress later.
//----------------------------------------------------------------------
.globl GLOBAL(vmRun)
.align 2
GLOBAL(vmRun):
// x0: function
// x1: arguments
// x2: checkpoint
// allocate frame
stp x29, x30, [sp,#-96]!
// save callee-saved register values
stp x19, x20, [sp,#16]
stp x21, x22, [sp,#32]
stp x23, x24, [sp,#48]
stp x25, x26, [sp,#64]
stp x27, x28, [sp,#80]
// record the current stack pointer in checkpoint->stack
mov x19, sp
str x19, [x2, #CHECKPOINT_STACK]
// call function(checkpoint->thread, arguments); x1 already holds
// `arguments`
mov x19, x0
ldr x0, [x2, #CHECKPOINT_THREAD]
blr x19
// vmRun_returnAddress — restores callee-saved state and returns.
// NOTE(review): presumably also the label vmJump unwinds to with the
// stack restored to the frame above — confirm against the C++ side.
.globl GLOBAL(vmRun_returnAddress)
.align 2
GLOBAL(vmRun_returnAddress):
ldp x19, x20, [sp,#16]
ldp x21, x22, [sp,#32]
ldp x23, x24, [sp,#48]
ldp x25, x26, [sp,#64]
ldp x27, x28, [sp,#80]
ldp x29, x30, [sp],#96
br x30

26
src/compile-arm64.S Normal file
View File

@ -0,0 +1,26 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "avian/types.h"
#include "avian/target-fields.h"
.text
// NOTE(review): this file is a placeholder — the AArch64 JIT entry
// code is not yet implemented (see #error below).  BYTES_PER_WORD 4
// appears to be copied from the 32-bit ARM version; a 64-bit port
// would presumably use 8 — confirm when implementing.
#define BYTES_PER_WORD 4
#define LOCAL(x) .L##x
#ifdef __APPLE__
# define GLOBAL(x) _##x
#else
# define GLOBAL(x) x
#endif
#error not implemented

455
src/compile-x86_64.S Normal file
View File

@ -0,0 +1,455 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "avian/types.h"
#include "avian/target-fields.h"
#define LOCAL(x) .L##x
#if defined __APPLE__ \
|| ((defined __MINGW32__ || defined __CYGWIN32__) && ! defined __x86_64__)
# define GLOBAL(x) _##x
#else
# define GLOBAL(x) x
#endif
.text
#ifdef __x86_64__
#ifdef AVIAN_USE_FRAME_POINTER
# define ALIGNMENT_ADJUSTMENT 0
#else
# define ALIGNMENT_ADJUSTMENT 8
#endif
#if defined __MINGW32__ || defined __CYGWIN32__
#define CALLEE_SAVED_REGISTER_FOOTPRINT 64 + ALIGNMENT_ADJUSTMENT
//----------------------------------------------------------------------
// vmInvoke (Win64/Cygwin variant) — enter generated (JIT) code from
// C++.  The Microsoft x64 ABI passes the first four arguments in
// rcx/rdx/r8/r9; rsi and rdi are callee-saved on this ABI, which is
// why they are saved below in addition to rbx/r12-r15.
// TARGET_THREAD_SCRATCH / TARGET_THREAD_STACK offsets come from
// avian/target-fields.h.
//----------------------------------------------------------------------
.globl GLOBAL(vmInvoke)
GLOBAL(vmInvoke):
pushq %rbp
movq %rsp,%rbp
// %rcx: thread
// %rdx: function
// %r8 : arguments
// %r9 : argumentsFootprint
// 48(%rbp) : frameSize
// 56(%rbp) : returnType (ignored)
// allocate stack space for callee-saved registers
subq $CALLEE_SAVED_REGISTER_FOOTPRINT,%rsp
// remember this stack position, since we won't be able to rely on
// %rbp being restored when the call returns
movq %rsp,TARGET_THREAD_SCRATCH(%rcx)
// save callee-saved registers
movq %rbx,0(%rsp)
movq %r12,8(%rsp)
movq %r13,16(%rsp)
movq %r14,24(%rsp)
movq %r15,32(%rsp)
movq %rsi,40(%rsp)
movq %rdi,48(%rsp)
// allocate stack space for arguments
movl 48(%rbp),%eax
subq %rax,%rsp
// we use rbx to hold the thread pointer, by convention
mov %rcx,%rbx
// copy arguments into place, 8 bytes per iteration
// (r11 = byte offset, counts up to argumentsFootprint)
movq $0,%r11
jmp LOCAL(vmInvoke_argumentTest)
LOCAL(vmInvoke_argumentLoop):
movq (%r8,%r11,1),%rsi
movq %rsi,(%rsp,%r11,1)
addq $8,%r11
LOCAL(vmInvoke_argumentTest):
cmpq %r9,%r11
jb LOCAL(vmInvoke_argumentLoop)
// call function
call *%rdx
// vmInvoke_returnAddress is exported so generated frames can use it as
// their return address (vmJumpAndInvoke below installs it explicitly).
.globl GLOBAL(vmInvoke_returnAddress)
GLOBAL(vmInvoke_returnAddress):
// restore stack pointer
movq TARGET_THREAD_SCRATCH(%rbx),%rsp
// clear MyThread::stack to avoid confusing another thread calling
// java.lang.Thread.getStackTrace on this one. See
// MyProcess::getStackTrace in compile.cpp for details on how we get
// a reliable stack trace from a thread that might be interrupted at
// any point in its execution.
movq $0,TARGET_THREAD_STACK(%rbx)
.globl GLOBAL(vmInvoke_safeStack)
GLOBAL(vmInvoke_safeStack):
#ifdef AVIAN_CONTINUATIONS
# include "continuations-x86.S"
#endif // AVIAN_CONTINUATIONS
// restore callee-saved registers
movq 0(%rsp),%rbx
movq 8(%rsp),%r12
movq 16(%rsp),%r13
movq 24(%rsp),%r14
movq 32(%rsp),%r15
movq 40(%rsp),%rsi
movq 48(%rsp),%rdi
addq $CALLEE_SAVED_REGISTER_FOOTPRINT,%rsp
// return
popq %rbp
ret
//----------------------------------------------------------------------
// vmJumpAndInvoke (Win64/Cygwin variant) — only meaningful when
// AVIAN_CONTINUATIONS is enabled: builds a fresh frame on `stack`,
// installs vmInvoke_returnAddress as the frame's return address,
// copies the arguments, and jumps to `address`.  Does not return.
//----------------------------------------------------------------------
.globl GLOBAL(vmJumpAndInvoke)
GLOBAL(vmJumpAndInvoke):
#ifdef AVIAN_CONTINUATIONS
// %rcx: thread
// %rdx: address
// %r8 : stack
// %r9 : argumentFootprint
// 40(%rsp): arguments
// 48(%rsp): frameSize
// (stack-passed 5th/6th args: 32 bytes of Win64 shadow space plus the
// return address precede them, hence offsets 40 and 48)
// allocate new frame, adding room for callee-saved registers
movl 48(%rsp),%eax
subq %rax,%r8
subq $CALLEE_SAVED_REGISTER_FOOTPRINT,%r8
movq %rcx,%rbx
// set return address (RIP-relative, PIC-safe)
leaq GLOBAL(vmInvoke_returnAddress)(%rip),%r10
movq %r10,(%r8)
// copy arguments into place; r11 = byte offset, rax = arguments base
movq $0,%r11
movl 40(%rsp),%eax
jmp LOCAL(vmJumpAndInvoke_argumentTest)
LOCAL(vmJumpAndInvoke_argumentLoop):
movq (%rax,%r11,1),%r10
movq %r10,8(%r8,%r11,1)
addq $8,%r11
LOCAL(vmJumpAndInvoke_argumentTest):
cmpq %r9,%r11
jb LOCAL(vmJumpAndInvoke_argumentLoop)
// the arguments have been copied, so we can set the real stack
// pointer now
movq %r8,%rsp
jmp *%rdx
#else // not AVIAN_CONTINUATIONS
// vmJumpAndInvoke should only be called when continuations are
// enabled
int3
#endif // not AVIAN_CONTINUATIONS
#else // not __MINGW32__ || __CYGWIN32__
#define CALLEE_SAVED_REGISTER_FOOTPRINT 48 + ALIGNMENT_ADJUSTMENT
//----------------------------------------------------------------------
// vmInvoke (System V AMD64 variant) — enter generated (JIT) code from
// C++.  SysV passes the first six arguments in rdi/rsi/rdx/rcx/r8/r9;
// only rbx/r12-r15 are callee-saved, so only those are stashed below
// (unlike the Win64 variant above, which must also save rsi/rdi).
//----------------------------------------------------------------------
.globl GLOBAL(vmInvoke)
GLOBAL(vmInvoke):
pushq %rbp
movq %rsp,%rbp
// %rdi: thread
// %rsi: function
// %rdx: arguments
// %rcx: argumentFootprint
// %r8 : frameSize
// %r9 : returnType (ignored)
// allocate stack space for callee-saved registers
subq $CALLEE_SAVED_REGISTER_FOOTPRINT,%rsp
// remember this stack position, since we won't be able to rely on
// %rbp being restored when the call returns
movq %rsp,TARGET_THREAD_SCRATCH(%rdi)
// save callee-saved registers
movq %rbx,0(%rsp)
movq %r12,8(%rsp)
movq %r13,16(%rsp)
movq %r14,24(%rsp)
movq %r15,32(%rsp)
// allocate stack space for arguments
subq %r8,%rsp
// we use rbx to hold the thread pointer, by convention
mov %rdi,%rbx
// copy arguments into place, 8 bytes per iteration
// (r9 = byte offset, counts up to argumentFootprint)
movq $0,%r9
jmp LOCAL(vmInvoke_argumentTest)
LOCAL(vmInvoke_argumentLoop):
movq (%rdx,%r9,1),%r8
movq %r8,(%rsp,%r9,1)
addq $8,%r9
LOCAL(vmInvoke_argumentTest):
cmpq %rcx,%r9
jb LOCAL(vmInvoke_argumentLoop)
// call function
call *%rsi
// vmInvoke_returnAddress is exported so generated frames can use it as
// their return address (vmJumpAndInvoke below installs it explicitly).
.globl GLOBAL(vmInvoke_returnAddress)
GLOBAL(vmInvoke_returnAddress):
// restore stack pointer
movq TARGET_THREAD_SCRATCH(%rbx),%rsp
// clear MyThread::stack to avoid confusing another thread calling
// java.lang.Thread.getStackTrace on this one. See
// MyProcess::getStackTrace in compile.cpp for details on how we get
// a reliable stack trace from a thread that might be interrupted at
// any point in its execution.
movq $0,TARGET_THREAD_STACK(%rbx)
.globl GLOBAL(vmInvoke_safeStack)
GLOBAL(vmInvoke_safeStack):
#ifdef AVIAN_CONTINUATIONS
# include "continuations-x86.S"
#endif // AVIAN_CONTINUATIONS
// restore callee-saved registers
movq 0(%rsp),%rbx
movq 8(%rsp),%r12
movq 16(%rsp),%r13
movq 24(%rsp),%r14
movq 32(%rsp),%r15
addq $CALLEE_SAVED_REGISTER_FOOTPRINT,%rsp
// return
popq %rbp
ret
//----------------------------------------------------------------------
// vmJumpAndInvoke (System V AMD64 variant) — only meaningful when
// AVIAN_CONTINUATIONS is enabled: builds a fresh frame on `stack`,
// installs vmInvoke_returnAddress as the frame's return address,
// copies the arguments, and jumps to `address`.  Does not return.
//----------------------------------------------------------------------
.globl GLOBAL(vmJumpAndInvoke)
GLOBAL(vmJumpAndInvoke):
#ifdef AVIAN_CONTINUATIONS
// %rdi: thread
// %rsi: address
// %rdx: stack
// %rcx: argumentFootprint
// %r8 : arguments
// %r9 : frameSize
// allocate new frame, adding room for callee-saved registers
subq %r9,%rdx
subq $CALLEE_SAVED_REGISTER_FOOTPRINT,%rdx
movq %rdi,%rbx
// set return address, loaded through the GOT so this works in
// position-independent builds
movq GLOBAL(vmInvoke_returnAddress)@GOTPCREL(%rip),%r10
movq %r10,(%rdx)
// copy arguments into place; r11 = byte offset
movq $0,%r11
jmp LOCAL(vmJumpAndInvoke_argumentTest)
LOCAL(vmJumpAndInvoke_argumentLoop):
movq (%r8,%r11,1),%r10
movq %r10,8(%rdx,%r11,1)
addq $8,%r11
LOCAL(vmJumpAndInvoke_argumentTest):
cmpq %rcx,%r11
jb LOCAL(vmJumpAndInvoke_argumentLoop)
// the arguments have been copied, so we can set the real stack
// pointer now
movq %rdx,%rsp
jmp *%rsi
#else // not AVIAN_CONTINUATIONS
// vmJumpAndInvoke should only be called when continuations are
// enabled
int3
#endif // not AVIAN_CONTINUATIONS
#endif // not __MINGW32__ || __CYGWIN32__
#elif defined __i386__
#ifdef AVIAN_USE_FRAME_POINTER
# define ALIGNMENT_ADJUSTMENT 0
#else
# define ALIGNMENT_ADJUSTMENT 12
#endif
#define CALLEE_SAVED_REGISTER_FOOTPRINT 16 + ALIGNMENT_ADJUSTMENT
//----------------------------------------------------------------------
// vmInvoke (i386 variant) — enter generated (JIT) code from C++.
// cdecl: all six arguments arrive on the stack at 8..28(%ebp).
// Callee-saved ebx/esi/edi are stashed in the frame; ebx then holds
// the thread pointer by convention, as in the 64-bit variants.
//----------------------------------------------------------------------
.globl GLOBAL(vmInvoke)
GLOBAL(vmInvoke):
pushl %ebp
movl %esp,%ebp
// 8(%ebp): thread
// 12(%ebp): function
// 16(%ebp): arguments
// 20(%ebp): argumentFootprint
// 24(%ebp): frameSize
// 28(%ebp): returnType
// allocate stack space for callee-saved registers
subl $CALLEE_SAVED_REGISTER_FOOTPRINT,%esp
// remember this stack position, since we won't be able to rely on
// %ebp being restored when the call returns
movl 8(%ebp),%eax
movl %esp,TARGET_THREAD_SCRATCH(%eax)
// save callee-saved registers
movl %ebx,0(%esp)
movl %esi,4(%esp)
movl %edi,8(%esp)
// allocate stack space for arguments
subl 24(%ebp),%esp
// we use ebx to hold the thread pointer, by convention
mov %eax,%ebx
// copy arguments into place, 4 bytes per iteration
// (ecx = byte offset, edx = arguments base)
movl $0,%ecx
movl 16(%ebp),%edx
jmp LOCAL(vmInvoke_argumentTest)
LOCAL(vmInvoke_argumentLoop):
movl (%edx,%ecx,1),%eax
movl %eax,(%esp,%ecx,1)
addl $4,%ecx
LOCAL(vmInvoke_argumentTest):
cmpl 20(%ebp),%ecx
jb LOCAL(vmInvoke_argumentLoop)
// call function
call *12(%ebp)
// vmInvoke_returnAddress is exported so generated frames can use it as
// their return address (vmJumpAndInvoke below installs it explicitly).
.globl GLOBAL(vmInvoke_returnAddress)
GLOBAL(vmInvoke_returnAddress):
// restore stack pointer
movl TARGET_THREAD_SCRATCH(%ebx),%esp
// clear MyThread::stack to avoid confusing another thread calling
// java.lang.Thread.getStackTrace on this one. See
// MyProcess::getStackTrace in compile.cpp for details on how we get
// a reliable stack trace from a thread that might be interrupted at
// any point in its execution.
movl $0,TARGET_THREAD_STACK(%ebx)
.globl GLOBAL(vmInvoke_safeStack)
GLOBAL(vmInvoke_safeStack):
#ifdef AVIAN_CONTINUATIONS
# include "continuations-x86.S"
#endif // AVIAN_CONTINUATIONS
// restore callee-saved registers
movl 0(%esp),%ebx
movl 4(%esp),%esi
movl 8(%esp),%edi
addl $CALLEE_SAVED_REGISTER_FOOTPRINT,%esp
// handle return value based on expected type
// (esp == ebp here, so 28(%esp) is the returnType argument; the value
// is loaded into %ecx but not otherwise used in this block —
// NOTE(review): presumably consumed by the continuation include or the
// caller; confirm)
movl 28(%esp),%ecx
popl %ebp
ret
// getPC — 32-bit PIC idiom: returns its own return address (the
// current PC at the call site) in %esi.  Used by vmJumpAndInvoke below.
LOCAL(getPC):
movl (%esp),%esi
ret
//----------------------------------------------------------------------
// vmJumpAndInvoke (i386 variant) — only meaningful when
// AVIAN_CONTINUATIONS is enabled: builds a fresh frame on `stack`,
// installs vmInvoke_returnAddress as the frame's return address
// (computed PIC-safely via getPC/GOT where needed), copies the
// arguments, and jumps to `address`.  Does not return.
// No frame is built here, so the cdecl arguments are read directly at
// 4..24(%esp).
//----------------------------------------------------------------------
.globl GLOBAL(vmJumpAndInvoke)
GLOBAL(vmJumpAndInvoke):
#ifdef AVIAN_CONTINUATIONS
// 4(%esp): thread
// 8(%esp): address
// 12(%esp): stack
// 16(%esp): argumentFootprint
// 20(%esp): arguments
// 24(%esp): frameSize
movl 12(%esp),%ecx
// allocate new frame, adding room for callee-saved registers,
// return address, and frame pointer
subl 24(%esp),%ecx
subl $CALLEE_SAVED_REGISTER_FOOTPRINT+8,%ecx
movl 4(%esp),%ebx
// set return address: compute the absolute address of
// vmInvoke_returnAddress into %esi
#if defined __MINGW32__ || defined __CYGWIN32__
// non-PIC platforms can use the absolute address directly
movl $GLOBAL(vmInvoke_returnAddress),%esi
#else
// PIC: obtain the current PC, then derive the label address either
// PC-relatively (Mach-O) or through the GOT (ELF)
call LOCAL(getPC)
# if defined __APPLE__
LOCAL(vmJumpAndInvoke_offset):
leal GLOBAL(vmInvoke_returnAddress)-LOCAL(vmJumpAndInvoke_offset)(%esi),%esi
# else
addl $_GLOBAL_OFFSET_TABLE_,%esi
movl GLOBAL(vmInvoke_returnAddress)@GOT(%esi),%esi
# endif
#endif
movl %esi,(%ecx)
// copy arguments into place; esi = byte offset, edx = footprint,
// eax = arguments base
movl $0,%esi
movl 16(%esp),%edx
movl 20(%esp),%eax
jmp LOCAL(vmJumpAndInvoke_argumentTest)
LOCAL(vmJumpAndInvoke_argumentLoop):
movl (%eax,%esi,1),%edi
movl %edi,4(%ecx,%esi,1)
addl $4,%esi
LOCAL(vmJumpAndInvoke_argumentTest):
cmpl %edx,%esi
jb LOCAL(vmJumpAndInvoke_argumentLoop)
movl 8(%esp),%esi
// the arguments have been copied, so we can set the real stack
// pointer now
movl %ecx,%esp
jmp *%esi
#else // not AVIAN_CONTINUATIONS
// vmJumpAndInvoke should only be called when continuations are
// enabled
int3
#endif // not AVIAN_CONTINUATIONS
#else
#error unsupported architecture
#endif //def __x86_64__

145
src/i386.S Normal file
View File

@ -0,0 +1,145 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "avian/types.h"
#define LOCAL(x) .L##x
#if defined __APPLE__ \
|| ((defined __MINGW32__ || defined __CYGWIN32__) && ! defined __x86_64__)
# define GLOBAL(x) _##x
#else
# define GLOBAL(x) x
#endif
.text
#define CHECKPOINT_THREAD 4
#define CHECKPOINT_STACK 24
#define CHECKPOINT_BASE 28
//----------------------------------------------------------------------
// vmNativeCall (i386) — call a native (JNI) function with a
// pre-marshalled argument block, then normalize the return value based
// on returnType (VOID/INT64/FLOAT/DOUBLE come from avian/types.h).
// cdecl: arguments arrive at 8..20(%ebp); result is returned in
// %eax (:%edx for 64-bit values).
//----------------------------------------------------------------------
.globl GLOBAL(vmNativeCall)
GLOBAL(vmNativeCall):
pushl %ebp
movl %esp,%ebp
// 8(%ebp): function
// 12(%ebp): stack
// 16(%ebp): stackSize
// 20(%ebp): returnType
// reserve space for arguments
movl 16(%ebp),%ecx
subl %ecx,%esp
//# ifdef __APPLE__
// align to a 16 byte boundary
// (the __APPLE__ guard is commented out, so alignment is applied
// unconditionally — NOTE(review): presumably because it is harmless on
// other platforms; confirm before re-enabling the guard)
andl $0xFFFFFFF0,%esp
//# endif
// copy arguments into place, 4 bytes per iteration
// (ecx = byte offset into both source block and outgoing stack)
movl $0,%ecx
jmp LOCAL(test)
LOCAL(loop):
movl %ecx,%eax
movl %ecx,%edx
addl %esp,%edx
addl 12(%ebp),%eax
movl (%eax),%eax
movl %eax,(%edx)
addl $4,%ecx
LOCAL(test):
cmpl 16(%ebp),%ecx
jb LOCAL(loop)
// call function
call *8(%ebp)
// handle return value based on expected type
movl 20(%ebp),%ecx
LOCAL(void):
cmpl $VOID_TYPE,%ecx
jne LOCAL(int64)
jmp LOCAL(exit)
// int64: nothing to do — cdecl already returns 32/64-bit integers in
// %eax(:%edx)
LOCAL(int64):
cmpl $INT64_TYPE,%ecx
jne LOCAL(float)
jmp LOCAL(exit)
// float/double: spill the x87 result to memory (reusing the incoming
// argument slots at 8(%ebp)/12(%ebp) as scratch) so it can be returned
// in %eax(:%edx)
LOCAL(float):
cmpl $FLOAT_TYPE,%ecx
jne LOCAL(double)
fstps 8(%ebp)
movl 8(%ebp),%eax
jmp LOCAL(exit)
LOCAL(double):
cmpl $DOUBLE_TYPE,%ecx
jne LOCAL(exit)
fstpl 8(%ebp)
movl 8(%ebp),%eax
movl 12(%ebp),%edx
LOCAL(exit):
// tear down the frame (esp may have been moved by the alignment above,
// so restore it from ebp) and return
movl %ebp,%esp
popl %ebp
ret
//----------------------------------------------------------------------
// vmJump (i386) — non-local control transfer (longjmp-style); does not
// return.  Loads target (4(%esp)), frame pointer (8), thread register
// %ebx (16), return values %eax/%edx (20/24), then replaces %esp with
// 12(%esp) and jumps.  Argument meanings inferred from the loads —
// confirm against the C++ caller.
//----------------------------------------------------------------------
.globl GLOBAL(vmJump)
GLOBAL(vmJump):
movl 4(%esp),%esi
movl 8(%esp),%ebp
movl 16(%esp),%ebx
movl 20(%esp),%eax
movl 24(%esp),%edx
// load the new stack pointer last: the offsets above are relative to
// the incoming %esp
movl 12(%esp),%esp
jmp *%esi
#define VMRUN_FRAME_SIZE 24
//----------------------------------------------------------------------
// vmRun (i386) — record %esp in checkpoint->stack, then call
// function(checkpoint->thread, arguments); vmRun_returnAddress
// restores the callee-saved registers and returns.
// CHECKPOINT_THREAD/CHECKPOINT_STACK byte offsets are defined earlier
// in this file and must match the C++ checkpoint layout.
//----------------------------------------------------------------------
.globl GLOBAL(vmRun)
GLOBAL(vmRun):
// 8(%ebp): function
// 12(%ebp): arguments
// 16(%ebp): checkpoint
// (offsets valid once the frame is established below)
pushl %ebp
movl %esp,%ebp
subl $VMRUN_FRAME_SIZE,%esp
// stash callee-saved registers in the frame at 8/12/16(%esp)
movl %ebx,8(%esp)
movl %esi,12(%esp)
movl %edi,16(%esp)
// outgoing cdecl args: 0(%esp) = checkpoint->thread, 4(%esp) = arguments
movl 12(%ebp),%eax
movl %eax,4(%esp)
movl 16(%ebp),%ecx
movl CHECKPOINT_THREAD(%ecx),%eax
movl %eax,0(%esp)
// record the outgoing stack pointer so vmJump can unwind back here
movl %esp,CHECKPOINT_STACK(%ecx)
call *8(%ebp)
.globl GLOBAL(vmRun_returnAddress)
GLOBAL(vmRun_returnAddress):
movl 8(%esp),%ebx
movl 12(%esp),%esi
movl 16(%esp),%edi
addl $VMRUN_FRAME_SIZE,%esp
popl %ebp
ret

View File

@ -21,8 +21,6 @@
.text
#ifdef __x86_64__
#define CHECKPOINT_THREAD 8
#define CHECKPOINT_STACK 48
@ -340,130 +338,3 @@ GLOBAL(vmRun_returnAddress):
ret
#endif // not __MINGW32__
#elif defined __i386__
#define CHECKPOINT_THREAD 4
#define CHECKPOINT_STACK 24
#define CHECKPOINT_BASE 28
//----------------------------------------------------------------------
// i386 section of the combined x86.S (moved verbatim to i386.S by this
// commit).
//
// vmNativeCall — call a native function with a pre-marshalled argument
// block, then normalize the return value based on returnType
// (VOID/INT64/FLOAT/DOUBLE come from avian/types.h).
//----------------------------------------------------------------------
.globl GLOBAL(vmNativeCall)
GLOBAL(vmNativeCall):
pushl %ebp
movl %esp,%ebp
// 8(%ebp): function
// 12(%ebp): stack
// 16(%ebp): stackSize
// 20(%ebp): returnType
// reserve space for arguments
movl 16(%ebp),%ecx
subl %ecx,%esp
//# ifdef __APPLE__
// align to a 16 byte boundary
// (the __APPLE__ guard is commented out, so alignment is applied
// unconditionally)
andl $0xFFFFFFF0,%esp
//# endif
// copy arguments into place, 4 bytes per iteration
movl $0,%ecx
jmp LOCAL(test)
LOCAL(loop):
movl %ecx,%eax
movl %ecx,%edx
addl %esp,%edx
addl 12(%ebp),%eax
movl (%eax),%eax
movl %eax,(%edx)
addl $4,%ecx
LOCAL(test):
cmpl 16(%ebp),%ecx
jb LOCAL(loop)
// call function
call *8(%ebp)
// handle return value based on expected type
movl 20(%ebp),%ecx
LOCAL(void):
cmpl $VOID_TYPE,%ecx
jne LOCAL(int64)
jmp LOCAL(exit)
// int64: nothing to do — cdecl already returns integers in %eax(:%edx)
LOCAL(int64):
cmpl $INT64_TYPE,%ecx
jne LOCAL(float)
jmp LOCAL(exit)
// float/double: spill the x87 result to scratch (reusing the incoming
// argument slots) so it can be returned in %eax(:%edx)
LOCAL(float):
cmpl $FLOAT_TYPE,%ecx
jne LOCAL(double)
fstps 8(%ebp)
movl 8(%ebp),%eax
jmp LOCAL(exit)
LOCAL(double):
cmpl $DOUBLE_TYPE,%ecx
jne LOCAL(exit)
fstpl 8(%ebp)
movl 8(%ebp),%eax
movl 12(%ebp),%edx
LOCAL(exit):
movl %ebp,%esp
popl %ebp
ret
//----------------------------------------------------------------------
// vmJump — longjmp-style transfer; does not return.  %esp is loaded
// last, since the offsets are relative to the incoming stack pointer.
//----------------------------------------------------------------------
.globl GLOBAL(vmJump)
GLOBAL(vmJump):
movl 4(%esp),%esi
movl 8(%esp),%ebp
movl 16(%esp),%ebx
movl 20(%esp),%eax
movl 24(%esp),%edx
movl 12(%esp),%esp
jmp *%esi
#define VMRUN_FRAME_SIZE 24
//----------------------------------------------------------------------
// vmRun — record %esp in checkpoint->stack, then call
// function(checkpoint->thread, arguments); vmRun_returnAddress
// restores callee-saved registers and returns.
//----------------------------------------------------------------------
.globl GLOBAL(vmRun)
GLOBAL(vmRun):
// 8(%ebp): function
// 12(%ebp): arguments
// 16(%ebp): checkpoint
// (offsets valid once the frame is established below)
pushl %ebp
movl %esp,%ebp
subl $VMRUN_FRAME_SIZE,%esp
// stash callee-saved registers in the frame at 8/12/16(%esp)
movl %ebx,8(%esp)
movl %esi,12(%esp)
movl %edi,16(%esp)
// outgoing cdecl args: 0(%esp) = checkpoint->thread, 4(%esp) = arguments
movl 12(%ebp),%eax
movl %eax,4(%esp)
movl 16(%ebp),%ecx
movl CHECKPOINT_THREAD(%ecx),%eax
movl %eax,0(%esp)
movl %esp,CHECKPOINT_STACK(%ecx)
call *8(%ebp)
.globl GLOBAL(vmRun_returnAddress)
GLOBAL(vmRun_returnAddress):
movl 8(%esp),%ebx
movl 12(%esp),%esi
movl 16(%esp),%edi
addl $VMRUN_FRAME_SIZE,%esp
popl %ebp
ret
#endif // __i386__