corda/src/x86.S

/* Copyright (c) 2008, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "types.h"
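// types.h is expected to supply the VOID_TYPE, INT64_TYPE, FLOAT_TYPE
// and DOUBLE_TYPE constants compared against below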
.text
#ifdef __x86_64__
.globl vmNativeCall
vmNativeCall:
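// a sketch of the C-level prototype this entry point appears to
// implement, inferred from the register comments below (the real
// declaration lives in the C++ caller, not in this file):
//
//   uint64_t vmNativeCall(void* function, void* stack,
//                         unsigned stackSize, void* gprTable,
//                         void* sseTable, unsigned returnType);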
pushq %rbp
movq %rsp,%rbp
// %rdi aka -48(%rbp): function
// %rsi aka -40(%rbp): stack
// %rdx aka -32(%rbp): stackSize
// %rcx aka -24(%rbp): gprTable
// %r8 aka -16(%rbp): sseTable
// %r9 aka -8(%rbp): returnType
// save our argument registers so we can clobber them
pushq %r9
pushq %r8
pushq %rcx
pushq %rdx
pushq %rsi
pushq %rdi
// reserve space for arguments passed via memory
subq %rdx,%rsp
// align to a 16 byte boundary
andq $0xFFFFFFFFFFFFFFF0,%rsp
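// (the System V AMD64 ABI requires %rsp to be 16-byte aligned at every
// call instruction; rounding down is safe because the argument area was
// just reserved above)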
// copy memory arguments into place
movq $0,%rcx
jmp test
loop:
// %rcx is the byte offset into both areas: load stack[%rcx] into %rax,
// then store it to the newly reserved area at %rsp + %rcx
movq %rcx,%rax
movq %rcx,%rdx
addq %rsp,%rdx
addq -40(%rbp),%rax
movq (%rax),%rax
movq %rax,(%rdx)
addq $8,%rcx
test:
cmpq -32(%rbp),%rcx
jb loop
// do we need to load the general-purpose registers?
cmpq $0,-24(%rbp)
je sse
// yes, we do
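// (%rdi, %rsi, %rdx, %rcx, %r8 and %r9 are the six integer/pointer
// argument registers of the System V AMD64 ABI, in order, so the table
// presumably holds six 8-byte slots)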
movq -24(%rbp),%rax
movq 0(%rax),%rdi
movq 8(%rax),%rsi
movq 16(%rax),%rdx
movq 24(%rax),%rcx
movq 32(%rax),%r8
movq 40(%rax),%r9
sse:
// do we need to load the SSE registers?
cmpq $0,-16(%rbp)
je call
// yes, we do
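// (%xmm0-%xmm7 are the eight SSE argument registers; the table
// presumably holds eight 8-byte slots)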
movq -16(%rbp),%rax
movq 0(%rax),%xmm0
movq 8(%rax),%xmm1
movq 16(%rax),%xmm2
movq 24(%rax),%xmm3
movq 32(%rax),%xmm4
movq 40(%rax),%xmm5
movq 48(%rax),%xmm6
movq 56(%rax),%xmm7
call:
// call the function through the pointer saved in the prologue
call *-48(%rbp)
// handle return value based on expected type
movq -8(%rbp),%rcx
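// integer results are already in %rax at this point; only
// floating-point results, which arrive in %xmm0, need to be moved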
void:
cmpq $VOID_TYPE,%rcx
jne float
jmp exit
float:
cmpq $FLOAT_TYPE,%rcx
je copy
cmpq $DOUBLE_TYPE,%rcx
jne exit
copy:
// copy the raw bits of an %xmm0 result into the integer return register
movq %xmm0,%rax
exit:
movq %rbp,%rsp
popq %rbp
ret
.globl vmJump
vmJump:
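// restore a saved execution context and resume there; judging from the
// register usage, the C-level signature is something like
//
//   void vmJump(void* address, void* base, void* stack, void* value);
//
// with value ending up in the callee-saved %rbx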
movq %rsi,%rbp
movq %rdx,%rsp
movq %rcx,%rbx
jmp *%rdi
#elif defined __i386__
# if defined __APPLE__ || defined __MINGW32__
.globl _vmNativeCall
_vmNativeCall:
# else
.globl vmNativeCall
vmNativeCall:
# endif
pushl %ebp
movl %esp,%ebp
// 8(%ebp): function
// 12(%ebp): stack
// 16(%ebp): stackSize
// 20(%ebp): returnType
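// a sketch of the C-level prototype this entry point appears to
// implement, inferred from the offsets above:
//
//   uint64_t vmNativeCall(void* function, void* stack,
//                         unsigned stackSize, unsigned returnType);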
// reserve space for arguments
movl 16(%ebp),%ecx
subl %ecx,%esp
# ifdef __APPLE__
// align to a 16 byte boundary; the Darwin i386 ABI requires the stack
// to be 16-byte aligned at call sites
andl $0xFFFFFFF0,%esp
# endif
// copy arguments into place
movl $0,%ecx
jmp test
loop:
// %ecx is the byte offset into both areas: load stack[%ecx] into %eax,
// then store it to the newly reserved area at %esp + %ecx, one 4-byte
// word at a time
movl %ecx,%eax
movl %ecx,%edx
addl %esp,%edx
addl 12(%ebp),%eax
movl (%eax),%eax
movl %eax,(%edx)
addl $4,%ecx
test:
cmpl 16(%ebp),%ecx
jb loop
// call function
call *8(%ebp)
// handle return value based on expected type
movl 20(%ebp),%ecx
void:
cmpl $VOID_TYPE,%ecx
jne int64
jmp exit
int64:
cmpl $INT64_TYPE,%ecx
jne float
// 64-bit integer results are already in %edx:%eax, so nothing to do
jmp exit
float:
cmpl $FLOAT_TYPE,%ecx
jne double
// x87 leaves floating-point results in st(0); spill the value to the
// now-dead argument area, then reload it into %eax
fstps 8(%ebp)
movl 8(%ebp),%eax
jmp exit
double:
cmpl $DOUBLE_TYPE,%ecx
jne exit
// as above, but the result occupies two words, returned in %edx:%eax
fstpl 8(%ebp)
movl 8(%ebp),%eax
movl 12(%ebp),%edx
exit:
movl %ebp,%esp
popl %ebp
ret
# if defined __APPLE__ || defined __MINGW32__
.globl _vmJump
_vmJump:
# else
.globl vmJump
vmJump:
# endif
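// stack layout on entry: 4(%esp) address, 8(%esp) base, 12(%esp) stack,
// 16(%esp) value; %ebx must be loaded before %esp is replaced, because
// the incoming arguments become unreachable once %esp changes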
movl 4(%esp),%eax
movl 8(%esp),%ebp
movl 16(%esp),%ebx
movl 12(%esp),%esp
jmp *%eax
#else
# error unsupported platform
#endif