/* Copyright (c) 2008-2009, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "types.h"

/* LOCAL(x): assembler-local label; the .L prefix keeps it out of the
   object file's symbol table with GNU as. */
#define LOCAL(x) .L##x

#if defined __APPLE__ || defined __MINGW32__ || defined __CYGWIN32__
/* These platforms prefix C symbol names with an underscore. */
# define GLOBAL(x) _##x
#else
# define GLOBAL(x) x
#endif
.text
#ifdef __x86_64__
#ifdef __MINGW32__
/* int detectFeature(unsigned ecxMask, unsigned edxMask) -- Win64 ABI
   (%ecx = ecxMask, %edx = edxMask).
   Executes CPUID leaf 1 and returns 1 if any feature bit selected by
   ecxMask (checked against CPUID's %ecx output) or edxMask (checked
   against CPUID's %edx output) is present; returns 0 otherwise. */
.globl GLOBAL(detectFeature)
GLOBAL(detectFeature):
pushq %rbp
movq %rsp, %rbp
pushq %rdx
pushq %rcx
pushq %rbx
pushq %rsi
pushq %rdi
// stash the argument masks in %edi/%esi, which cpuid does not touch
// (%rsi/%rdi are callee-saved in the Win64 ABI, saved above)
movl %ecx, %edi
movl %edx, %esi
movl $1, %eax
// leaf 1: processor info and feature bits in %ecx/%edx
cpuid
// keep only the requested bits from each feature word
andl %esi, %edx
andl %edi, %ecx
orl %edx, %ecx
// any requested feature present?
test %ecx, %ecx
je LOCAL(NOSSE)
movl $1, %eax
jmp LOCAL(SSEEND)
LOCAL(NOSSE):
movl $0, %eax
LOCAL(SSEEND):
popq %rdi
popq %rsi
popq %rbx
popq %rcx
popq %rdx
movq %rbp,%rsp
popq %rbp
ret
/* vmNativeCall(function, arguments, argumentCount, returnType) -- Win64 ABI.
   Generic native-call trampoline.  The first four 64-bit argument
   slots are mirrored into BOTH the integer registers (%rcx/%rdx/%r8/%r9)
   and the SSE registers (%xmm0-%xmm3): the Win64 convention is
   slot-positional, and the argument types are not known here, so
   loading both register files is always safe.  Remaining slots are
   copied onto the stack above the 32-byte shadow area.  After the
   call, a FLOAT_TYPE/DOUBLE_TYPE result is moved from %xmm0 into
   %rax. */
.globl GLOBAL(vmNativeCall)
GLOBAL(vmNativeCall):
pushq %rbp
//save nonvolatile registers
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq %rsp, %rbp
// %rcx: function
// %rdx: arguments
// %r8: arguments count
// %r9: return type
// move the arguments out of the argument registers so those can be
// reloaded with the callee's arguments
movq %rcx, %r10
movq %rdx, %r11
movq %r8, %r12
movq %r9, %r13
// %r10: function
// %r11: arguments
// %r12: arguments count
// %r13: return type
//allocate initial stack space: the 32-byte shadow area every Win64
//callee is entitled to
subq $32, %rsp
//first arg
cmp $0, %r12
je LOCAL(call)
movq 0(%r11),%rcx
movq 0(%r11),%xmm0
subq $1, %r12
//second arg
cmp $0, %r12
je LOCAL(call)
movq 8(%r11),%rdx
movq 8(%r11),%xmm1
subq $1, %r12
//third arg
cmp $0, %r12
je LOCAL(call)
movq 16(%r11),%r8
movq 16(%r11),%xmm2
subq $1, %r12
//fourth arg
cmp $0, %r12
je LOCAL(call)
movq 24(%r11),%r9
movq 24(%r11),%xmm3
subq $1, %r12
//calculate stack space for arguments, aligned
// %r12 now holds the number of stack-passed arguments
movq $8, %r15
leaq (%r15, %r12, 8), %r15
andq $0xFFFFFFFFFFFFFFF0, %r15
//reserve stack space for arguments
subq %r15, %rsp
//reset the counter: %r12 becomes the index of the LAST argument slot;
//the loop below copies slots count-1 down to 4, which land at
//%rsp+32 upward, just above the shadow area
addq $3, %r12
jmp LOCAL(loopend)
LOCAL(loop):
movq (%r11, %r12, 8), %r14
movq %r14, (%rsp, %r12, 8);
subq $1, %r12
LOCAL(loopend):
//we don't need to move arg 3 and lower
cmpq $3, %r12
jne LOCAL(loop)
LOCAL(call):
call *%r10
LOCAL(void):
// integer results are already in %rax; only FP results need moving
cmpq $VOID_TYPE,%r13
jne LOCAL(float)
jmp LOCAL(exit)
LOCAL(float):
cmpq $FLOAT_TYPE,%r13
je LOCAL(copy)
cmpq $DOUBLE_TYPE,%r13
jne LOCAL(exit)
LOCAL(copy):
movq %xmm0,%rax
LOCAL(exit):
movq %rbp, %rsp
//return nonvolatile registers to their former state
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
ret
/* vmJump -- Win64 ABI.  Non-local jump: installs a new frame pointer
   (%rdx), stack pointer (%r8), and %rbx (%r9), loads a two-word
   return value into %rax:%rdx, and jumps to the address in %rcx.
   Never returns to its caller.
   NOTE(review): the %rax/%rdx values are read from 8(%rsp)/16(%rsp),
   which is inside what the Win64 convention calls the caller-allocated
   shadow area rather than the normal 5th/6th stack-argument slots --
   presumably the caller stores them there deliberately; confirm
   against the call site. */
.globl GLOBAL(vmJump)
GLOBAL(vmJump):
movq %rdx,%rbp
movq 8(%rsp),%rax
movq 16(%rsp),%rdx
// switch stacks last, after all reads from the old stack
movq %r8,%rsp
movq %r9,%rbx
jmp *%rcx
#else // not __MINGW32__
/* int detectFeature(unsigned ecxMask, unsigned edxMask) -- System V
   AMD64 ABI (%edi = ecxMask, %esi = edxMask).
   Executes CPUID leaf 1 and returns 1 if any feature bit selected by
   ecxMask (checked against CPUID's %ecx output) or edxMask (checked
   against CPUID's %edx output) is present; returns 0 otherwise.
   The argument registers %edi/%esi survive cpuid untouched, so no
   copies are needed here. */
.globl GLOBAL(detectFeature)
GLOBAL(detectFeature):
pushq %rbp
movq %rsp, %rbp
pushq %rdx
pushq %rcx
// %rbx is callee-saved and clobbered by cpuid
pushq %rbx
movl $1, %eax
// leaf 1: processor info and feature bits in %ecx/%edx
cpuid
// keep only the requested bits from each feature word
andl %esi, %edx
andl %edi, %ecx
orl %edx, %ecx
test %ecx, %ecx
je LOCAL(NOSSE)
movl $1, %eax
jmp LOCAL(SSEEND)
LOCAL(NOSSE):
movl $0, %eax
LOCAL(SSEEND):
popq %rbx
popq %rcx
popq %rdx
movq %rbp,%rsp
popq %rbp
ret
/* vmNativeCall(function, stack, stackSize, gprTable, sseTable,
                returnType) -- System V AMD64 ABI.
   Generic native-call trampoline.  Copies stackSize bytes of
   memory-passed arguments from 'stack' onto the real stack, loads the
   six integer argument registers from gprTable and the eight SSE
   argument registers from sseTable (each table optional: a null
   pointer skips that register file), calls 'function', and moves a
   FLOAT_TYPE/DOUBLE_TYPE result from %xmm0 into %rax. */
.globl GLOBAL(vmNativeCall)
GLOBAL(vmNativeCall):
pushq %rbp
movq %rsp,%rbp

// %rdi aka -48(%rbp): function
// %rsi aka -40(%rbp): stack
// %rdx aka -32(%rbp): stackSize
// %rcx aka -24(%rbp): gprTable
// %r8 aka -16(%rbp): sseTable
// %r9 aka -8(%rbp): returnType

// save our argument registers so we can clobber them
pushq %r9
pushq %r8
pushq %rcx
pushq %rdx
pushq %rsi
pushq %rdi

// reserve space for arguments passed via memory
subq %rdx,%rsp

// align to a 16 byte boundary (rounding %rsp down only grows the
// reservation, so the copy below always fits)
andq $0xFFFFFFFFFFFFFFF0,%rsp

// copy memory arguments into place; %rcx counts bytes copied so far
movq $0,%rcx
jmp LOCAL(test)

LOCAL(loop):
movq %rcx,%rax
movq %rcx,%rdx
addq %rsp,%rdx
addq -40(%rbp),%rax
movq (%rax),%rax
movq %rax,(%rdx)
addq $8,%rcx

LOCAL(test):
cmpq -32(%rbp),%rcx
jb LOCAL(loop)

// do we need to load the general-purpose registers?
cmpq $0,-24(%rbp)
je LOCAL(sse)

// yes, we do
movq -24(%rbp),%rax
movq 0(%rax),%rdi
movq 8(%rax),%rsi
movq 16(%rax),%rdx
movq 24(%rax),%rcx
movq 32(%rax),%r8
movq 40(%rax),%r9

LOCAL(sse):
// do we need to load the SSE registers?
cmpq $0,-16(%rbp)
je LOCAL(call)

// yes, we do
movq -16(%rbp),%rax
movq 0(%rax),%xmm0
movq 8(%rax),%xmm1
movq 16(%rax),%xmm2
movq 24(%rax),%xmm3
movq 32(%rax),%xmm4
movq 40(%rax),%xmm5
movq 48(%rax),%xmm6
// BUG FIX: the table entries are eight bytes apart, so the eighth
// slot lives at offset 56 -- the previous 64(%rax) skipped slot 7 and
// loaded garbage into %xmm7 for calls with eight FP arguments.
movq 56(%rax),%xmm7

LOCAL(call):
call *-48(%rbp)

// handle return value based on expected type; integer results are
// already in %rax, FP results must be moved from %xmm0
movq -8(%rbp),%rcx

LOCAL(void):
cmpq $VOID_TYPE,%rcx
jne LOCAL(float)
jmp LOCAL(exit)

LOCAL(float):
cmpq $FLOAT_TYPE,%rcx
je LOCAL(copy)

cmpq $DOUBLE_TYPE,%rcx
jne LOCAL(exit)

LOCAL(copy):
#ifdef __APPLE__
// as of OS X 10.6, Apple is still using an assembler that doesn't
// understand movq SSE,GPR, but movd does the same thing, despite
// the name
movd %xmm0,%rax
#else
movq %xmm0,%rax
#endif

LOCAL(exit):
movq %rbp,%rsp
popq %rbp
ret
/* vmJump -- System V AMD64 ABI.  Non-local jump: installs a new frame
   pointer (%rsi), stack pointer (%rdx), and %rbx (%rcx), loads a
   two-word return value into %rax:%rdx (%r8/%r9), and jumps to the
   address in %rdi.  Never returns to its caller. */
.globl GLOBAL(vmJump)
GLOBAL(vmJump):
movq %rsi,%rbp
movq %rdx,%rsp
movq %rcx,%rbx
movq %r8,%rax
movq %r9,%rdx
jmp *%rdi
#endif // not __MINGW32__
#elif defined __i386__
/* int detectFeature(unsigned ecxMask, unsigned edxMask) -- i386 cdecl
   (8(%ebp) = ecxMask, 12(%ebp) = edxMask).
   Executes CPUID leaf 1 and returns 1 if any feature bit selected by
   ecxMask (checked against CPUID's %ecx output) or edxMask (checked
   against CPUID's %edx output) is present; returns 0 otherwise. */
.globl GLOBAL(detectFeature)
GLOBAL(detectFeature):
pushl %ebp
movl %esp, %ebp
pushl %edx
pushl %ecx
pushl %ebx
pushl %esi
pushl %edi
// stash the argument masks in %esi/%edi, which cpuid does not touch
movl 12(%ebp), %esi
movl 8(%ebp), %edi
movl $1, %eax
// leaf 1: processor info and feature bits in %ecx/%edx
cpuid
// keep only the requested bits from each feature word
andl %esi, %edx
andl %edi, %ecx
orl %edx, %ecx
test %ecx, %ecx
je LOCAL(NOSSE)
movl $1, %eax
jmp LOCAL(SSEEND)
LOCAL(NOSSE):
movl $0, %eax
LOCAL(SSEEND):
popl %edi
popl %esi
popl %ebx
popl %ecx
popl %edx
movl %ebp,%esp
popl %ebp
ret
/* vmNativeCall(function, stack, stackSize, returnType) -- i386 cdecl.
   Generic native-call trampoline: copies stackSize bytes of arguments
   from 'stack' onto the real stack, calls 'function', and normalizes
   the result into %eax (or %eax:%edx for 64-bit results) according to
   returnType.  x87 FP results are spilled to memory and reloaded as
   integer words, reusing the (now dead) incoming argument slots as
   scratch space. */
.globl GLOBAL(vmNativeCall)
GLOBAL(vmNativeCall):
pushl %ebp
movl %esp,%ebp
// 8(%ebp): function
// 12(%ebp): stack
// 16(%ebp): stackSize
// 20(%ebp): returnType
// reserve space for arguments
movl 16(%ebp),%ecx
subl %ecx,%esp
# ifdef __APPLE__
// align to a 16 byte boundary on Darwin
andl $0xFFFFFFF0,%esp
# endif
// copy arguments into place; %ecx counts bytes copied so far
movl $0,%ecx
jmp LOCAL(test)

LOCAL(loop):
movl %ecx,%eax
movl %ecx,%edx
addl %esp,%edx
addl 12(%ebp),%eax
movl (%eax),%eax
movl %eax,(%edx)
addl $4,%ecx

LOCAL(test):
cmpl 16(%ebp),%ecx
jb LOCAL(loop)

// call function
call *8(%ebp)

// handle return value based on expected type
movl 20(%ebp),%ecx

LOCAL(void):
cmpl $VOID_TYPE,%ecx
jne LOCAL(int64)
jmp LOCAL(exit)

LOCAL(int64):
// 64-bit integers are already in %eax:%edx per cdecl
cmpl $INT64_TYPE,%ecx
jne LOCAL(float)
jmp LOCAL(exit)

LOCAL(float):
cmpl $FLOAT_TYPE,%ecx
jne LOCAL(double)
// pop st(0) as a single into the dead 'function' arg slot and reload
fstps 8(%ebp)
movl 8(%ebp),%eax
jmp LOCAL(exit)

LOCAL(double):
cmpl $DOUBLE_TYPE,%ecx
jne LOCAL(exit)
// pop st(0) as a double over the dead arg slots and reload both words
fstpl 8(%ebp)
movl 8(%ebp),%eax
movl 12(%ebp),%edx

LOCAL(exit):
movl %ebp,%esp
popl %ebp
ret
/* vmJump -- i386 cdecl.  Non-local jump: installs a new frame pointer
   (8(%esp)), stack pointer (12(%esp)), and %ebx (16(%esp)), loads a
   two-word return value into %eax:%edx (20/24(%esp)), and jumps to
   the address at 4(%esp).  Never returns to its caller, so clobbering
   the callee-saved %esi as the jump target is safe. */
.globl GLOBAL(vmJump)
GLOBAL(vmJump):
movl 4(%esp),%esi
movl 8(%esp),%ebp
movl 16(%esp),%ebx
movl 20(%esp),%eax
movl 24(%esp),%edx
// switch stacks last, after all reads from the old stack
movl 12(%esp),%esp
jmp *%esi
#endif //def __x86_64__