diff --git a/classpath/java-io.cpp b/classpath/java-io.cpp index acb61554f4..1bf1a04fec 100644 --- a/classpath/java-io.cpp +++ b/classpath/java-io.cpp @@ -23,6 +23,7 @@ #ifdef WIN32 # include # include +# include # define OPEN _open # define CLOSE _close diff --git a/classpath/java-lang.cpp b/classpath/java-lang.cpp index 514bfc40e1..fd5c676ccd 100644 --- a/classpath/java-lang.cpp +++ b/classpath/java-lang.cpp @@ -81,7 +81,7 @@ namespace { int descriptor(JNIEnv* e, HANDLE h) { - int fd = _open_osfhandle(reinterpret_cast(h), 0); + int fd = _open_osfhandle(reinterpret_cast(h), 0); if (fd == -1) { throwNew(e, "java/io/IOException", strerror(errno)); } diff --git a/classpath/java/lang/Class.java b/classpath/java/lang/Class.java index 66529779d7..68cce10367 100644 --- a/classpath/java/lang/Class.java +++ b/classpath/java/lang/Class.java @@ -176,6 +176,8 @@ public final class Class implements Type, GenericDeclaration { private Method findMethod(String name, Class[] parameterTypes) { if (methodTable != null) { + if (parameterTypes == null) + parameterTypes = new Class[0]; for (int i = 0; i < methodTable.length; ++i) { if (methodTable[i].getName().equals(name) && match(parameterTypes, methodTable[i].getParameterTypes())) diff --git a/makefile b/makefile index cbd2d86db0..21b007dd13 100644 --- a/makefile +++ b/makefile @@ -18,10 +18,6 @@ bootimage-platform = \ $(subst cygwin,windows,$(subst mingw32,windows,$(build-platform))) platform = $(bootimage-platform) -ifeq ($(platform),windows) - arch = i386 -endif - mode = fast process = compile @@ -191,6 +187,21 @@ ifeq ($(platform),windows) native-path = cygpath -m endif endif + + ifeq ($(arch),x86_64) + cxx = x86_64-pc-mingw32-g++ + cc = x86_64-pc-mingw32-gcc + dlltool = x86_64-pc-mingw32-dlltool + ar = x86_64-pc-mingw32-ar + ranlib = x86_64-pc-mingw32-ranlib + objcopy = x86_64-pc-mingw32-objcopy + strip = : + inc = "$(root)/win64/include" + lib = "$(root)/win64/lib" + pointer-size = 8 + object-format = pe-x86-64 + endif + endif ifeq ($(mode),debug) @@ -636,4 +647,3 @@ $(executable-dynamic): $(driver-dynamic-object) $(dynamic-library) $(generator): $(generator-objects) @echo "linking $(@)" $(build-cc) $(^) $(build-lflags) -o $(@) - diff --git a/src/common.h b/src/common.h index 7f4abb3243..c9964b7b7d 100644 --- a/src/common.h +++ b/src/common.h @@ -40,10 +40,17 @@ # define ULD "u" #endif #elif defined __x86_64__ -# define LD "ld" -# define LX "lx" -# define LLD "ld" -# define ULD "lu" +# ifdef __MINGW32__ +# define LD "I64d" +# define LX "I64x" +# define LLD "I64d" +# define ULD "I64x" +# else +# define LD "ld" +# define LX "lx" +# define LLD "ld" +# define ULD "lu" +# endif #else # error "Unsupported architecture" #endif diff --git a/src/compile-x86.S b/src/compile-x86.S index cab2e20089..5c73e407dc 100644 --- a/src/compile-x86.S +++ b/src/compile-x86.S @@ -21,19 +21,140 @@ .text #ifdef __x86_64__ - -#define THREAD_CONTINUATION 168 -#define THREAD_EXCEPTION 64 -#define THREAD_EXCEPTION_STACK_ADJUSTMENT 176 -#define THREAD_EXCEPTION_OFFSET 184 -#define THREAD_EXCEPTION_HANDLER 192 -#define CONTINUATION_NEXT 8 -#define CONTINUATION_ADDRESS 32 -#define CONTINUATION_RETURN_ADDRESS_OFFSET 40 -#define CONTINUATION_FRAME_POINTER_OFFSET 48 -#define CONTINUATION_LENGTH 56 -#define CONTINUATION_BODY 64 +#ifdef __MINGW32__ + +#define CALLEE_SAVED_REGISTER_FOOTPRINT 64 + +.globl GLOBAL(vmInvoke) +GLOBAL(vmInvoke): + pushq %rbp + movq %rsp,%rbp + + // %rcx: thread + // %rdx: function + // %r8 : arguments + // %r9 : argumentsFootprint + // 48(%rbp) : frameSize + // 
56(%rbp) : returnType (ignored) + + // allocate stack space, adding room for callee-saved registers + movl 48(%rbp),%eax + subq %rax,%rsp + subq $CALLEE_SAVED_REGISTER_FOOTPRINT,%rsp + + // save callee-saved registers + movq %rsp,%r11 + addq %rax,%r11 + + movq %rbx,0(%r11) + movq %r12,8(%r11) + movq %r13,16(%r11) + movq %r14,24(%r11) + movq %r15,32(%r11) + movq %rsi,40(%r11) + movq %rdi,48(%r11) + + // we use rbx to hold the thread pointer, by convention + mov %rcx,%rbx + + // copy arguments into place + movq $0,%r11 + jmp LOCAL(vmInvoke_argumentTest) + +LOCAL(vmInvoke_argumentLoop): + movq (%r8,%r11,1),%rsi + movq %rsi,(%rsp,%r11,1) + addq $8,%r11 + +LOCAL(vmInvoke_argumentTest): + cmpq %r9,%r11 + jb LOCAL(vmInvoke_argumentLoop) + + // call function + call *%rdx + +.globl GLOBAL(vmInvoke_returnAddress) +GLOBAL(vmInvoke_returnAddress): + // restore stack pointer + movq %rbp,%rsp + +#ifdef AVIAN_CONTINUATIONS +# include "continuations-x86.S" +#endif // AVIAN_CONTINUATIONS + + // restore callee-saved registers (below the stack pointer, but in + // the red zone) + movq %rsp,%r11 + subq $CALLEE_SAVED_REGISTER_FOOTPRINT,%r11 + + movq 0(%r11),%rbx + movq 8(%r11),%r12 + movq 16(%r11),%r13 + movq 24(%r11),%r14 + movq 32(%r11),%r15 + movq 40(%r11),%rsi + movq 48(%r11),%rdi + + // return + popq %rbp + ret + +.globl GLOBAL(vmJumpAndInvoke) +GLOBAL(vmJumpAndInvoke): +#ifdef AVIAN_CONTINUATIONS + // %rcx: thread + // %rdx: address + // %r8 : base + // %r9 : (unused) + // 8(%rsp): argumentFootprint + // 16(%rsp): arguments + // 24(%rsp): frameSize + + movq %r8,%rbp + + // restore (pseudo)-stack pointer (we don't want to touch the real + // stack pointer, since we haven't copied the arguments yet) + movq %rbp,%r9 + + // allocate new frame, adding room for callee-saved registers + movl 24(%rsp),%eax + subq %rax,%r9 + subq $CALLEE_SAVED_REGISTER_FOOTPRINT,%r9 + + movq %rcx,%rbx + + // set return address + movq vmInvoke_returnAddress@GOTPCREL(%rip),%r10 + movq %r10,(%r9) + + // copy arguments into place + movq $0,%r11 + movq 16(%rsp),%r8 + movq 8(%rsp),%rax + jmp LOCAL(vmJumpAndInvoke_argumentTest) + +LOCAL(vmJumpAndInvoke_argumentLoop): + movq (%r8,%r11,1),%r10 + movq %r10,8(%r9,%r11,1) + addq $8,%r11 + +LOCAL(vmJumpAndInvoke_argumentTest): + cmpq %rax,%r11 + jb LOCAL(vmJumpAndInvoke_argumentLoop) + + // the arguments have been copied, so we can set the real stack + // pointer now + movq %r9,%rsp + + jmp *%rdx +#else // not AVIAN_CONTINUATIONS + // vmJumpAndInvoke should only be called when continuations are + // enabled + int3 +#endif // not AVIAN_CONTINUATIONS + +#else // not __MINGW32__ #define CALLEE_SAVED_REGISTER_FOOTPRINT 48 @@ -88,65 +209,7 @@ GLOBAL(vmInvoke_returnAddress): movq %rbp,%rsp #ifdef AVIAN_CONTINUATIONS - // call the next continuation, if any - movq THREAD_CONTINUATION(%rbx),%rcx - cmpq $0,%rcx - je LOCAL(vmInvoke_exit) - - // allocate a frame of size (continuation.length * BYTES_PER_WORD) - // + CALLEE_SAVED_REGISTER_FOOTPRINT - movq CONTINUATION_LENGTH(%rcx),%rsi - shlq $3,%rsi - subq %rsi,%rsp - subq $CALLEE_SAVED_REGISTER_FOOTPRINT,%rsp - - // copy the continuation body into the frame - leaq CONTINUATION_BODY(%rcx),%rdi - - movq $0,%r9 - jmp LOCAL(vmInvoke_continuationTest) - -LOCAL(vmInvoke_continuationLoop): - movq (%rdi,%r9,1),%r8 - movq %r8,(%rsp,%r9,1) - addq $8,%r9 - -LOCAL(vmInvoke_continuationTest): - cmpq %rsi,%r9 - jb LOCAL(vmInvoke_continuationLoop) - - // set the return address to vmInvoke_returnAddress - movq CONTINUATION_RETURN_ADDRESS_OFFSET(%rcx),%rdi - movq 
vmInvoke_returnAddress@GOTPCREL(%rip),%r10 - movq %r10,(%rsp,%rdi,1) - - // save the current base pointer in the frame and update it - movq CONTINUATION_FRAME_POINTER_OFFSET(%rcx),%rdi - movq %rbp,(%rsp,%rdi,1) - addq %rsp,%rdi - movq %rdi,%rbp - - // consume the continuation - movq CONTINUATION_NEXT(%rcx),%rdi - movq %rdi,THREAD_CONTINUATION(%rbx) - - // call the continuation unless we're handling an exception - movq THREAD_EXCEPTION(%rbx),%rsi - cmpq $0,%rsi - jne LOCAL(vmInvoke_handleException) - jmp *CONTINUATION_ADDRESS(%rcx) - -LOCAL(vmInvoke_handleException): - // we're handling an exception - call the exception handler instead - movq $0,THREAD_EXCEPTION(%rbx) - movq THREAD_EXCEPTION_STACK_ADJUSTMENT(%rbx),%rdi - subq %rdi,%rsp - movq THREAD_EXCEPTION_OFFSET(%rbx),%rdi - movq %rsi,(%rsp,%rdi,1) - - jmp *THREAD_EXCEPTION_HANDLER(%rbx) - -LOCAL(vmInvoke_exit): +# include "continuations-x86.S" #endif // AVIAN_CONTINUATIONS // restore callee-saved registers (below the stack pointer, but in @@ -215,21 +278,10 @@ LOCAL(vmJumpAndInvoke_argumentTest): // enabled int3 #endif // not AVIAN_CONTINUATIONS + +#endif // not __MINGW32__ #elif defined __i386__ - -#define THREAD_CONTINUATION 96 -#define THREAD_EXCEPTION 36 -#define THREAD_EXCEPTION_STACK_ADJUSTMENT 100 -#define THREAD_EXCEPTION_OFFSET 104 -#define THREAD_EXCEPTION_HANDLER 108 - -#define CONTINUATION_NEXT 4 -#define CONTINUATION_ADDRESS 16 -#define CONTINUATION_RETURN_ADDRESS_OFFSET 20 -#define CONTINUATION_FRAME_POINTER_OFFSET 24 -#define CONTINUATION_LENGTH 28 -#define CONTINUATION_BODY 32 #define CALLEE_SAVED_REGISTER_FOOTPRINT 16 @@ -286,72 +338,7 @@ vmInvoke_returnAddress: movl %ecx,%esp #ifdef AVIAN_CONTINUATIONS - // call the next continuation, if any - movl THREAD_CONTINUATION(%ebx),%ecx - cmpl $0,%ecx - je LOCAL(vmInvoke_exit) - - // allocate a frame of size (continuation.length * BYTES_PER_WORD) - movl CONTINUATION_LENGTH(%ecx),%esi - shll $2,%esi - subl %esi,%esp - - // copy the continuation body into the frame - leal CONTINUATION_BODY(%ecx),%edi - - push %eax - push %edx - - movl $0,%edx - jmp LOCAL(vmInvoke_continuationTest) - -LOCAL(vmInvoke_continuationLoop): - movl (%edi,%edx,1),%eax - movl %eax,8(%esp,%edx,1) - addl $4,%edx - -LOCAL(vmInvoke_continuationTest): - cmpl %esi,%edx - jb LOCAL(vmInvoke_continuationLoop) - - pop %edx - pop %eax - - // set the return address to vmInvoke_returnAddress - movl CONTINUATION_RETURN_ADDRESS_OFFSET(%ecx),%edi - call LOCAL(getPC) - addl $_GLOBAL_OFFSET_TABLE_,%esi - movl vmInvoke_returnAddress@GOT(%esi),%esi - movl %esi,(%esp,%edi,1) - - // save the current base pointer in the frame and update it - movl CONTINUATION_FRAME_POINTER_OFFSET(%ecx),%edi - movl %ebp,(%esp,%edi,1) - addl %esp,%edi - movl %edi,%ebp - - // consume the continuation - movl CONTINUATION_NEXT(%ecx),%edi - movl %edi,THREAD_CONTINUATION(%ebx) - - // call the continuation unless we're handling an exception - movl THREAD_EXCEPTION(%ebx),%esi - cmpl $0,%esi - jne LOCAL(vmInvoke_handleException) - - jmp *CONTINUATION_ADDRESS(%ecx) - -LOCAL(vmInvoke_handleException): - // we're handling an exception - call the exception handler instead - movl $0,THREAD_EXCEPTION(%ebx) - movl THREAD_EXCEPTION_STACK_ADJUSTMENT(%ebx),%edi - subl %edi,%esp - movl THREAD_EXCEPTION_OFFSET(%ebx),%edi - movl %esi,(%esp,%edi,1) - - jmp *THREAD_EXCEPTION_HANDLER(%ebx) - -LOCAL(vmInvoke_exit): +# include "continuations-x86.S" #endif // AVIAN_CONTINUATIONS // restore callee-saved registers @@ -443,5 +430,5 @@ LOCAL(vmJumpAndInvoke_argumentTest): 
#endif // AVIAN_CONTINUATIONS #else -# error unsupported platform -#endif +#error unsupported architecture +#endif //def __x86_64__ diff --git a/src/continuations-x86.S b/src/continuations-x86.S new file mode 100644 index 0000000000..bc19003758 --- /dev/null +++ b/src/continuations-x86.S @@ -0,0 +1,161 @@ +#ifdef __x86_64__ + +#define THREAD_CONTINUATION 168 +#define THREAD_EXCEPTION 64 +#define THREAD_EXCEPTION_STACK_ADJUSTMENT 176 +#define THREAD_EXCEPTION_OFFSET 184 +#define THREAD_EXCEPTION_HANDLER 192 + +#define CONTINUATION_NEXT 8 +#define CONTINUATION_ADDRESS 32 +#define CONTINUATION_RETURN_ADDRESS_OFFSET 40 +#define CONTINUATION_FRAME_POINTER_OFFSET 48 +#define CONTINUATION_LENGTH 56 +#define CONTINUATION_BODY 64 + + // call the next continuation, if any + movq THREAD_CONTINUATION(%rbx),%rcx + cmpq $0,%rcx + je LOCAL(vmInvoke_exit) + + // allocate a frame of size (continuation.length * BYTES_PER_WORD) + // + CALLEE_SAVED_REGISTER_FOOTPRINT + movq CONTINUATION_LENGTH(%rcx),%rsi + shlq $3,%rsi + subq %rsi,%rsp + subq $CALLEE_SAVED_REGISTER_FOOTPRINT,%rsp + + // copy the continuation body into the frame + leaq CONTINUATION_BODY(%rcx),%rdi + + movq $0,%r9 + jmp LOCAL(vmInvoke_continuationTest) + +LOCAL(vmInvoke_continuationLoop): + movq (%rdi,%r9,1),%r8 + movq %r8,(%rsp,%r9,1) + addq $8,%r9 + +LOCAL(vmInvoke_continuationTest): + cmpq %rsi,%r9 + jb LOCAL(vmInvoke_continuationLoop) + + // set the return address to vmInvoke_returnAddress + movq CONTINUATION_RETURN_ADDRESS_OFFSET(%rcx),%rdi + movq vmInvoke_returnAddress@GOTPCREL(%rip),%r10 + movq %r10,(%rsp,%rdi,1) + + // save the current base pointer in the frame and update it + movq CONTINUATION_FRAME_POINTER_OFFSET(%rcx),%rdi + movq %rbp,(%rsp,%rdi,1) + addq %rsp,%rdi + movq %rdi,%rbp + + // consume the continuation + movq CONTINUATION_NEXT(%rcx),%rdi + movq %rdi,THREAD_CONTINUATION(%rbx) + + // call the continuation unless we're handling an exception + movq THREAD_EXCEPTION(%rbx),%rsi + cmpq $0,%rsi + jne LOCAL(vmInvoke_handleException) + jmp *CONTINUATION_ADDRESS(%rcx) + +LOCAL(vmInvoke_handleException): + // we're handling an exception - call the exception handler instead + movq $0,THREAD_EXCEPTION(%rbx) + movq THREAD_EXCEPTION_STACK_ADJUSTMENT(%rbx),%rdi + subq %rdi,%rsp + movq THREAD_EXCEPTION_OFFSET(%rbx),%rdi + movq %rsi,(%rsp,%rdi,1) + + jmp *THREAD_EXCEPTION_HANDLER(%rbx) + +LOCAL(vmInvoke_exit): + +#elif defined __i386__ + +#define THREAD_CONTINUATION 96 +#define THREAD_EXCEPTION 36 +#define THREAD_EXCEPTION_STACK_ADJUSTMENT 100 +#define THREAD_EXCEPTION_OFFSET 104 +#define THREAD_EXCEPTION_HANDLER 108 + +#define CONTINUATION_NEXT 4 +#define CONTINUATION_ADDRESS 16 +#define CONTINUATION_RETURN_ADDRESS_OFFSET 20 +#define CONTINUATION_FRAME_POINTER_OFFSET 24 +#define CONTINUATION_LENGTH 28 +#define CONTINUATION_BODY 32 + + // call the next continuation, if any + movl THREAD_CONTINUATION(%ebx),%ecx + cmpl $0,%ecx + je LOCAL(vmInvoke_exit) + + // allocate a frame of size (continuation.length * BYTES_PER_WORD) + movl CONTINUATION_LENGTH(%ecx),%esi + shll $2,%esi + subl %esi,%esp + + // copy the continuation body into the frame + leal CONTINUATION_BODY(%ecx),%edi + + push %eax + push %edx + + movl $0,%edx + jmp LOCAL(vmInvoke_continuationTest) + +LOCAL(vmInvoke_continuationLoop): + movl (%edi,%edx,1),%eax + movl %eax,8(%esp,%edx,1) + addl $4,%edx + +LOCAL(vmInvoke_continuationTest): + cmpl %esi,%edx + jb LOCAL(vmInvoke_continuationLoop) + + pop %edx + pop %eax + + // set the return address to vmInvoke_returnAddress + movl 
CONTINUATION_RETURN_ADDRESS_OFFSET(%ecx),%edi + call LOCAL(getPC) + addl $_GLOBAL_OFFSET_TABLE_,%esi + movl vmInvoke_returnAddress@GOT(%esi),%esi + movl %esi,(%esp,%edi,1) + + // save the current base pointer in the frame and update it + movl CONTINUATION_FRAME_POINTER_OFFSET(%ecx),%edi + movl %ebp,(%esp,%edi,1) + addl %esp,%edi + movl %edi,%ebp + + // consume the continuation + movl CONTINUATION_NEXT(%ecx),%edi + movl %edi,THREAD_CONTINUATION(%ebx) + + // call the continuation unless we're handling an exception + movl THREAD_EXCEPTION(%ebx),%esi + cmpl $0,%esi + jne LOCAL(vmInvoke_handleException) + + jmp *CONTINUATION_ADDRESS(%ecx) + +LOCAL(vmInvoke_handleException): + // we're handling an exception - call the exception handler instead + movl $0,THREAD_EXCEPTION(%ebx) + movl THREAD_EXCEPTION_STACK_ADJUSTMENT(%ebx),%edi + subl %edi,%esp + movl THREAD_EXCEPTION_OFFSET(%ebx),%edi + movl %esi,(%esp,%edi,1) + + jmp *THREAD_EXCEPTION_HANDLER(%ebx) + +LOCAL(vmInvoke_exit): + +#else +# error unsupported architecture +#endif + diff --git a/src/jnienv.cpp b/src/jnienv.cpp index 09fad54fdd..3f9d30b4b8 100644 --- a/src/jnienv.cpp +++ b/src/jnienv.cpp @@ -62,8 +62,10 @@ AttachCurrentThread(Machine* m, Thread** t, void*) *t = static_cast(m->localThread->get()); if (*t == 0) { *t = m->processor->makeThread(m, 0, m->rootThread); + m->system->attach(&((*t)->runnable)); enter(*t, Thread::ActiveState); + enter(*t, Thread::IdleState); m->localThread->set(*t); } diff --git a/src/machine.cpp b/src/machine.cpp index 3946e8f647..c327140f47 100644 --- a/src/machine.cpp +++ b/src/machine.cpp @@ -3292,3 +3292,29 @@ vmPrintTrace(Thread* t) t->m->processor->walkStack(t, &v); } + +// also for debugging +void* +vmAddressFromLine(Thread* t, object m, unsigned line) +{ + object code = methodCode(t, m); + printf("code: %p\n", code); + object lnt = codeLineNumberTable(t, code); + printf("lnt: %p\n", lnt); + + if (lnt) { + unsigned last = 0; + unsigned bottom = 0; + unsigned top = lineNumberTableLength(t, lnt); + for(unsigned i = bottom; i < top; i++) + { + LineNumber* ln = lineNumberTableBody(t, lnt, i); + if(lineNumberLine(ln) == line) + return reinterpret_cast(lineNumberIp(ln)); + else if(lineNumberLine(ln) > line) + return reinterpret_cast(last); + last = lineNumberIp(ln); + } + } + return 0; +} diff --git a/src/machine.h b/src/machine.h index bd0a582f22..3ab32d8fad 100644 --- a/src/machine.h +++ b/src/machine.h @@ -2448,4 +2448,7 @@ dumpHeap(Thread* t, FILE* out); void vmPrintTrace(vm::Thread* t); +void* +vmAddressFromLine(vm::Thread* t, vm::object m, unsigned line); + #endif//MACHINE_H diff --git a/src/windows.cpp b/src/windows.cpp index 300bcefc7b..a31089e606 100644 --- a/src/windows.cpp +++ b/src/windows.cpp @@ -55,7 +55,7 @@ run(void* r) return 0; } -const bool Verbose = false; +const bool Verbose = true; const unsigned Waiting = 1 << 0; const unsigned Notified = 1 << 1; @@ -574,11 +574,20 @@ class MySystem: public System { if (handler) { segFaultHandler = handler; +#ifdef __i386__ oldSegFaultHandler = SetUnhandledExceptionFilter(handleException); +#elif defined __x86_64__ + AddVectoredExceptionHandler(1, handleException); + oldSegFaultHandler = 0; +#endif return 0; } else if (segFaultHandler) { segFaultHandler = 0; +#ifdef __i386__ SetUnhandledExceptionFilter(oldSegFaultHandler); +#elif defined __x86_64__ + //do nothing, handlers are never "unregistered" anyway +#endif return 0; } else { return 1; @@ -600,10 +609,15 @@ class MySystem: public System { CONTEXT context; rv = GetThreadContext(target->thread, 
&context); expect(this, rv); - +#ifdef __i386__ visitor->visit(reinterpret_cast(context.Eip), reinterpret_cast(context.Ebp), reinterpret_cast(context.Esp)); +#elif defined __x86_64__ + visitor->visit(reinterpret_cast(context.Rip), + reinterpret_cast(context.Rbp), + reinterpret_cast(context.Rsp)); +#endif rv = ResumeThread(target->thread); expect(this, rv != -1); @@ -798,7 +812,7 @@ dump(LPEXCEPTION_POINTERS e, const char* directory) char name[MAX_PATH]; _timeb tb; _ftime(&tb); - snprintf(name, MAX_PATH, "%s\\crash-%lld.mdmp", directory, + snprintf(name, MAX_PATH, "%s\\crash-%"LLD".mdmp", directory, (static_cast(tb.time) * 1000) + static_cast(tb.millitm)); @@ -830,18 +844,31 @@ LONG CALLBACK handleException(LPEXCEPTION_POINTERS e) { if (e->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) { +#ifdef __i386__ void* ip = reinterpret_cast(e->ContextRecord->Eip); void* base = reinterpret_cast(e->ContextRecord->Ebp); void* stack = reinterpret_cast(e->ContextRecord->Esp); void* thread = reinterpret_cast(e->ContextRecord->Ebx); +#elif defined __x86_64__ + void* ip = reinterpret_cast(e->ContextRecord->Rip); + void* base = reinterpret_cast(e->ContextRecord->Rbp); + void* stack = reinterpret_cast(e->ContextRecord->Rsp); + void* thread = reinterpret_cast(e->ContextRecord->Rbx); +#endif bool jump = system->segFaultHandler->handleSignal (&ip, &base, &stack, &thread); - +#ifdef __i386__ e->ContextRecord->Eip = reinterpret_cast(ip); e->ContextRecord->Ebp = reinterpret_cast(base); e->ContextRecord->Esp = reinterpret_cast(stack); e->ContextRecord->Ebx = reinterpret_cast(thread); +#elif defined __x86_64__ + e->ContextRecord->Rip = reinterpret_cast(ip); + e->ContextRecord->Rbp = reinterpret_cast(base); + e->ContextRecord->Rsp = reinterpret_cast(stack); + e->ContextRecord->Rbx = reinterpret_cast(thread); +#endif if (jump) { return EXCEPTION_CONTINUE_EXECUTION; diff --git a/src/x86.S b/src/x86.S index 67b31f872e..41e22aefe4 100644 --- a/src/x86.S +++ b/src/x86.S @@ -8,16 +8,145 @@ There is NO WARRANTY for this software. See license.txt for details. 
*/ + #include "types.h" #define LOCAL(x) .L##x + +#if defined __APPLE__ || defined __MINGW32__ || defined __CYGWIN32__ +# define GLOBAL(x) _##x +#else +# define GLOBAL(x) x +#endif .text - + #ifdef __x86_64__ + +#ifdef __MINGW32__ + +.globl GLOBAL(vmNativeCall) +GLOBAL(vmNativeCall): + pushq %rbp + //save nonvolatile registers + pushq %r12 + pushq %r13 + pushq %r14 + pushq %r15 + movq %rsp, %rbp -.globl vmNativeCall -vmNativeCall: + + // %rcx: function + // %rdx: arguments + // %r8: arguments count + // %r9: return type + + movq %rcx, %r10 + movq %rdx, %r11 + movq %r8, %r12 + movq %r9, %r13 + + // %r10: function + // %r11: arguments + // %r12: arguments count + // %r13: return type + + //allocate initial stack space + subq $32, %rsp + + //first arg + cmp $0, %r12 + je LOCAL(call) + movq 0(%r11),%rcx + movq 0(%r11),%xmm0 + subq $1, %r12 + + //second arg + cmp $0, %r12 + je LOCAL(call) + movq 8(%r11),%rdx + movq 8(%r11),%xmm1 + subq $1, %r12 + + //third arg + cmp $0, %r12 + je LOCAL(call) + movq 16(%r11),%r8 + movq 16(%r11),%xmm2 + subq $1, %r12 + + //fourth arg + cmp $0, %r12 + je LOCAL(call) + movq 24(%r11),%r9 + movq 24(%r11),%xmm3 + subq $1, %r12 + + + //calculate stack space for arguments, aligned + movq $8, %r15 + leaq (%r15, %r12, 8), %r15 + andq $0xFFFFFFFFFFFFFFF0, %r15 + + //reserve stack space for arguments + subq %r15, %rsp + + //reset the counter + addq $3, %r12 + jmp LOCAL(loopend) + +LOCAL(loop): + movq (%r11, %r12, 8), %r14 + movq %r14, (%rsp, %r12, 8); + subq $1, %r12 + +LOCAL(loopend): + //we don't need to move arg 3 and lower + cmpq $3, %r12 + jne LOCAL(loop) + +LOCAL(call): + call *%r10 + +LOCAL(void): + cmpq $VOID_TYPE,%r13 + jne LOCAL(float) + jmp LOCAL(exit) + +LOCAL(float): + cmpq $FLOAT_TYPE,%r13 + je LOCAL(copy) + cmpq $DOUBLE_TYPE,%r13 + jne LOCAL(exit) + +LOCAL(copy): + movq %xmm0,%rax + +LOCAL(exit): + + movq %rbp, %rsp + //return nonvolatile registers to their former state + popq %r15 + popq %r14 + popq %r13 + popq %r12 + + popq %rbp + ret + +.globl GLOBAL(vmJump) +GLOBAL(vmJump): + movq 8(%rsp),%rax + movq 16(%rsp),%rdx + movq %rdx,%rbp + movq %r8,%rsp + movq %r9,%rbx + jmp *%rcx + +#else // not __MINGW32__ + +.globl GLOBAL(vmNativeCall) +GLOBAL(vmNativeCall): pushq %rbp movq %rsp,%rbp @@ -113,24 +242,21 @@ LOCAL(exit): popq %rbp ret -.globl vmJump -vmJump: +.globl GLOBAL(vmJump) +GLOBAL(vmJump): movq %rsi,%rbp movq %rdx,%rsp movq %rcx,%rbx movq %r8,%rax movq %r9,%rdx jmp *%rdi + +#endif // not __MINGW32__ #elif defined __i386__ -# if defined __APPLE__ || defined __MINGW32__ || defined __CYGWIN32__ -.globl _vmNativeCall -_vmNativeCall: -# else -.globl vmNativeCall -vmNativeCall: -# endif +.globl GLOBAL(vmNativeCall) +GLOBAL(vmNativeCall): pushl %ebp movl %esp,%ebp @@ -201,13 +327,8 @@ LOCAL(exit): popl %ebp ret -# if defined __APPLE__ || defined __MINGW32__ || defined __CYGWIN32__ -.globl _vmJump -_vmJump: -# else -.globl vmJump -vmJump: -# endif +.globl GLOBAL(vmJump) +GLOBAL(vmJump): movl 4(%esp),%esi movl 8(%esp),%ebp movl 16(%esp),%ebx @@ -216,6 +337,4 @@ vmJump: movl 12(%esp),%esp jmp *%esi -#else -# error unsupported platform -#endif +#endif //def __x86_64__ diff --git a/src/x86.cpp b/src/x86.cpp index 50c8383862..258f7e5821 100644 --- a/src/x86.cpp +++ b/src/x86.cpp @@ -7,6 +7,9 @@ There is NO WARRANTY for this software. See license.txt for details. 
*/ + + +#if (defined __i386__) || (defined __x86_64__) #include "assembler.h" #include "vector.h" @@ -399,81 +402,114 @@ padding(AlignmentPadding* p, unsigned start, unsigned offset, return padding; } -void -encode(Context* c, uint8_t* instruction, unsigned length, int a, int b, - int32_t displacement, int index, unsigned scale) -{ - c->code.append(instruction, length); +#define REX_W 0x48 +#define REX_R 0x44 +#define REX_X 0x42 +#define REX_B 0x41 +#define REX_NONE 0x40 - uint8_t width; - if (displacement == 0 and b != rbp) { - width = 0; - } else if (isInt8(displacement)) { - width = 0x40; - } else { - width = 0x80; +void maybeRex(Context* c, unsigned size, int a, int index, int base, bool always) { + if(BytesPerWord == 8) { + uint8_t byte; + if(size == 8) { + byte = REX_W; + } else { + byte = REX_NONE; + } + if(a != NoRegister && (a & 8)) byte |= REX_R; + if(index != NoRegister && (index & 8)) byte |= REX_X; + if(base != NoRegister && (base & 8)) byte |= REX_B; + if(always or byte != REX_NONE) c->code.append(byte); } +} - if (index == -1) { - c->code.append(width | (a << 3) | b); - if (b == rsp) { - c->code.append(0x24); +inline void maybeRex(Context* c, unsigned size, Assembler::Register* a, + Assembler::Register* b) { + maybeRex(c, size, a->low, NoRegister, b->low, false); +} + +inline void alwaysRex(Context* c, unsigned size, Assembler::Register* a, + Assembler::Register* b) { + maybeRex(c, size, a->low, NoRegister, b->low, true); +} + +inline void maybeRex(Context* c, unsigned size, Assembler::Register* a) { + maybeRex(c, size, NoRegister, NoRegister, a->low, false); +} + +inline void maybeRex(Context* c, unsigned size, Assembler::Register* a, + Assembler::Memory* b) { + maybeRex(c, size, a->low, b->index, b->base, false); +} + +inline void maybeRex(Context* c, unsigned size, Assembler::Memory* a) { + maybeRex(c, size, NoRegister, a->index, a->base, false); +} + +inline int regCode(int a) { + return a & 7; +} + +inline int regCode(Assembler::Register* a) { + return regCode(a->low); +} + +inline void modrm(Context* c, uint8_t mod, int a, int b) { + c->code.append(mod | (regCode(b) << 3) | regCode(a)); +} + +inline void modrm(Context* c, uint8_t mod, Assembler::Register* a, + Assembler::Register* b) { + modrm(c, mod, a->low, b->low); +} + +inline void sib(Context* c, unsigned scale, int index, int base) { + c->code.append((log(scale) << 6) | (regCode(index) << 3) | regCode(base)); +} + +inline void modrmSib(Context* c, int width, int a, int scale, int index, int base) { + if(index == NoRegister) { + modrm(c, width, base, a); + if(regCode(base) == rsp) { + sib(c, 0x00, rsp, rsp); } } else { - assert(c, b != rsp); - c->code.append(width | (a << 3) | 4); - c->code.append((log(scale) << 6) | (index << 3) | b); + modrm(c, width, rsp, a); + sib(c, scale, index, base); } +} - if (displacement == 0 and b != rbp) { - // do nothing - } else if (isInt8(displacement)) { - c->code.append(displacement); +inline void modrmSibImm(Context* c, int a, int scale, int index, int base, int offset) { + if(offset == 0 && regCode(base) != rbp) { + modrmSib(c, 0x00, a, scale, index, base); + } else if(isInt8(offset)) { + modrmSib(c, 0x40, a, scale, index, base); + c->code.append(offset); } else { - c->code.append4(displacement); + modrmSib(c, 0x80, a, scale, index, base); + c->code.append4(offset); } } + -void -rex(Context* c, uint8_t mask, int r) -{ - if (BytesPerWord == 8) { - c->code.append(mask | ((r & 8) >> 3)); - } +inline void modrmSibImm(Context* c, Assembler::Register* a, + Assembler::Memory* b) { + 
modrmSibImm(c, a->low, b->scale, b->index, b->base, b->offset); } -void -rex(Context* c) -{ - rex(c, 0x48, rax); +inline void opcode(Context* c, uint8_t op) { + c->code.append(op); } -void -encode(Context* c, uint8_t instruction, int a, Assembler::Memory* b, bool rex) -{ - if (rex) { - ::rex(c); - } - - encode(c, &instruction, 1, a, b->base, b->offset, b->index, b->scale); -} - -void -encode2(Context* c, uint16_t instruction, int a, Assembler::Memory* b, - bool rex) -{ - if (rex) { - ::rex(c); - } - - uint8_t i[2] = { instruction >> 8, instruction & 0xff }; - encode(c, i, 2, a, b->base, b->offset, b->index, b->scale); +inline void opcode(Context* c, uint8_t op1, uint8_t op2) { + c->code.append(op1); + c->code.append(op2); } void return_(Context* c) { - c->code.append(0xc3); + opcode(c, 0xc3); } void @@ -485,7 +521,7 @@ unconditional(Context* c, unsigned jump, Assembler::Constant* a) { appendOffsetTask(c, a->value, offset(c), 5); - c->code.append(jump); + opcode(c, jump); c->code.append4(0); } @@ -494,8 +530,7 @@ conditional(Context* c, unsigned condition, Assembler::Constant* a) { appendOffsetTask(c, a->value, offset(c), 6); - c->code.append(0x0f); - c->code.append(condition); + opcode(c, 0x0f, condition); c->code.append4(0); } @@ -565,9 +600,8 @@ jumpR(Context* c, unsigned size UNUSED, Assembler::Register* a) { assert(c, size == BytesPerWord); - if (a->low & 8) rex(c, 0x40, a->low); - c->code.append(0xff); - c->code.append(0xe0 | (a->low & 7)); + maybeRex(c, 4, a); + opcode(c, 0xff, 0xe0 + regCode(a)); } void @@ -582,8 +616,10 @@ void jumpM(Context* c, unsigned size UNUSED, Assembler::Memory* a) { assert(c, size == BytesPerWord); - - encode(c, 0xff, 4, a, false); + + maybeRex(c, 4, a); + opcode(c, 0xff); + modrmSibImm(c, rsp, a->scale, a->index, a->base, a->offset); } void @@ -653,17 +689,19 @@ callR(Context* c, unsigned size UNUSED, Assembler::Register* a) { assert(c, size == BytesPerWord); - if (a->low & 8) rex(c, 0x40, a->low); - c->code.append(0xff); - c->code.append(0xd0 | (a->low & 7)); + //maybeRex.W has no meaning here so we disable it + maybeRex(c, 4, a); + opcode(c, 0xff, 0xd0 + regCode(a)); } void callM(Context* c, unsigned size UNUSED, Assembler::Memory* a) { assert(c, size == BytesPerWord); - - encode(c, 0xff, 2, a, false); + + maybeRex(c, 4, a); + opcode(c, 0xff); + modrmSibImm(c, rdx, a->scale, a->index, a->base, a->offset); } void @@ -689,7 +727,8 @@ pushR(Context* c, unsigned size, Assembler::Register* a) pushR(c, 4, &ah); pushR(c, 4, a); } else { - c->code.append(0x50 | a->low); + maybeRex(c, 4, a); + opcode(c, 0x50 + regCode(a)); } } @@ -706,7 +745,8 @@ popR(Context* c, unsigned size, Assembler::Register* a) popR(c, 4, a); popR(c, 4, &ah); } else { - c->code.append(0x58 | a->low); + maybeRex(c, 4, a); + opcode(c, 0x58 + regCode(a)); if (BytesPerWord == 8 and size == 4) { moveRR(c, 4, a, 8, a); } @@ -724,7 +764,8 @@ popM(Context* c, unsigned size, Assembler::Memory* a) } else { assert(c, BytesPerWord == 4 or size == 8); - encode(c, 0x8f, 0, a, false); + opcode(c, 0x8f); + modrmSibImm(c, 0, a->scale, a->index, a->base, a->offset); } } @@ -747,9 +788,8 @@ negateR(Context* c, unsigned size, Assembler::Register* a) addCarryCR(c, 4, &zero, &ah); negateR(c, 4, &ah); } else { - if (size == 8) rex(c); - c->code.append(0xf7); - c->code.append(0xd8 | a->low); + maybeRex(c, size, a); + opcode(c, 0xf7, 0xd8 + regCode(a)); } } @@ -763,8 +803,8 @@ negateRR(Context* c, unsigned aSize, Assembler::Register* a, } void -moveCR2(Context* c, unsigned, Assembler::Constant* a, - unsigned 
bSize, Assembler::Register* b, unsigned promiseOffset) +moveCR2(Context* c, UNUSED unsigned aSize, Assembler::Constant* a, + UNUSED unsigned bSize, Assembler::Register* b, unsigned promiseOffset) { if (BytesPerWord == 4 and bSize == 8) { int64_t v = a->value->value(); @@ -780,8 +820,8 @@ moveCR2(Context* c, unsigned, Assembler::Constant* a, moveCR(c, 4, &al, 4, b); moveCR(c, 4, &ah, 4, &bh); } else { - rex(c, 0x48, b->low); - c->code.append(0xb8 | b->low); + maybeRex(c, BytesPerWord, b); + opcode(c, 0xb8 + regCode(b)); if (a->value->resolved()) { c->code.appendAddress(a->value->value()); } else { @@ -806,15 +846,16 @@ swapRR(Context* c, unsigned aSize UNUSED, Assembler::Register* a, assert(c, aSize == bSize); assert(c, aSize == BytesPerWord); - rex(c); - c->code.append(0x87); - c->code.append(0xc0 | (b->low << 3) | a->low); + alwaysRex(c, aSize, a, b); + opcode(c, 0x87); + modrm(c, 0xc0, b, a); } void moveRR(Context* c, unsigned aSize, Assembler::Register* a, - unsigned bSize, Assembler::Register* b) + UNUSED unsigned bSize, Assembler::Register* b) { + if (BytesPerWord == 4 and aSize == 8 and bSize == 8) { Assembler::Register ah(a->high); Assembler::Register bh(b->high); @@ -839,31 +880,28 @@ moveRR(Context* c, unsigned aSize, Assembler::Register* a, moveRR(c, BytesPerWord, a, BytesPerWord, b); moveRR(c, 1, b, BytesPerWord, b); } else { - rex(c); - c->code.append(0x0f); - c->code.append(0xbe); - c->code.append(0xc0 | (b->low << 3) | a->low); + alwaysRex(c, aSize, b, a); + opcode(c, 0x0f, 0xbe); + modrm(c, 0xc0, a, b); } break; case 2: - rex(c); - c->code.append(0x0f); - c->code.append(0xbf); - c->code.append(0xc0 | (b->low << 3) | a->low); + alwaysRex(c, aSize, b, a); + opcode(c, 0x0f, 0xbf); + modrm(c, 0xc0, a, b); break; - case 8: case 4: - if (aSize == 4 and bSize == 8) { - if (BytesPerWord == 8) { - rex(c); - c->code.append(0x63); - c->code.append(0xc0 | (b->low << 3) | a->low); - } else { - if (a->low == rax and b->low == rax and b->high == rdx) { - c->code.append(0x99); // cdq - } else { + if (bSize == 8) { + if (BytesPerWord == 8) { + alwaysRex(c, aSize, b, a); + opcode(c, 0x63); + modrm(c, 0xc0, a, b); + } else { + if (a->low == rax and b->low == rax and b->high == rdx) { + opcode(c, 0x99); //cdq + } else { assert(c, b->low == rax and b->high == rdx); moveRR(c, 4, a, 4, b); @@ -872,14 +910,22 @@ moveRR(Context* c, unsigned aSize, Assembler::Register* a, } } else { if (a->low != b->low) { - rex(c); - c->code.append(0x89); - c->code.append(0xc0 | (a->low << 3) | b->low); + alwaysRex(c, aSize, a, b); + opcode(c, 0x89); + modrm(c, 0xc0, b, a); } } + break; + + case 8: + if (a->low != b->low){ + maybeRex(c, aSize, a, b); + opcode(c, 0x89); + modrm(c, 0xc0, b, a); + } break; } - } + } } void @@ -888,38 +934,49 @@ moveMR(Context* c, unsigned aSize, Assembler::Memory* a, { switch (aSize) { case 1: - encode2(c, 0x0fbe, b->low, a, true); + maybeRex(c, bSize, b, a); + opcode(c, 0x0f, 0xbe); + modrmSibImm(c, b, a); break; case 2: - encode2(c, 0x0fbf, b->low, a, true); + maybeRex(c, bSize, b, a); + opcode(c, 0x0f, 0xbf); + modrmSibImm(c, b, a); break; case 4: - case 8: - if (aSize == 4 and bSize == 8) { - if (BytesPerWord == 8) { - encode(c, 0x63, b->low, a, true); - } else { + if (BytesPerWord == 8) { + maybeRex(c, bSize, b, a); + opcode(c, 0x63); + modrmSibImm(c, b, a); + } else { + if (bSize == 8) { assert(c, b->low == rax and b->high == rdx); moveMR(c, 4, a, 4, b); moveRR(c, 4, b, 8, b); - } - } else { - if (BytesPerWord == 4 and aSize == 8 and bSize == 8) { - Assembler::Memory ah(a->base, 
a->offset + 4, a->index, a->scale); - Assembler::Register bh(b->high); - - moveMR(c, 4, a, 4, b); - moveMR(c, 4, &ah, 4, &bh); - } else if (BytesPerWord == 8 and aSize == 4) { - encode(c, 0x63, b->low, a, true); } else { - encode(c, 0x8b, b->low, a, true); + maybeRex(c, bSize, b, a); + opcode(c, 0x8b); + modrmSibImm(c, b, a); } } break; + + case 8: + if (BytesPerWord == 4 and bSize == 8) { + Assembler::Memory ah(a->base, a->offset + 4, a->index, a->scale); + Assembler::Register bh(b->high); + + moveMR(c, 4, a, 4, b); + moveMR(c, 4, &ah, 4, &bh); + } else { + maybeRex(c, bSize, b, a); + opcode(c, 0x8b); + modrmSibImm(c, b, a); + } + break; default: abort(c); } @@ -930,41 +987,48 @@ moveRM(Context* c, unsigned aSize, Assembler::Register* a, unsigned bSize UNUSED, Assembler::Memory* b) { assert(c, aSize == bSize); + + switch (aSize) { + case 1: + maybeRex(c, bSize, a, b); + opcode(c, 0x88); + modrmSibImm(c, a, b); + break; - if (BytesPerWord == 4 and aSize == 8) { - Assembler::Register ah(a->high); - Assembler::Memory bh(b->base, b->offset + 4, b->index, b->scale); + case 2: + opcode(c, 0x66); + maybeRex(c, bSize, a, b); + opcode(c, 0x89); + modrmSibImm(c, a, b); + break; - moveRM(c, 4, a, 4, b); - moveRM(c, 4, &ah, 4, &bh); - } else if (BytesPerWord == 8 and aSize == 4) { - encode(c, 0x89, a->low, b, false); - } else { - switch (aSize) { - case 1: - if (BytesPerWord == 8) { - if (a->low > rbx) { - encode2(c, 0x4088, a->low, b, false); - } else { - encode(c, 0x88, a->low, b, false); - } - } else { - assert(c, a->low <= rbx); - - encode(c, 0x88, a->low, b, false); - } + case 4: + if (BytesPerWord == 8) { + maybeRex(c, bSize, a, b); + opcode(c, 0x89); + modrmSibImm(c, a, b); break; - - case 2: - encode2(c, 0x6689, a->low, b, false); - break; - - case BytesPerWord: - encode(c, 0x89, a->low, b, true); - break; - - default: abort(c); + } else { + opcode(c, 0x89); + modrmSibImm(c, a, b); } + break; + + case 8: + if(BytesPerWord == 8) { + maybeRex(c, bSize, a, b); + opcode(c, 0x89); + modrmSibImm(c, a, b); + } else { + Assembler::Register ah(a->high); + Assembler::Memory bh(b->base, b->offset + 4, b->index, b->scale); + + moveRM(c, 4, a, 4, b); + moveRM(c, 4, &ah, 4, &bh); + } + break; + + default: abort(c); } } @@ -994,17 +1058,24 @@ moveCM(Context* c, unsigned aSize UNUSED, Assembler::Constant* a, { switch (bSize) { case 1: - encode(c, 0xc6, 0, b, false); + maybeRex(c, bSize, b); + opcode(c, 0xc6); + modrmSibImm(c, 0, b->scale, b->index, b->base, b->offset); c->code.append(a->value->value()); break; case 2: - encode2(c, 0x66c7, 0, b, false); + opcode(c, 0x66); + maybeRex(c, bSize, b); + opcode(c, 0xc7); + modrmSibImm(c, 0, b->scale, b->index, b->base, b->offset); c->code.append2(a->value->value()); break; case 4: - encode(c, 0xc7, 0, b, false); + maybeRex(c, bSize, b); + opcode(c, 0xc7); + modrmSibImm(c, 0, b->scale, b->index, b->base, b->offset); if (a->value->resolved()) { c->code.append4(a->value->value()); } else { @@ -1014,9 +1085,11 @@ moveCM(Context* c, unsigned aSize UNUSED, Assembler::Constant* a, break; case 8: { - if (BytesPerWord == 8) { + if (BytesPerWord == 8) { if(a->value->resolved() and isInt32(a->value->value())) { - encode(c, 0xc7, 0, b, true); + maybeRex(c, bSize, b); + opcode(c, 0xc7); + modrmSibImm(c, 0, b->scale, b->index, b->base, b->offset); c->code.append4(a->value->value()); } else { Assembler::Register tmp(c->client->acquireTemporary()); @@ -1024,8 +1097,8 @@ moveCM(Context* c, unsigned aSize UNUSED, Assembler::Constant* a, moveRM(c, 8, &tmp, 8, b); 
c->client->releaseTemporary(tmp.low); } - } else { - Assembler::Constant ah(shiftMaskPromise(c, a->value, 32, 0xFFFFFFFF)); + } else { + Assembler::Constant ah(shiftMaskPromise(c, a->value, 32, 0xFFFFFFFF)); Assembler::Constant al(shiftMaskPromise(c, a->value, 0, 0xFFFFFFFF)); Assembler::Memory bh(b->base, b->offset + 4, b->index, b->scale); @@ -1045,10 +1118,9 @@ moveZRR(Context* c, unsigned aSize, Assembler::Register* a, { switch (aSize) { case 2: - rex(c); - c->code.append(0x0f); - c->code.append(0xb7); - c->code.append(0xc0 | (b->low << 3) | a->low); + alwaysRex(c, aSize, b, a); + opcode(c, 0x0f, 0xb7); + modrm(c, 0xc0, a, b); break; default: abort(c); @@ -1061,8 +1133,10 @@ moveZMR(Context* c, unsigned aSize UNUSED, Assembler::Memory* a, { assert(c, bSize == BytesPerWord); assert(c, aSize == 2); - - encode2(c, 0x0fb7, b->low, a, true); + + maybeRex(c, bSize, b, a); + opcode(c, 0x0f, 0xb7); + modrmSibImm(c, b->low, a->scale, a->index, a->base, a->offset); } void @@ -1071,9 +1145,9 @@ addCarryRR(Context* c, unsigned size, Assembler::Register* a, { assert(c, BytesPerWord == 8 or size == 4); - if (size == 8) rex(c); - c->code.append(0x11); - c->code.append(0xc0 | (a->low << 3) | b->low); + maybeRex(c, size, a, b); + opcode(c, 0x11); + modrm(c, 0xc0, b, a); } void @@ -1089,9 +1163,9 @@ addRR(Context* c, unsigned aSize, Assembler::Register* a, addRR(c, 4, a, 4, b); addCarryRR(c, 4, &ah, &bh); } else { - if (aSize == 8) rex(c); - c->code.append(0x01); - c->code.append(0xc0 | (a->low << 3) | b->low); + maybeRex(c, aSize, a, b); + opcode(c, 0x01); + modrm(c, 0xc0, b, a); } } @@ -1099,12 +1173,11 @@ void addCarryCR(Context* c, unsigned size UNUSED, Assembler::Constant* a, Assembler::Register* b) { - assert(c, BytesPerWord == 8 or size == 4); int64_t v = a->value->value(); if (isInt8(v)) { - c->code.append(0x83); - c->code.append(0xd0 | b->low); + maybeRex(c, size, b); + opcode(c, 0x83, 0xd0 + regCode(b)); c->code.append(v); } else { abort(c); @@ -1132,14 +1205,12 @@ addCR(Context* c, unsigned aSize, Assembler::Constant* a, addCarryCR(c, 4, &ah, &bh); } else { if (isInt32(v)) { - if (bSize == 8) rex(c); + maybeRex(c, aSize, b); if (isInt8(v)) { - c->code.append(0x83); - c->code.append(0xc0 | b->low); + opcode(c, 0x83, 0xc0 + regCode(b)); c->code.append(v); } else { - c->code.append(0x81); - c->code.append(0xc0 | b->low); + opcode(c, 0x81, 0xc0 + regCode(b)); c->code.append4(v); } } else { @@ -1160,8 +1231,7 @@ subtractBorrowCR(Context* c, unsigned size UNUSED, Assembler::Constant* a, int64_t v = a->value->value(); if (isInt8(v)) { - c->code.append(0x83); - c->code.append(0xd8 | b->low); + opcode(c, 0x83, 0xd8 + regCode(b)); c->code.append(v); } else { abort(c); @@ -1193,14 +1263,12 @@ subtractCR(Context* c, unsigned aSize, Assembler::Constant* a, subtractBorrowCR(c, 4, &ah, &bh); } else { if (isInt32(v)) { - if (bSize == 8) rex(c); + maybeRex(c, aSize, b); if (isInt8(v)) { - c->code.append(0x83); - c->code.append(0xe8 | b->low); + opcode(c, 0x83, 0xe8 + regCode(b)); c->code.append(v); } else { - c->code.append(0x81); - c->code.append(0xe8 | b->low); + opcode(c, 0x81, 0xe8 + regCode(b)); c->code.append4(v); } } else { @@ -1219,9 +1287,9 @@ subtractBorrowRR(Context* c, unsigned size, Assembler::Register* a, { assert(c, BytesPerWord == 8 or size == 4); - if (size == 8) rex(c); - c->code.append(0x19); - c->code.append(0xc0 | (a->low << 3) | b->low); + maybeRex(c, size, a, b); + opcode(c, 0x19); + modrm(c, 0xc0, b, a); } void @@ -1229,7 +1297,7 @@ subtractRR(Context* c, unsigned aSize, 
Assembler::Register* a, unsigned bSize UNUSED, Assembler::Register* b) { assert(c, aSize == bSize); - + if (BytesPerWord == 4 and aSize == 8) { Assembler::Register ah(a->high); Assembler::Register bh(b->high); @@ -1237,9 +1305,9 @@ subtractRR(Context* c, unsigned aSize, Assembler::Register* a, subtractRR(c, 4, a, 4, b); subtractBorrowRR(c, 4, &ah, &bh); } else { - if (aSize == 8) rex(c); - c->code.append(0x29); - c->code.append(0xc0 | (a->low << 3) | b->low); + maybeRex(c, aSize, a, b); + opcode(c, 0x29); + modrm(c, 0xc0, b, a); } } @@ -1249,6 +1317,7 @@ andRR(Context* c, unsigned aSize, Assembler::Register* a, { assert(c, aSize == bSize); + if (BytesPerWord == 4 and aSize == 8) { Assembler::Register ah(a->high); Assembler::Register bh(b->high); @@ -1256,9 +1325,9 @@ andRR(Context* c, unsigned aSize, Assembler::Register* a, andRR(c, 4, a, 4, b); andRR(c, 4, &ah, 4, &bh); } else { - if (aSize == 8) rex(c); - c->code.append(0x21); - c->code.append(0xc0 | (a->low << 3) | b->low); + maybeRex(c, aSize, a, b); + opcode(c, 0x21); + modrm(c, 0xc0, b, a); } } @@ -1283,14 +1352,12 @@ andCR(Context* c, unsigned aSize, Assembler::Constant* a, andCR(c, 4, &ah, 4, &bh); } else { if (isInt32(v)) { - if (bSize == 8) rex(c); + maybeRex(c, aSize, b); if (isInt8(v)) { - c->code.append(0x83); - c->code.append(0xe0 | b->low); + opcode(c, 0x83, 0xe0 + regCode(b)); c->code.append(v); } else { - c->code.append(0x81); - c->code.append(0xe0 | b->low); + opcode(c, 0x81, 0xe0 + regCode(b)); c->code.append4(v); } } else { @@ -1315,9 +1382,9 @@ orRR(Context* c, unsigned aSize, Assembler::Register* a, orRR(c, 4, a, 4, b); orRR(c, 4, &ah, 4, &bh); } else { - if (aSize == 8) rex(c); - c->code.append(0x09); - c->code.append(0xc0 | (a->low << 3) | b->low); + maybeRex(c, aSize, a, b); + opcode(c, 0x09); + modrm(c, 0xc0, b, a); } } @@ -1342,14 +1409,12 @@ orCR(Context* c, unsigned aSize, Assembler::Constant* a, orCR(c, 4, &ah, 4, &bh); } else { if (isInt32(v)) { - if (bSize == 8) rex(c); + maybeRex(c, aSize, b); if (isInt8(v)) { - c->code.append(0x83); - c->code.append(0xc8 | b->low); + opcode(c, 0x83, 0xc8 + regCode(b)); c->code.append(v); } else { - c->code.append(0x81); - c->code.append(0xc8 | b->low); + opcode(c, 0x81, 0xc8 + regCode(b)); c->code.append4(v); } } else { @@ -1373,9 +1438,9 @@ xorRR(Context* c, unsigned aSize, Assembler::Register* a, xorRR(c, 4, a, 4, b); xorRR(c, 4, &ah, 4, &bh); } else { - if (aSize == 8) rex(c); - c->code.append(0x31); - c->code.append(0xc0 | (a->low << 3) | b->low); + maybeRex(c, aSize, a, b); + opcode(c, 0x31); + modrm(c, 0xc0, b, a); } } @@ -1400,14 +1465,12 @@ xorCR(Context* c, unsigned aSize, Assembler::Constant* a, xorCR(c, 4, &ah, 4, &bh); } else { if (isInt32(v)) { - if (bSize == 8) rex(c); + maybeRex(c, aSize, b); if (isInt8(v)) { - c->code.append(0x83); - c->code.append(0xf0 | b->low); + opcode(c, 0x83, 0xf0 + regCode(b)); c->code.append(v); } else { - c->code.append(0x81); - c->code.append(0xf0 | b->low); + opcode(c, 0x81, 0xf0 + regCode(b)); c->code.append4(v); } } else { @@ -1426,6 +1489,7 @@ multiplyRR(Context* c, unsigned aSize, Assembler::Register* a, { assert(c, aSize == bSize); + if (BytesPerWord == 4 and aSize == 8) { assert(c, b->high == rdx); assert(c, b->low != rax); @@ -1444,16 +1508,14 @@ multiplyRR(Context* c, unsigned aSize, Assembler::Register* a, addRR(c, 4, &bh, 4, b); // mul a->low,%eax%edx - c->code.append(0xf7); - c->code.append(0xe0 | a->low); + opcode(c, 0xf7, 0xe0 + a->low); addRR(c, 4, b, 4, &bh); moveRR(c, 4, &axdx, 4, b); } else { - if (aSize == 8) 
rex(c); - c->code.append(0x0f); - c->code.append(0xaf); - c->code.append(0xc0 | (b->low << 3) | a->low); + maybeRex(c, aSize, b, a); + opcode(c, 0x0f, 0xaf); + modrm(c, 0xc0, a, b); } } @@ -1462,11 +1524,11 @@ compareRR(Context* c, unsigned aSize, Assembler::Register* a, unsigned bSize UNUSED, Assembler::Register* b) { assert(c, aSize == bSize); - assert(c, BytesPerWord == 8 or aSize == 4); - if (aSize == 8) rex(c); - c->code.append(0x39); - c->code.append(0xc0 | (a->low << 3) | b->low); + + maybeRex(c, aSize, a, b); + opcode(c, 0x39); + modrm(c, 0xc0, b, a); } void @@ -1478,14 +1540,12 @@ compareCR(Context* c, unsigned aSize, Assembler::Constant* a, if (a->value->resolved() and isInt32(a->value->value())) { int64_t v = a->value->value(); - if (aSize == 8) rex(c); + maybeRex(c, aSize, b); if (isInt8(v)) { - c->code.append(0x83); - c->code.append(0xf8 | b->low); + opcode(c, 0x83, 0xf8 + regCode(b)); c->code.append(v); } else { - c->code.append(0x81); - c->code.append(0xf8 | b->low); + opcode(c, 0x81, 0xf8 + regCode(b)); c->code.append4(v); } } else { @@ -1515,14 +1575,14 @@ multiplyCR(Context* c, unsigned aSize, Assembler::Constant* a, int64_t v = a->value->value(); if (v != 1) { if (isInt32(v)) { - if (bSize == 8) rex(c); + maybeRex(c, bSize, b, b); if (isInt8(v)) { - c->code.append(0x6b); - c->code.append(0xc0 | (b->low << 3) | b->low); + opcode(c, 0x6b); + modrm(c, 0xc0, b, b); c->code.append(v); } else { - c->code.append(0x69); - c->code.append(0xc0 | (b->low << 3) | b->low); + opcode(c, 0x69); + modrm(c, 0xc0, b, b); c->code.append4(v); } } else { @@ -1545,7 +1605,9 @@ compareRM(Context* c, unsigned aSize, Assembler::Register* a, if (BytesPerWord == 8 and aSize == 4) { moveRR(c, 4, a, 8, a); } - encode(c, 0x39, a->low, b, true); + maybeRex(c, bSize, a, b); + opcode(c, 0x39); + modrmSibImm(c, a, b); } void @@ -1557,7 +1619,9 @@ compareCM(Context* c, unsigned aSize, Assembler::Constant* a, if (a->value->resolved()) { int64_t v = a->value->value(); - encode(c, isInt8(v) ? 0x83 : 0x81, 7, b, true); + maybeRex(c, aSize, b); + opcode(c, isInt8(v) ? 
0x83 : 0x81); + modrmSibImm(c, rdi, b->scale, b->index, b->base, b->offset); if (isInt8(v)) { c->code.append(v); @@ -1575,8 +1639,8 @@ compareCM(Context* c, unsigned aSize, Assembler::Constant* a, } void -longCompare(Context* c, Assembler::Operand* al, Assembler::Operand* ah, - Assembler::Register* bl, Assembler::Operand* bh, +longCompare(Context* c, Assembler::Operand* al, UNUSED Assembler::Operand* ah, + Assembler::Register* bl, UNUSED Assembler::Operand* bh, BinaryOperationType compare) { ResolvedPromise negativePromise(-1); @@ -1591,19 +1655,17 @@ longCompare(Context* c, Assembler::Operand* al, Assembler::Operand* ah, if (BytesPerWord == 8) { compare(c, 8, al, 8, bl); - c->code.append(0x0f); - c->code.append(0x8c); // jl + opcode(c, 0x0f, 0x8c); // jl unsigned less = c->code.length(); c->code.append4(0); - c->code.append(0x0f); - c->code.append(0x8f); // jg + opcode(c, 0x0f, 0x8f); // jg unsigned greater = c->code.length(); c->code.append4(0); moveCR(c, 4, &zero, 4, bl); - c->code.append(0xe9); // jmp + opcode(c, 0xe9); // jmp unsigned nextFirst = c->code.length(); c->code.append4(0); @@ -1612,7 +1674,7 @@ longCompare(Context* c, Assembler::Operand* al, Assembler::Operand* ah, moveCR(c, 4, &negative, 4, bl); - c->code.append(0xe9); // jmp + opcode(c, 0xe9); // jmp unsigned nextSecond = c->code.length(); c->code.append4(0); @@ -1629,25 +1691,21 @@ longCompare(Context* c, Assembler::Operand* al, Assembler::Operand* ah, } else { compare(c, 4, ah, 4, bh); - c->code.append(0x0f); - c->code.append(0x8c); // jl + opcode(c, 0x0f, 0x8c); //jl unsigned less = c->code.length(); c->code.append4(0); - c->code.append(0x0f); - c->code.append(0x8f); // jg + opcode(c, 0x0f, 0x8f); //jg unsigned greater = c->code.length(); c->code.append4(0); compare(c, 4, al, 4, bl); - c->code.append(0x0f); - c->code.append(0x82); // ja + opcode(c, 0x0f, 0x82); //ja unsigned above = c->code.length(); c->code.append4(0); - c->code.append(0x0f); - c->code.append(0x87); // jb + opcode(c, 0x0f, 0x87); //jb unsigned below = c->code.length(); c->code.append4(0); @@ -1665,7 +1723,7 @@ longCompare(Context* c, Assembler::Operand* al, Assembler::Operand* ah, moveCR(c, 4, &negative, 4, bl); - c->code.append(0xe9); // jmp + opcode(c, 0xe9); // jmp unsigned nextSecond = c->code.length(); c->code.append4(0); @@ -1689,7 +1747,6 @@ void divideRR(Context* c, unsigned aSize, Assembler::Register* a, unsigned bSize UNUSED, Assembler::Register* b UNUSED) { - assert(c, BytesPerWord == 8 or aSize == 4); assert(c, aSize == bSize); assert(c, b->low == rax); @@ -1697,18 +1754,16 @@ divideRR(Context* c, unsigned aSize, Assembler::Register* a, c->client->save(rdx); - if (aSize == 8) rex(c); - c->code.append(0x99); // cdq - if (aSize == 8) rex(c); - c->code.append(0xf7); - c->code.append(0xf8 | a->low); + maybeRex(c, aSize, a, b); + opcode(c, 0x99); // cdq + maybeRex(c, aSize, b, a); + opcode(c, 0xf7, 0xf8 + regCode(a)); } void remainderRR(Context* c, unsigned aSize, Assembler::Register* a, unsigned bSize UNUSED, Assembler::Register* b) { - assert(c, BytesPerWord == 8 or aSize == 4); assert(c, aSize == bSize); assert(c, b->low == rax); @@ -1716,11 +1771,10 @@ remainderRR(Context* c, unsigned aSize, Assembler::Register* a, c->client->save(rdx); - if (aSize == 8) rex(c); - c->code.append(0x99); // cdq - if (aSize == 8) rex(c); - c->code.append(0xf7); - c->code.append(0xf8 | a->low); + maybeRex(c, aSize, a, b); + opcode(c, 0x99); // cdq + maybeRex(c, aSize, b, a); + opcode(c, 0xf7, 0xf8 + regCode(a)); Assembler::Register dx(rdx); moveRR(c, 
BytesPerWord, &dx, BytesPerWord, b); @@ -1760,10 +1814,10 @@ longCompareRR(Context* c, unsigned aSize UNUSED, Assembler::Register* a, } void -doShift(Context* c, void (*shift) +doShift(Context* c, UNUSED void (*shift) (Context*, unsigned, Assembler::Register*, unsigned, Assembler::Register*), - int type, unsigned aSize, Assembler::Constant* a, + int type, UNUSED unsigned aSize, Assembler::Constant* a, unsigned bSize, Assembler::Register* b) { int64_t v = a->value->value(); @@ -1775,13 +1829,11 @@ doShift(Context* c, void (*shift) moveCR(c, 4, a, 4, &cx); shift(c, aSize, &cx, bSize, b); } else { - if (bSize == 8) rex(c); + maybeRex(c, bSize, b); if (v == 1) { - c->code.append(0xd1); - c->code.append(type | b->low); + opcode(c, 0xd1, type + regCode(b)); } else if (isInt8(v)) { - c->code.append(0xc1); - c->code.append(type | b->low); + opcode(c, 0xc1, type + regCode(b)); c->code.append(v); } else { abort(c); @@ -1790,36 +1842,32 @@ doShift(Context* c, void (*shift) } void -shiftLeftRR(Context* c, unsigned aSize, Assembler::Register* a, +shiftLeftRR(Context* c, UNUSED unsigned aSize, Assembler::Register* a, unsigned bSize, Assembler::Register* b) { assert(c, a->low == rcx); - + if (BytesPerWord == 4 and bSize == 8) { // shld - c->code.append(0x0f); - c->code.append(0xa5); - c->code.append(0xc0 | (b->low << 3) | b->high); + opcode(c, 0x0f, 0xa5); + modrm(c, 0xc0, b->high, b->low); // shl - c->code.append(0xd3); - c->code.append(0xe0 | b->low); + opcode(c, 0xd3, 0xe0 + b->low); ResolvedPromise promise(32); Assembler::Constant constant(&promise); compareCR(c, aSize, &constant, aSize, a); - c->code.append(0x0f); - c->code.append(0x8c); // jl + opcode(c, 0x0f, 0x8c); //jl c->code.append4(2 + 2); Assembler::Register bh(b->high); moveRR(c, 4, b, 4, &bh); // 2 bytes xorRR(c, 4, b, 4, b); // 2 bytes } else { - if (bSize == 8) rex(c); - c->code.append(0xd3); - c->code.append(0xe0 | b->low); + maybeRex(c, bSize, a, b); + opcode(c, 0xd3, 0xe0 + regCode(b)); } } @@ -1831,40 +1879,34 @@ shiftLeftCR(Context* c, unsigned aSize, Assembler::Constant* a, } void -shiftRightRR(Context* c, unsigned aSize, Assembler::Register* a, +shiftRightRR(Context* c, UNUSED unsigned aSize, Assembler::Register* a, unsigned bSize, Assembler::Register* b) { assert(c, a->low == rcx); - if (BytesPerWord == 4 and bSize == 8) { // shrd - c->code.append(0x0f); - c->code.append(0xad); - c->code.append(0xc0 | (b->high << 3) | b->low); + opcode(c, 0x0f, 0xad); + modrm(c, 0xc0, b->low, b->high); // sar - c->code.append(0xd3); - c->code.append(0xf8 | b->high); + opcode(c, 0xd3, 0xf8 + b->high); ResolvedPromise promise(32); Assembler::Constant constant(&promise); compareCR(c, aSize, &constant, aSize, a); - c->code.append(0x0f); - c->code.append(0x8c); // jl + opcode(c, 0x0f, 0x8c); //jl c->code.append4(2 + 3); Assembler::Register bh(b->high); moveRR(c, 4, &bh, 4, b); // 2 bytes // sar 31,high - c->code.append(0xc1); - c->code.append(0xf8 | b->high); + opcode(c, 0xc1, 0xf8 + b->high); c->code.append(31); } else { - if (bSize == 8) rex(c); - c->code.append(0xd3); - c->code.append(0xf8 | b->low); + maybeRex(c, bSize, a, b); + opcode(c, 0xd3, 0xf8 + regCode(b)); } } @@ -1876,36 +1918,32 @@ shiftRightCR(Context* c, unsigned aSize, Assembler::Constant* a, } void -unsignedShiftRightRR(Context* c, unsigned aSize, Assembler::Register* a, +unsignedShiftRightRR(Context* c, UNUSED unsigned aSize, Assembler::Register* a, unsigned bSize, Assembler::Register* b) { assert(c, a->low == rcx); if (BytesPerWord == 4 and bSize == 8) { // shrd - 
c->code.append(0x0f); - c->code.append(0xad); - c->code.append(0xc0 | (b->high << 3) | b->low); + opcode(c, 0x0f, 0xad); + modrm(c, 0xc0, b->low, b->high); // shr - c->code.append(0xd3); - c->code.append(0xe8 | b->high); + opcode(c, 0xd3, 0xe8 + b->high); ResolvedPromise promise(32); Assembler::Constant constant(&promise); compareCR(c, aSize, &constant, aSize, a); - c->code.append(0x0f); - c->code.append(0x8c); // jl + opcode(c, 0x0f, 0x8c); //jl c->code.append4(2 + 2); Assembler::Register bh(b->high); moveRR(c, 4, &bh, 4, b); // 2 bytes xorRR(c, 4, &bh, 4, &bh); // 2 bytes } else { - if (bSize == 8) rex(c); - c->code.append(0xd3); - c->code.append(0xe8 | b->low); + maybeRex(c, bSize, a, b); + opcode(c, 0xd3, 0xe8 + regCode(b)); } } @@ -2015,7 +2053,7 @@ class MyArchitecture: public Assembler::Architecture { } virtual unsigned registerCount() { - return 8;//BytesPerWord == 4 ? 8 : 16; + return (BytesPerWord == 4 ? 8 : 16); } virtual int stack() { @@ -2063,9 +2101,13 @@ class MyArchitecture: public Assembler::Architecture { } virtual unsigned frameFootprint(unsigned footprint) { +#ifdef __WINDOWS__ + return max(footprint, StackAlignmentInWords); +#else return max(footprint > argumentRegisterCount() ? footprint - argumentRegisterCount() : 0, StackAlignmentInWords); +#endif } virtual unsigned argumentFootprint(unsigned footprint) { @@ -2073,13 +2115,27 @@ class MyArchitecture: public Assembler::Architecture { } virtual unsigned argumentRegisterCount() { - return (BytesPerWord == 4 ? 0 : 6); +#ifdef __WINDOWS__ + if (BytesPerWord == 8) return 4; else +#elif defined __LINUX__ + if (BytesPerWord == 8) return 6; else +#endif + return 0; } virtual int argumentRegister(unsigned index) { assert(&c, BytesPerWord == 8); - switch (index) { +#ifdef __WINDOWS__ + case 0: + return rcx; + case 1: + return rdx; + case 2: + return r8; + case 3: + return r9; +#elif defined __LINUX__ case 0: return rdi; case 1: @@ -2092,6 +2148,7 @@ class MyArchitecture: public Assembler::Architecture { return r8; case 5: return r9; +#endif default: abort(&c); } @@ -2114,7 +2171,7 @@ class MyArchitecture: public Assembler::Architecture { { if (BytesPerWord == 4 or op == Call or op == Jump) { uint8_t* instruction = static_cast(returnAddress) - 5; - + assert(&c, ((op == Call or op == LongCall) and *instruction == 0xE8) or ((op == Jump or op == LongJump) and *instruction == 0xE9)); @@ -2611,3 +2668,5 @@ makeAssembler(System* system, Allocator* allocator, Zone* zone, } // namespace vm + +#endif //(defined __i386__) || (defined __x86_64__) diff --git a/src/x86.h b/src/x86.h index d746083c3b..a7f8e4dd8f 100644 --- a/src/x86.h +++ b/src/x86.h @@ -58,11 +58,24 @@ dynamicCall(void* function, uintptr_t* arguments, uint8_t*, # define THREAD_REGISTER(context) (context->uc_mcontext.gregs[REG_RBX]) extern "C" uint64_t +# ifdef __MINGW32__ +vmNativeCall(void* function, void* stack, unsigned stackSize, + unsigned returnType); +# else vmNativeCall(void* function, void* stack, unsigned stackSize, void* gprTable, void* sseTable, unsigned returnType); +# endif namespace vm { +# ifdef __MINGW32__ +inline uint64_t +dynamicCall(void* function, uint64_t* arguments, UNUSED uint8_t* argumentTypes, + unsigned argumentCount, unsigned, unsigned returnType) +{ + return vmNativeCall(function, arguments, argumentCount, returnType); +} +# else inline uint64_t dynamicCall(void* function, uint64_t* arguments, uint8_t* argumentTypes, unsigned argumentCount, unsigned, unsigned returnType) @@ -103,6 +116,7 @@ dynamicCall(void* function, uint64_t* arguments, 
uint8_t* argumentTypes, (gprIndex ? gprTable : 0), (sseIndex ? sseTable : 0), returnType); } +#endif } // namespace vm
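
Note on the x86.cpp changes above: the old rex()/encode() helpers are replaced by maybeRex/modrm/sib/modrmSibImm so the assembler can emit the REX.R/X/B bits needed to address r8-r15 (registerCount() now returns 16 when BytesPerWord == 8), including the r8/r9 argument registers of the win64 ABI. As a rough illustration of what those helpers compute -- a minimal standalone sketch using a plain std::vector, not Avian's Context/Assembler types -- a 64-bit register-to-register move is encoded like this:

#include <cstdint>
#include <cstdio>
#include <vector>

// Registers use the usual x86-64 numbering: rax=0 .. rdi=7, r8=8 .. r15=15.
void emitMovRR(std::vector<uint8_t>& out, int src, int dst) {
  uint8_t rex = 0x48;                                  // REX.W: 64-bit operand size
  if (src & 8) rex |= 0x04;                            // REX.R: extend the reg field
  if (dst & 8) rex |= 0x01;                            // REX.B: extend the r/m field
  out.push_back(rex);
  out.push_back(0x89);                                 // MOV r/m64, r64
  out.push_back(0xc0 | ((src & 7) << 3) | (dst & 7));  // ModRM: mod=11, reg=src, rm=dst
}

int main() {
  std::vector<uint8_t> code;
  emitMovRR(code, 9, 8);                               // movq %r9,%r8
  for (uint8_t b : code) std::printf("%02x ", b);      // prints: 4d 89 c8
  std::printf("\n");
  return 0;
}

This mirrors the 8-byte case of moveRR above (maybeRex, opcode 0x89, modrm 0xc0); the real helpers additionally handle memory operands through the SIB byte via modrmSibImm.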
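
Similarly, the LD/LLD/ULD change in common.h exists because long stays 32 bits on Win64 and MinGW programs print through msvcrt, whose printf does not understand the glibc-style "%lld" length modifier; the Microsoft "I64" form is what msvcrt accepts for 64-bit values (used above in windows.cpp's crash-dump filename). A self-contained sketch of the same idea -- the macro name mirrors common.h, but this is not the Avian header itself:

#include <cstdio>

// Pick the printf length modifier for 64-bit integers the way src/common.h does:
// msvcrt (MinGW) understands "I64d"; elsewhere use the standard form.
#ifdef __MINGW32__
#  define LLD "I64d"
#else
#  define LLD "lld"
#endif

int main() {
  long long timestamp = 1234567890123LL;
  std::printf("crash-%" LLD ".mdmp\n", timestamp);  // e.g. crash-1234567890123.mdmp
  return 0;
}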