remove powerpc support

Joshua Warner 2014-04-29 13:26:40 -06:00
parent 32d25d79fd
commit 41adb74eb1
35 changed files with 12 additions and 4196 deletions

View File

@ -59,7 +59,7 @@ Supported Platforms
Avian can currently target the following platforms:
* Linux (i386, x86_64, ARM, and 32-bit PowerPC)
* Linux (i386, x86_64, and ARM)
* Windows (i386 and x86_64)
* Mac OS X (i386 and x86_64)
* Apple iOS (i386 and ARM)
@ -86,7 +86,7 @@ certain flags described below, all of which are optional.
$ make \
platform={linux,windows,darwin,freebsd} \
arch={i386,x86_64,powerpc,arm} \
arch={i386,x86_64,arm} \
process={compile,interpret} \
mode={debug,debug-fast,fast,small} \
lzma=<lzma source directory> \
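For example, with PowerPC removed, a build for one of the remaining targets might look like this (an illustration only, combining values from the list above; all flags remain optional):
$ make platform=linux arch=arm process=compile mode=fast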

View File

@ -701,9 +701,6 @@ extern "C" JNIEXPORT jobjectArray JNICALL
#elif defined ARCH_x86_64
e->SetObjectArrayElement(array, index++, e->NewStringUTF("os.arch=x86_64"));
#elif defined ARCH_powerpc
e->SetObjectArrayElement(array, index++, e->NewStringUTF("os.arch=ppc"));
#elif defined ARCH_arm
e->SetObjectArrayElement(array, index++, e->NewStringUTF("os.arch=arm"));

View File

@ -67,8 +67,6 @@ typedef unsigned __int64 uint64_t;
# define ARCH_x86_32
# elif defined __x86_64__
# define ARCH_x86_64
# elif (defined __POWERPC__) || (defined __powerpc__)
# define ARCH_powerpc
# elif defined __arm__
# define ARCH_arm
# endif

View File

@ -41,7 +41,7 @@ const bool TailCalls = true;
const bool TailCalls = false;
#endif
#if (defined AVIAN_USE_FRAME_POINTER) || (defined ARCH_powerpc)
#ifdef AVIAN_USE_FRAME_POINTER
const bool UseFramePointer = true;
#else
const bool UseFramePointer = false;

View File

@ -24,7 +24,6 @@ Architecture* makeArchitectureNative(vm::System* system, bool useNativeFeatures)
Architecture* makeArchitectureX86(vm::System* system, bool useNativeFeatures);
Architecture* makeArchitectureArm(vm::System* system, bool useNativeFeatures);
Architecture* makeArchitecturePowerpc(vm::System* system, bool useNativeFeatures);
} // namespace codegen
} // namespace avian

View File

@ -114,7 +114,6 @@ public:
enum Architecture {
x86 = AVIAN_ARCH_X86,
x86_64 = AVIAN_ARCH_X86_64,
PowerPC = AVIAN_ARCH_POWERPC,
Arm = AVIAN_ARCH_ARM,
UnknownArch = AVIAN_ARCH_UNKNOWN
};
@ -132,10 +131,6 @@ public:
inline bool operator == (const PlatformInfo& other) {
return format == other.format && arch == other.arch;
}
inline bool isLittleEndian() {
return arch != PowerPC;
}
};
class Platform {

View File

@ -7,12 +7,7 @@ build-arch := $(shell uname -m \
| sed 's/^i.86$$/i386/' \
| sed 's/^x86pc$$/i386/' \
| sed 's/amd64/x86_64/' \
| sed 's/^arm.*$$/arm/' \
| sed 's/ppc/powerpc/')
ifeq (Power,$(filter Power,$(build-arch)))
build-arch = powerpc
endif
| sed 's/^arm.*$$/arm/')
build-platform := \
$(shell uname -s | tr [:upper:] [:lower:] \
@ -428,35 +423,10 @@ codeimage-symbols = _binary_codeimage_bin_start:_binary_codeimage_bin_end
developer-dir := $(shell if test -d /Developer/Platforms/$(target).platform/Developer/SDKs; then echo /Developer; \
else echo /Applications/Xcode.app/Contents/Developer; fi)
ifeq ($(build-arch),powerpc)
ifneq ($(arch),$(build-arch))
bootimage-cflags += -DTARGET_OPPOSITE_ENDIAN
endif
endif
ifeq ($(arch),i386)
pointer-size = 4
endif
ifeq ($(arch),powerpc)
asm = powerpc
pointer-size = 4
ifneq ($(arch),$(build-arch))
bootimage-cflags += -DTARGET_OPPOSITE_ENDIAN
endif
ifneq ($(platform),darwin)
ifneq ($(arch),$(build-arch))
cxx = powerpc-linux-gnu-g++
cc = powerpc-linux-gnu-gcc
ar = powerpc-linux-gnu-ar
ranlib = powerpc-linux-gnu-ranlib
strip = powerpc-linux-gnu-strip
endif
endif
endif
ifeq ($(arch),arm)
asm = arm
pointer-size = 4
@ -709,13 +679,6 @@ ifeq ($(platform),darwin)
lflags += $(flags)
endif
ifeq ($(arch),powerpc)
classpath-extra-cflags += -arch ppc -mmacosx-version-min=${OSX_SDK_VERSION}
cflags += -arch ppc -mmacosx-version-min=${OSX_SDK_VERSION}
asmflags += -arch ppc -mmacosx-version-min=${OSX_SDK_VERSION}
lflags += -arch ppc -mmacosx-version-min=${OSX_SDK_VERSION}
endif
ifeq ($(arch),i386)
classpath-extra-cflags += \
-arch i386 -mmacosx-version-min=${OSX_SDK_VERSION}
@ -1153,12 +1116,9 @@ x86-assembler-sources = $(wildcard $(src)/codegen/target/x86/*.cpp)
arm-assembler-sources = $(wildcard $(src)/codegen/target/arm/*.cpp)
powerpc-assembler-sources = $(wildcard $(src)/codegen/target/powerpc/*.cpp)
all-assembler-sources = \
$(x86-assembler-sources) \
$(arm-assembler-sources) \
$(powerpc-assembler-sources)
$(arm-assembler-sources)
native-assembler-sources = $($(target-asm)-assembler-sources)
@ -1424,10 +1384,6 @@ ifeq ($(target-arch),x86_64)
cflags += -DAVIAN_TARGET_ARCH=AVIAN_ARCH_X86_64
endif
ifeq ($(target-arch),powerpc)
cflags += -DAVIAN_TARGET_ARCH=AVIAN_ARCH_POWERPC
endif
ifeq ($(target-arch),arm)
cflags += -DAVIAN_TARGET_ARCH=AVIAN_ARCH_ARM
endif

View File

@ -41,8 +41,6 @@ compileTimeMemoryBarrier()
#if (defined ARCH_x86_32) || (defined ARCH_x86_64)
# include "x86.h"
#elif defined ARCH_powerpc
# include "powerpc.h"
#elif defined ARCH_arm
# include "arm.h"
#else

View File

@ -28,10 +28,6 @@
#include "float.h"
#include <stdint.h>
#ifdef powerpc
# undef powerpc
#endif
#ifdef linux
# undef linux
#endif
@ -126,8 +122,6 @@ typedef intptr_t intptr_alias_t;
# define ARCH_x86_32
# elif defined __x86_64__
# define ARCH_x86_64
# elif (defined __POWERPC__) || (defined __powerpc__)
# define ARCH_powerpc
# elif defined __arm__
# define ARCH_arm
# else
@ -151,7 +145,7 @@ typedef intptr_t __attribute__((__may_alias__)) intptr_alias_t;
# define PATH_SEPARATOR ':'
#endif // not PLATFORM_WINDOWS
#if (defined ARCH_x86_32) || (defined ARCH_powerpc) || (defined ARCH_arm)
#if (defined ARCH_x86_32) || (defined ARCH_arm)
# define LD "ld"
# if (defined _MSC_VER) || ((defined __MINGW32__) && __GNUC__ >= 4)
# define LLD "I64d"

View File

@ -28,7 +28,6 @@
#define AVIAN_ARCH_X86 (1 << 8)
#define AVIAN_ARCH_X86_64 (2 << 8)
#define AVIAN_ARCH_ARM (3 << 8)
#define AVIAN_ARCH_POWERPC (4 << 8)
#endif

View File

@ -184,10 +184,6 @@ struct JavaVMVTable {
void* reserved1;
void* reserved2;
#if (! TARGET_RT_MAC_CFM) && defined(__ppc__)
void* cfm_vectors[4];
#endif
jint
(JNICALL *DestroyJavaVM)
(JavaVM*);
@ -207,10 +203,6 @@ struct JavaVMVTable {
jint
(JNICALL *AttachCurrentThreadAsDaemon)
(JavaVM*, JNIEnv**, void*);
#if TARGET_RT_MAC_CFM && defined(__ppc__)
void* real_functions[5];
#endif
};
struct JNIEnvVTable {
@ -219,10 +211,6 @@ struct JNIEnvVTable {
void* reserved2;
void* reserved3;
#if (! TARGET_RT_MAC_CFM) && defined(__ppc__)
void* cfm_vectors[225];
#endif
jint
(JNICALL *GetVersion)
(JNIEnv*);
@ -1139,9 +1127,6 @@ struct JNIEnvVTable {
(JNICALL *GetDirectBufferCapacity)
(JNIEnv*, jobject);
#if TARGET_RT_MAC_CFM && defined(__ppc__)
void* real_functions[228];
#endif
};
inline void

View File

@ -1,249 +0,0 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef POWERPC_H
#define POWERPC_H
#include "avian/types.h"
#include "avian/common.h"
#ifdef __APPLE__
# include "mach/mach_types.h"
# include "mach/ppc/thread_act.h"
# include "mach/ppc/thread_status.h"
# define THREAD_STATE PPC_THREAD_STATE
# define THREAD_STATE_TYPE ppc_thread_state_t
# define THREAD_STATE_COUNT PPC_THREAD_STATE_COUNT
# if __DARWIN_UNIX03 && defined(_STRUCT_PPC_EXCEPTION_STATE)
# define FIELD(x) __##x
# else
# define FIELD(x) x
# endif
# define THREAD_STATE_IP(state) ((state).FIELD(srr0))
# define THREAD_STATE_STACK(state) ((state).FIELD(r1))
# define THREAD_STATE_THREAD(state) ((state).FIELD(r13))
# define THREAD_STATE_LINK(state) ((state).FIELD(lr))
# define IP_REGISTER(context) \
THREAD_STATE_IP(context->uc_mcontext->FIELD(ss))
# define STACK_REGISTER(context) \
THREAD_STATE_STACK(context->uc_mcontext->FIELD(ss))
# define THREAD_REGISTER(context) \
THREAD_STATE_THREAD(context->uc_mcontext->FIELD(ss))
# define LINK_REGISTER(context) \
THREAD_STATE_LINK(context->uc_mcontext->FIELD(ss))
#define VA_LIST(x) (&(x))
#else // not __APPLE__
# define IP_REGISTER(context) (context->uc_mcontext.regs->gpr[32])
# define STACK_REGISTER(context) (context->uc_mcontext.regs->gpr[1])
# define THREAD_REGISTER(context) (context->uc_mcontext.regs->gpr[13])
# define LINK_REGISTER(context) (context->uc_mcontext.regs->gpr[36])
#define VA_LIST(x) (x)
#endif // not __APPLE__
extern "C" uint64_t
vmNativeCall(void* function, unsigned stackTotal, void* memoryTable,
unsigned memoryCount, unsigned memoryBase,
void* gprTable, void* fprTable, unsigned returnType);
namespace vm {
inline void
trap()
{
asm("trap");
}
inline void
memoryBarrier()
{
__asm__ __volatile__("sync": : :"memory");
}
inline void
storeStoreMemoryBarrier()
{
memoryBarrier();
}
inline void
storeLoadMemoryBarrier()
{
memoryBarrier();
}
inline void
loadMemoryBarrier()
{
memoryBarrier();
}
inline void
syncInstructionCache(const void* start, unsigned size)
{
const unsigned CacheLineSize = 32;
const uintptr_t Mask = ~(CacheLineSize - 1);
uintptr_t cacheLineStart = reinterpret_cast<uintptr_t>(start) & Mask;
uintptr_t cacheLineEnd
= (reinterpret_cast<uintptr_t>(start) + size + CacheLineSize - 1) & Mask;
for (uintptr_t p = cacheLineStart; p < cacheLineEnd; p += CacheLineSize) {
__asm__ __volatile__("dcbf 0, %0" : : "r" (p));
}
__asm__ __volatile__("sync");
for (uintptr_t p = cacheLineStart; p < cacheLineEnd; p += CacheLineSize) {
__asm__ __volatile__("icbi 0, %0" : : "r" (p));
}
__asm__ __volatile__("isync");
}
#ifdef USE_ATOMIC_OPERATIONS
inline bool
atomicCompareAndSwap32(uint32_t* p, uint32_t old, uint32_t new_)
{
#if (__GNUC__ >= 4) && (__GNUC_MINOR__ >= 1)
return __sync_bool_compare_and_swap(p, old, new_);
#else // not GCC >= 4.1
bool result;
__asm__ __volatile__(" sync\n"
"1:\n"
" lwarx %0,0,%2\n"
" cmpw %0,%3\n"
" bne- 2f\n"
" stwcx. %4,0,%2\n"
" bne- 1b\n"
" isync \n"
"2:\n"
" xor %0,%0,%3\n"
" cntlzw %0,%0\n"
" srwi %0,%0,5\n"
: "=&r"(result), "+m"(*p)
: "r"(p), "r"(old), "r"(new_)
: "cc", "memory");
return result;
#endif // not GCC >= 4.1
}
inline bool
atomicCompareAndSwap(uintptr_t* p, uintptr_t old, uintptr_t new_)
{
return atomicCompareAndSwap32(reinterpret_cast<uint32_t*>(p), old, new_);
}
#endif // USE_ATOMIC_OPERATIONS
inline uint64_t
dynamicCall(void* function, uintptr_t* arguments, uint8_t* argumentTypes,
unsigned argumentCount, unsigned argumentsSize,
unsigned returnType)
{
#ifdef __APPLE__
# define SKIP(var, count) var += count;
# define ALIGN(var)
const unsigned LinkageArea = 24;
const unsigned FprCount = 13;
#else
# define SKIP(var, count)
# define ALIGN(var) if (var & 1) ++var;
const unsigned LinkageArea = 8;
const unsigned FprCount = 8;
#endif
const unsigned GprCount = 8;
uintptr_t gprTable[GprCount];
unsigned gprIndex = 0;
uint64_t fprTable[FprCount];
unsigned fprIndex = 0;
uintptr_t stack[argumentsSize / BytesPerWord];
unsigned stackSkip = 0;
unsigned stackIndex = 0;
unsigned ai = 0;
for (unsigned ati = 0; ati < argumentCount; ++ ati) {
switch (argumentTypes[ati]) {
case FLOAT_TYPE: {
if (fprIndex < FprCount) {
double d = bitsToFloat(arguments[ai]);
memcpy(fprTable + fprIndex, &d, 8);
++ fprIndex;
SKIP(gprIndex, 1);
SKIP(stackSkip, 1);
} else {
stack[stackIndex++] = arguments[ai];
}
++ ai;
} break;
case DOUBLE_TYPE: {
if (fprIndex + 1 <= FprCount) {
memcpy(fprTable + fprIndex, arguments + ai, 8);
++ fprIndex;
SKIP(gprIndex, 8 / BytesPerWord);
SKIP(stackSkip, 8 / BytesPerWord);
} else {
ALIGN(stackIndex);
memcpy(stack + stackIndex, arguments + ai, 8);
stackIndex += 8 / BytesPerWord;
}
ai += 8 / BytesPerWord;
} break;
case INT64_TYPE: {
if (gprIndex + (8 / BytesPerWord) <= GprCount) {
ALIGN(gprIndex);
memcpy(gprTable + gprIndex, arguments + ai, 8);
gprIndex += 8 / BytesPerWord;
SKIP(stackSkip, 8 / BytesPerWord);
} else {
ALIGN(stackIndex);
memcpy(stack + stackIndex, arguments + ai, 8);
stackIndex += 8 / BytesPerWord;
}
ai += 8 / BytesPerWord;
} break;
default: {
if (gprIndex < GprCount) {
gprTable[gprIndex++] = arguments[ai];
SKIP(stackSkip, 1);
} else {
stack[stackIndex++] = arguments[ai];
}
++ ai;
} break;
}
}
return vmNativeCall
(function,
(((1 + stackSkip + stackIndex) * BytesPerWord) + LinkageArea + 15) & -16,
stack, stackIndex * BytesPerWord,
LinkageArea + (stackSkip * BytesPerWord),
(gprIndex ? gprTable : 0),
(fprIndex ? fprTable : 0), returnType);
}
} // namespace vm
#endif//POWERPC_H
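The syncInstructionCache routine in the header above is the one piece the remaining JIT targets still need an equivalent of: after the JIT writes fresh machine code, the data cache must be flushed and the instruction cache invalidated before that code is executed. On GCC and Clang the same effect is available portably through a compiler builtin; a minimal sketch follows (the wrapper name flushCodeCache is hypothetical; __builtin___clear_cache is the real builtin):
#include <cstddef>
// Sketch: make freshly written code safe to execute.
// __builtin___clear_cache performs the flush-then-invalidate sequence
// (dcbf/sync/icbi/isync on PowerPC, the analogous ops elsewhere).
inline void flushCodeCache(void* start, std::size_t size)
{
  char* begin = static_cast<char*>(start);
  __builtin___clear_cache(begin, begin + size);
}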

View File

@ -2227,11 +2227,7 @@ extern "C" AVIAN_EXPORT int64_t JNICALL
Avian_java_nio_ByteOrder_isLittleEndian
(Thread*, object, uintptr_t*)
{
#ifdef ARCH_powerpc
return false;
#else
return true;
#endif
}
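Returning a constant true is sound here because every target left after this commit (i386, x86_64, and ARM as Avian configures it) is little-endian. For comparison, a runtime check that yields the same answer without the compile-time assumption, as a sketch (runtimeIsLittleEndian is a hypothetical helper, not part of Avian):
#include <cstdint>
#include <cstring>
// Sketch: detect byte order at runtime; true on all remaining targets.
inline bool runtimeIsLittleEndian()
{
  const uint32_t probe = 1;
  uint8_t firstByte;
  std::memcpy(&firstByte, &probe, 1);
  return firstByte == 1;  // low-order byte stored first => little-endian
}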
extern "C" AVIAN_EXPORT int64_t JNICALL
@ -2432,8 +2428,6 @@ Avian_libcore_io_Posix_uname(Thread* t, object, uintptr_t*)
object arch = makeString(t, "x86");
#elif defined ARCH_x86_64
object arch = makeString(t, "x86_64");
#elif defined ARCH_powerpc
object arch = makeString(t, "ppc");
#elif defined ARCH_arm
object arch = makeString(t, "arm");
#else

View File

@ -2986,8 +2986,6 @@ jvmInitProperties(Thread* t, uintptr_t* arguments)
local::setProperty(t, method, *properties, "os.arch", "x86");
#elif defined ARCH_x86_64
local::setProperty(t, method, *properties, "os.arch", "x86_64");
#elif defined ARCH_powerpc
local::setProperty(t, method, *properties, "os.arch", "ppc");
#elif defined ARCH_arm
local::setProperty(t, method, *properties, "os.arch", "arm");
#else

File diff suppressed because it is too large

View File

@ -1,42 +0,0 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "context.h"
#include "block.h"
#include "avian/common.h"
namespace avian {
namespace codegen {
namespace powerpc {
void resolve(MyBlock*);
unsigned padding(MyBlock*, unsigned);
MyBlock::MyBlock(Context* context, unsigned offset):
context(context), next(0), jumpOffsetHead(0), jumpOffsetTail(0),
lastJumpOffsetTail(0), jumpEventHead(0), jumpEventTail(0),
lastEventOffset(0), offset(offset), start(~0), size(0), resolved(false)
{ }
unsigned MyBlock::resolve(unsigned start, Assembler::Block* next) {
this->start = start;
this->next = static_cast<MyBlock*>(next);
powerpc::resolve(this);
this->resolved = true;
return start + size + padding(this, size);
}
} // namespace powerpc
} // namespace codegen
} // namespace avian

View File

@ -1,44 +0,0 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_POWERPC_BLOCK_H
#define AVIAN_CODEGEN_ASSEMBLER_POWERPC_BLOCK_H
namespace avian {
namespace codegen {
namespace powerpc {
class JumpEvent;
class MyBlock: public Assembler::Block {
public:
MyBlock(Context* context, unsigned offset);
virtual unsigned resolve(unsigned start, Assembler::Block* next);
Context* context;
MyBlock* next;
JumpOffset* jumpOffsetHead;
JumpOffset* jumpOffsetTail;
JumpOffset* lastJumpOffsetTail;
JumpEvent* jumpEventHead;
JumpEvent* jumpEventTail;
unsigned lastEventOffset;
unsigned offset;
unsigned start;
unsigned size;
bool resolved;
};
} // namespace powerpc
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_POWERPC_BLOCK_H

View File

@ -1,37 +0,0 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "context.h"
#include "block.h"
#include "avian/common.h"
namespace avian {
namespace codegen {
namespace powerpc {
Context::Context(vm::System* s, util::Allocator* a, vm::Zone* zone)
: s(s),
zone(zone),
client(0),
code(s, a, 1024),
tasks(0),
result(0),
firstBlock(new (zone) MyBlock(this, 0)),
lastBlock(firstBlock),
jumpOffsetHead(0),
jumpOffsetTail(0),
constantPool(0),
constantPoolCount(0)
{
}
} // namespace powerpc
} // namespace codegen
} // namespace avian

View File

@ -1,102 +0,0 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_POWERPC_CONTEXT_H
#define AVIAN_CODEGEN_ASSEMBLER_POWERPC_CONTEXT_H
#include <avian/codegen/assembler.h>
#include "avian/alloc-vector.h"
#ifdef powerpc
#undef powerpc
#endif
namespace vm {
class System;
class Zone;
} // namespace vm
namespace avian {
namespace util {
class Allocator;
}
namespace codegen {
namespace powerpc {
class Task;
class JumpOffset;
class ConstantPoolEntry;
class MyBlock;
class Context {
public:
Context(vm::System* s, util::Allocator* a, vm::Zone* zone);
vm::System* s;
vm::Zone* zone;
Assembler::Client* client;
vm::Vector code;
Task* tasks;
uint8_t* result;
MyBlock* firstBlock;
MyBlock* lastBlock;
JumpOffset* jumpOffsetHead;
JumpOffset* jumpOffsetTail;
ConstantPoolEntry* constantPool;
unsigned constantPoolCount;
};
typedef void (*OperationType)(Context*);
typedef void (*UnaryOperationType)(Context*, unsigned, lir::Operand*);
typedef void (*BinaryOperationType)
(Context*, unsigned, lir::Operand*, unsigned, lir::Operand*);
typedef void (*TernaryOperationType)
(Context*, unsigned, lir::Operand*, lir::Operand*,
lir::Operand*);
typedef void (*BranchOperationType)
(Context*, lir::TernaryOperation, unsigned, lir::Operand*,
lir::Operand*, lir::Operand*);
class ArchitectureContext {
public:
ArchitectureContext(vm::System* s): s(s) { }
vm::System* s;
OperationType operations[lir::OperationCount];
UnaryOperationType unaryOperations[lir::UnaryOperationCount
* lir::OperandTypeCount];
BinaryOperationType binaryOperations
[lir::BinaryOperationCount * lir::OperandTypeCount * lir::OperandTypeCount];
TernaryOperationType ternaryOperations
[lir::NonBranchTernaryOperationCount * lir::OperandTypeCount];
BranchOperationType branchOperations
[lir::BranchOperationCount * lir::OperandTypeCount * lir::OperandTypeCount];
};
inline avian::util::Aborter* getAborter(Context* con) {
return con->s;
}
inline avian::util::Aborter* getAborter(ArchitectureContext* con) {
return con->s;
}
} // namespace powerpc
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_POWERPC_CONTEXT_H

View File

@ -1,141 +0,0 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_POWERPC_ENCODE_H
#define AVIAN_CODEGEN_ASSEMBLER_POWERPC_ENCODE_H
#ifdef powerpc
#undef powerpc
#endif
namespace avian {
namespace codegen {
namespace powerpc {
namespace isa {
// INSTRUCTION FORMATS
inline int D(int op, int rt, int ra, int d) { return op<<26|rt<<21|ra<<16|(d & 0xFFFF); }
// inline int DS(int op, int rt, int ra, int ds, int xo) { return op<<26|rt<<21|ra<<16|ds<<2|xo; }
inline int I(int op, int li, int aa, int lk) { return op<<26|(li & 0x3FFFFFC)|aa<<1|lk; }
inline int B(int op, int bo, int bi, int bd, int aa, int lk) { return op<<26|bo<<21|bi<<16|(bd & 0xFFFC)|aa<<1|lk; }
// inline int SC(int op, int lev) { return op<<26|lev<<5|2; }
inline int X(int op, int rt, int ra, int rb, int xo, int rc) { return op<<26|rt<<21|ra<<16|rb<<11|xo<<1|rc; }
inline int XL(int op, int bt, int ba, int bb, int xo, int lk) { return op<<26|bt<<21|ba<<16|bb<<11|xo<<1|lk; }
inline int XFX(int op, int rt, int spr, int xo) { return op<<26|rt<<21|((spr >> 5) | ((spr << 5) & 0x3E0))<<11|xo<<1; }
// inline int XFL(int op, int flm, int frb, int xo, int rc) { return op<<26|flm<<17|frb<<11|xo<<1|rc; }
// inline int XS(int op, int rs, int ra, int sh, int xo, int sh2, int rc) { return op<<26|rs<<21|ra<<16|sh<<11|xo<<2|sh2<<1|rc; }
inline int XO(int op, int rt, int ra, int rb, int oe, int xo, int rc) { return op<<26|rt<<21|ra<<16|rb<<11|oe<<10|xo<<1|rc; }
// inline int A(int op, int frt, int fra, int frb, int frc, int xo, int rc) { return op<<26|frt<<21|fra<<16|frb<<11|frc<<6|xo<<1|rc; }
inline int M(int op, int rs, int ra, int rb, int mb, int me, int rc) { return op<<26|rs<<21|ra<<16|rb<<11|mb<<6|me<<1|rc; }
// inline int MD(int op, int rs, int ra, int sh, int mb, int xo, int sh2, int rc) { return op<<26|rs<<21|ra<<16|sh<<11|mb<<5|xo<<2|sh2<<1|rc; }
// inline int MDS(int op, int rs, int ra, int rb, int mb, int xo, int rc) { return op<<26|rs<<21|ra<<16|rb<<11|mb<<5|xo<<1|rc; }
// INSTRUCTIONS
inline int lbz(int rt, int ra, int i) { return D(34, rt, ra, i); }
inline int lbzx(int rt, int ra, int rb) { return X(31, rt, ra, rb, 87, 0); }
inline int lha(int rt, int ra, int i) { return D(42, rt, ra, i); }
inline int lhax(int rt, int ra, int rb) { return X(31, rt, ra, rb, 343, 0); }
// inline int lhz(int rt, int ra, int i) { return D(40, rt, ra, i); }
inline int lhzx(int rt, int ra, int rb) { return X(31, rt, ra, rb, 279, 0); }
inline int lwz(int rt, int ra, int i) { return D(32, rt, ra, i); }
inline int lwzx(int rt, int ra, int rb) { return X(31, rt, ra, rb, 23, 0); }
inline int stb(int rs, int ra, int i) { return D(38, rs, ra, i); }
inline int stbx(int rs, int ra, int rb) { return X(31, rs, ra, rb, 215, 0); }
inline int sth(int rs, int ra, int i) { return D(44, rs, ra, i); }
inline int sthx(int rs, int ra, int rb) { return X(31, rs, ra, rb, 407, 0); }
inline int stw(int rs, int ra, int i) { return D(36, rs, ra, i); }
inline int stwu(int rs, int ra, int i) { return D(37, rs, ra, i); }
inline int stwux(int rs, int ra, int rb) { return X(31, rs, ra, rb, 183, 0); }
inline int stwx(int rs, int ra, int rb) { return X(31, rs, ra, rb, 151, 0); }
inline int add(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 266, 0); }
inline int addc(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 10, 0); }
inline int adde(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 138, 0); }
inline int addi(int rt, int ra, int i) { return D(14, rt, ra, i); }
inline int addic(int rt, int ra, int i) { return D(12, rt, ra, i); }
inline int addis(int rt, int ra, int i) { return D(15, rt, ra, i); }
inline int subf(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 40, 0); }
inline int subfc(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 8, 0); }
inline int subfe(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 136, 0); }
inline int subfic(int rt, int ra, int i) { return D(8, rt, ra, i); }
inline int subfze(int rt, int ra) { return XO(31, rt, ra, 0, 0, 200, 0); }
inline int mullw(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 235, 0); }
// inline int mulhw(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 75, 0); }
inline int mulhwu(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 11, 0); }
// inline int mulli(int rt, int ra, int i) { return D(7, rt, ra, i); }
inline int divw(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 491, 0); }
// inline int divwu(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 459, 0); }
// inline int divd(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 489, 0); }
// inline int divdu(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 457, 0); }
inline int neg(int rt, int ra) { return XO(31, rt, ra, 0, 0, 104, 0); }
inline int and_(int rt, int ra, int rb) { return X(31, ra, rt, rb, 28, 0); }
inline int andi(int rt, int ra, int i) { return D(28, ra, rt, i); }
inline int andis(int rt, int ra, int i) { return D(29, ra, rt, i); }
inline int or_(int rt, int ra, int rb) { return X(31, ra, rt, rb, 444, 0); }
inline int ori(int rt, int ra, int i) { return D(24, rt, ra, i); }
inline int xor_(int rt, int ra, int rb) { return X(31, ra, rt, rb, 316, 0); }
inline int oris(int rt, int ra, int i) { return D(25, rt, ra, i); }
inline int xori(int rt, int ra, int i) { return D(26, rt, ra, i); }
inline int xoris(int rt, int ra, int i) { return D(27, rt, ra, i); }
inline int rlwinm(int rt, int ra, int i, int mb, int me) { return M(21, ra, rt, i, mb, me, 0); }
inline int rlwimi(int rt, int ra, int i, int mb, int me) { return M(20, ra, rt, i, mb, me, 0); }
inline int slw(int rt, int ra, int sh) { return X(31, ra, rt, sh, 24, 0); }
// inline int sld(int rt, int ra, int rb) { return X(31, ra, rt, rb, 27, 0); }
inline int srw(int rt, int ra, int sh) { return X(31, ra, rt, sh, 536, 0); }
inline int sraw(int rt, int ra, int sh) { return X(31, ra, rt, sh, 792, 0); }
inline int srawi(int rt, int ra, int sh) { return X(31, ra, rt, sh, 824, 0); }
inline int extsb(int rt, int rs) { return X(31, rs, rt, 0, 954, 0); }
inline int extsh(int rt, int rs) { return X(31, rs, rt, 0, 922, 0); }
inline int mfspr(int rt, int spr) { return XFX(31, rt, spr, 339); }
inline int mtspr(int spr, int rs) { return XFX(31, rs, spr, 467); }
inline int b(int i) { return I(18, i, 0, 0); }
inline int bl(int i) { return I(18, i, 0, 1); }
inline int bcctr(int bo, int bi, int lk) { return XL(19, bo, bi, 0, 528, lk); }
inline int bclr(int bo, int bi, int lk) { return XL(19, bo, bi, 0, 16, lk); }
inline int bc(int bo, int bi, int bd, int lk) { return B(16, bo, bi, bd, 0, lk); }
inline int cmp(int bf, int ra, int rb) { return X(31, bf << 2, ra, rb, 0, 0); }
inline int cmpl(int bf, int ra, int rb) { return X(31, bf << 2, ra, rb, 32, 0); }
inline int cmpi(int bf, int ra, int i) { return D(11, bf << 2, ra, i); }
inline int cmpli(int bf, int ra, int i) { return D(10, bf << 2, ra, i); }
inline int sync(int L) { return X(31, L, 0, 0, 598, 0); }
// PSEUDO-INSTRUCTIONS
inline int li(int rt, int i) { return addi(rt, 0, i); }
inline int lis(int rt, int i) { return addis(rt, 0, i); }
inline int slwi(int rt, int ra, int i) { return rlwinm(rt, ra, i, 0, 31-i); }
inline int srwi(int rt, int ra, int i) { return rlwinm(rt, ra, 32-i, i, 31); }
// inline int sub(int rt, int ra, int rb) { return subf(rt, rb, ra); }
// inline int subc(int rt, int ra, int rb) { return subfc(rt, rb, ra); }
// inline int subi(int rt, int ra, int i) { return addi(rt, ra, -i); }
// inline int subis(int rt, int ra, int i) { return addis(rt, ra, -i); }
inline int mr(int rt, int ra) { return or_(rt, ra, ra); }
inline int mflr(int rx) { return mfspr(rx, 8); }
inline int mtlr(int rx) { return mtspr(8, rx); }
inline int mtctr(int rd) { return mtspr(9, rd); }
inline int bctr() { return bcctr(20, 0, 0); }
inline int bctrl() { return bcctr(20, 0, 1); }
inline int blr() { return bclr(20, 0, 0); }
inline int blt(int i) { return bc(12, 0, i, 0); }
inline int bgt(int i) { return bc(12, 1, i, 0); }
inline int bge(int i) { return bc(4, 0, i, 0); }
inline int ble(int i) { return bc(4, 1, i, 0); }
inline int beq(int i) { return bc(12, 2, i, 0); }
inline int bne(int i) { return bc(4, 2, i, 0); }
inline int cmpw(int ra, int rb) { return cmp(0, ra, rb); }
inline int cmplw(int ra, int rb) { return cmpl(0, ra, rb); }
inline int cmpwi(int ra, int i) { return cmpi(0, ra, i); }
inline int cmplwi(int ra, int i) { return cmpli(0, ra, i); }
inline int trap() { return 0x7fe00008; } // todo: macro-ify
} // namespace isa
} // namespace powerpc
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_POWERPC_ENCODE_H

View File

@ -1,243 +0,0 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "context.h"
#include "block.h"
#include "fixup.h"
#include "encode.h"
namespace avian {
namespace codegen {
namespace powerpc {
using namespace isa;
using namespace util;
unsigned padding(MyBlock*, unsigned);
int ha16(int32_t i);
bool bounded(int right, int left, int32_t v) {
return ((v << left) >> left) == v and ((v >> right) << right) == v;
}
OffsetPromise::OffsetPromise(Context* c, MyBlock* block, unsigned offset):
c(c), block(block), offset(offset)
{ }
bool OffsetPromise::resolved() {
return block->resolved;
}
int64_t OffsetPromise::value() {
assert(c, resolved());
unsigned o = offset - block->offset;
return block->start + padding(block, o) + o;
}
Promise* offsetPromise(Context* c) {
return new(c->zone) OffsetPromise(c, c->lastBlock, c->code.length());
}
void* updateOffset(vm::System* s, uint8_t* instruction, bool conditional, int64_t value,
void* jumpAddress)
{
int32_t v = reinterpret_cast<uint8_t*>(value) - instruction;
int32_t mask;
if (conditional) {
if (not bounded(2, 16, v)) {
*static_cast<uint32_t*>(jumpAddress) = isa::b(0);
updateOffset(s, static_cast<uint8_t*>(jumpAddress), false, value, 0);
v = static_cast<uint8_t*>(jumpAddress) - instruction;
expect(s, bounded(2, 16, v));
}
mask = 0xFFFC;
} else {
expect(s, bounded(2, 6, v));
mask = 0x3FFFFFC;
}
int32_t* p = reinterpret_cast<int32_t*>(instruction);
*p = vm::targetV4((v & mask) | ((~mask) & vm::targetV4(*p)));
return instruction + 4;
}
OffsetListener::OffsetListener(vm::System* s, uint8_t* instruction, bool conditional,
void* jumpAddress):
s(s),
instruction(instruction),
jumpAddress(jumpAddress),
conditional(conditional)
{ }
bool OffsetListener::resolve(int64_t value, void** location) {
void* p = updateOffset(s, instruction, conditional, value, jumpAddress);
if (location) *location = p;
return false;
}
OffsetTask::OffsetTask(Task* next, Promise* promise, Promise* instructionOffset,
bool conditional):
Task(next),
promise(promise),
instructionOffset(instructionOffset),
jumpAddress(0),
conditional(conditional)
{ }
void OffsetTask::run(Context* c) {
if (promise->resolved()) {
updateOffset
(c->s, c->result + instructionOffset->value(), conditional,
promise->value(), jumpAddress);
} else {
new (promise->listen(sizeof(OffsetListener)))
OffsetListener(c->s, c->result + instructionOffset->value(),
conditional, jumpAddress);
}
}
JumpOffset::JumpOffset(MyBlock* block, OffsetTask* task, unsigned offset):
block(block), task(task), next(0), offset(offset)
{ }
JumpEvent::JumpEvent(JumpOffset* jumpOffsetHead, JumpOffset* jumpOffsetTail,
unsigned offset):
jumpOffsetHead(jumpOffsetHead), jumpOffsetTail(jumpOffsetTail), next(0),
offset(offset)
{ }
void appendOffsetTask(Context* c, Promise* promise, Promise* instructionOffset,
bool conditional)
{
OffsetTask* task = new(c->zone) OffsetTask(c->tasks, promise, instructionOffset, conditional);
c->tasks = task;
if (conditional) {
JumpOffset* offset =
new(c->zone) JumpOffset(c->lastBlock, task, c->code.length() - c->lastBlock->offset);
if (c->lastBlock->jumpOffsetTail) {
c->lastBlock->jumpOffsetTail->next = offset;
} else {
c->lastBlock->jumpOffsetHead = offset;
}
c->lastBlock->jumpOffsetTail = offset;
}
}
void appendJumpEvent(Context* c, MyBlock* b, unsigned offset, JumpOffset* head,
JumpOffset* tail)
{
JumpEvent* e = new(c->zone) JumpEvent
(head, tail, offset);
if (b->jumpEventTail) {
b->jumpEventTail->next = e;
} else {
b->jumpEventHead = e;
}
b->jumpEventTail = e;
}
ShiftMaskPromise* shiftMaskPromise(Context* c, Promise* base, unsigned shift, int64_t mask) {
return new (c->zone) ShiftMaskPromise(base, shift, mask);
}
void
updateImmediate(vm::System* s, void* dst, int32_t src, unsigned size, bool address)
{
switch (size) {
case 4: {
int32_t* p = static_cast<int32_t*>(dst);
int r = (vm::targetV4(p[1]) >> 21) & 31;
if (address) {
p[0] = vm::targetV4(lis(r, ha16(src)));
p[1] |= vm::targetV4(src & 0xFFFF);
} else {
p[0] = vm::targetV4(lis(r, src >> 16));
p[1] = vm::targetV4(ori(r, r, src));
}
} break;
default: abort(s);
}
}
ImmediateListener::ImmediateListener(vm::System* s, void* dst, unsigned size, unsigned offset,
bool address):
s(s), dst(dst), size(size), offset(offset), address(address)
{ }
bool ImmediateListener::resolve(int64_t value, void** location) {
updateImmediate(s, dst, value, size, address);
if (location) *location = static_cast<uint8_t*>(dst) + offset;
return false;
}
ImmediateTask::ImmediateTask(Task* next, Promise* promise, Promise* offset, unsigned size,
unsigned promiseOffset, bool address):
Task(next),
promise(promise),
offset(offset),
size(size),
promiseOffset(promiseOffset),
address(address)
{ }
void ImmediateTask::run(Context* c) {
if (promise->resolved()) {
updateImmediate
(c->s, c->result + offset->value(), promise->value(), size, address);
} else {
new (promise->listen(sizeof(ImmediateListener))) ImmediateListener
(c->s, c->result + offset->value(), size, promiseOffset, address);
}
}
void
appendImmediateTask(Context* c, Promise* promise, Promise* offset,
unsigned size, unsigned promiseOffset, bool address)
{
c->tasks = new(c->zone) ImmediateTask(c->tasks, promise, offset, size, promiseOffset, address);
}
ConstantPoolEntry::ConstantPoolEntry(Context* c, Promise* constant):
c(c), constant(constant), next(c->constantPool), address(0)
{
c->constantPool = this;
++ c->constantPoolCount;
}
int64_t ConstantPoolEntry::value() {
assert(c, resolved());
return reinterpret_cast<intptr_t>(address);
}
bool ConstantPoolEntry::resolved() {
return address != 0;
}
ConstantPoolEntry* appendConstantPoolEntry(Context* c, Promise* constant) {
return new (c->zone) ConstantPoolEntry(c, constant);
}
} // namespace powerpc
} // namespace codegen
} // namespace avian

View File

@ -1,160 +0,0 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_POWERPC_FIXUP_H
#define AVIAN_CODEGEN_ASSEMBLER_POWERPC_FIXUP_H
namespace avian {
namespace codegen {
namespace powerpc {
class Task {
public:
Task(Task* next): next(next) { }
virtual void run(Context* c) = 0;
Task* next;
};
class OffsetPromise: public Promise {
public:
OffsetPromise(Context* c, MyBlock* block, unsigned offset);
virtual bool resolved();
virtual int64_t value();
Context* c;
MyBlock* block;
unsigned offset;
};
Promise* offsetPromise(Context* c);
void*
updateOffset(vm::System* s, uint8_t* instruction, bool conditional, int64_t value,
void* jumpAddress);
class OffsetListener: public Promise::Listener {
public:
OffsetListener(vm::System* s, uint8_t* instruction, bool conditional,
void* jumpAddress);
virtual bool resolve(int64_t value, void** location);
vm::System* s;
uint8_t* instruction;
void* jumpAddress;
bool conditional;
};
class OffsetTask: public Task {
public:
OffsetTask(Task* next, Promise* promise, Promise* instructionOffset,
bool conditional);
virtual void run(Context* c);
Promise* promise;
Promise* instructionOffset;
void* jumpAddress;
bool conditional;
};
class JumpOffset {
public:
JumpOffset(MyBlock* block, OffsetTask* task, unsigned offset);
MyBlock* block;
OffsetTask* task;
JumpOffset* next;
unsigned offset;
};
class JumpEvent {
public:
JumpEvent(JumpOffset* jumpOffsetHead, JumpOffset* jumpOffsetTail,
unsigned offset);
JumpOffset* jumpOffsetHead;
JumpOffset* jumpOffsetTail;
JumpEvent* next;
unsigned offset;
};
void appendOffsetTask(Context* c, Promise* promise, Promise* instructionOffset,
bool conditional);
void appendJumpEvent(Context* c, MyBlock* b, unsigned offset, JumpOffset* head,
JumpOffset* tail);
ShiftMaskPromise* shiftMaskPromise(Context* c, Promise* base, unsigned shift, int64_t mask);
void updateImmediate(vm::System* s, void* dst, int32_t src, unsigned size, bool address);
class ImmediateListener: public Promise::Listener {
public:
ImmediateListener(vm::System* s, void* dst, unsigned size, unsigned offset,
bool address);
virtual bool resolve(int64_t value, void** location);
vm::System* s;
void* dst;
unsigned size;
unsigned offset;
bool address;
};
class ImmediateTask: public Task {
public:
ImmediateTask(Task* next, Promise* promise, Promise* offset, unsigned size,
unsigned promiseOffset, bool address);
virtual void run(Context* c);
Promise* promise;
Promise* offset;
unsigned size;
unsigned promiseOffset;
bool address;
};
void
appendImmediateTask(Context* c, Promise* promise, Promise* offset,
unsigned size, unsigned promiseOffset, bool address);
class ConstantPoolEntry: public Promise {
public:
ConstantPoolEntry(Context* c, Promise* constant);
virtual int64_t value();
virtual bool resolved();
Context* c;
Promise* constant;
ConstantPoolEntry* next;
void* address;
};
ConstantPoolEntry* appendConstantPoolEntry(Context* c, Promise* constant);
inline int ha16(int32_t i) {
return ((i >> 16) + ((i & 0x8000) ? 1 : 0)) & 0xffff;
}
} // namespace powerpc
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_POWERPC_FIXUP_H
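The ha16 helper above is the classic "high-adjusted" computation: addi and D-form load/store displacements sign-extend their low 16 bits, so whenever bit 15 of the low half is set, the high half loaded by lis must be incremented by one to compensate. A standalone worked check (a sketch reusing the definition above):
#include <cassert>
#include <cstdint>
inline int ha16(int32_t i) {
  return ((i >> 16) + ((i & 0x8000) ? 1 : 0)) & 0xffff;
}
int main() {
  const int32_t v = 0x12348765;                // low half 0x8765 has bit 15 set
  const int16_t lo = static_cast<int16_t>(v);  // sign-extends to -0x789B
  const int32_t hi = ha16(v) << 16;            // 0x12350000, one above the raw 0x1234
  assert(hi + lo == v);                        // lis(r, ha16(v)); addi(r, r, lo) rebuilds v
  return 0;
}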

View File

@ -1,139 +0,0 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "context.h"
#include "block.h"
#include "avian/common.h"
#include "operations.h"
#include "multimethod.h"
#include "../multimethod.h"
namespace avian {
namespace codegen {
namespace powerpc {
using namespace util;
unsigned index(ArchitectureContext*,
lir::BinaryOperation operation,
lir::OperandType operand1,
lir::OperandType operand2)
{
return operation
+ (lir::BinaryOperationCount * operand1)
+ (lir::BinaryOperationCount * lir::OperandTypeCount * operand2);
}
unsigned index(ArchitectureContext* c UNUSED,
lir::TernaryOperation operation,
lir::OperandType operand1)
{
assert(c, not isBranch(operation));
return operation + (lir::NonBranchTernaryOperationCount * operand1);
}
unsigned branchIndex(ArchitectureContext* c UNUSED, lir::OperandType operand1,
lir::OperandType operand2)
{
return operand1 + (lir::OperandTypeCount * operand2);
}
void populateTables(ArchitectureContext* c) {
const lir::OperandType C = lir::ConstantOperand;
const lir::OperandType A = lir::AddressOperand;
const lir::OperandType R = lir::RegisterOperand;
const lir::OperandType M = lir::MemoryOperand;
OperationType* zo = c->operations;
UnaryOperationType* uo = c->unaryOperations;
BinaryOperationType* bo = c->binaryOperations;
TernaryOperationType* to = c->ternaryOperations;
BranchOperationType* bro = c->branchOperations;
zo[lir::Return] = return_;
zo[lir::LoadBarrier] = memoryBarrier;
zo[lir::StoreStoreBarrier] = memoryBarrier;
zo[lir::StoreLoadBarrier] = memoryBarrier;
zo[lir::Trap] = trap;
uo[Multimethod::index(lir::LongCall, C)] = CAST1(longCallC);
uo[Multimethod::index(lir::AlignedLongCall, C)] = CAST1(alignedLongCallC);
uo[Multimethod::index(lir::LongJump, C)] = CAST1(longJumpC);
uo[Multimethod::index(lir::AlignedLongJump, C)] = CAST1(alignedLongJumpC);
uo[Multimethod::index(lir::Jump, R)] = CAST1(jumpR);
uo[Multimethod::index(lir::Jump, C)] = CAST1(jumpC);
uo[Multimethod::index(lir::AlignedJump, R)] = CAST1(jumpR);
uo[Multimethod::index(lir::AlignedJump, C)] = CAST1(jumpC);
uo[Multimethod::index(lir::Call, C)] = CAST1(callC);
uo[Multimethod::index(lir::Call, R)] = CAST1(callR);
uo[Multimethod::index(lir::AlignedCall, C)] = CAST1(callC);
uo[Multimethod::index(lir::AlignedCall, R)] = CAST1(callR);
bo[index(c, lir::Move, R, R)] = CAST2(moveRR);
bo[index(c, lir::Move, C, R)] = CAST2(moveCR);
bo[index(c, lir::Move, C, M)] = CAST2(moveCM);
bo[index(c, lir::Move, M, R)] = CAST2(moveMR);
bo[index(c, lir::Move, R, M)] = CAST2(moveRM);
bo[index(c, lir::Move, A, R)] = CAST2(moveAR);
bo[index(c, lir::MoveZ, R, R)] = CAST2(moveZRR);
bo[index(c, lir::MoveZ, M, R)] = CAST2(moveZMR);
bo[index(c, lir::MoveZ, C, R)] = CAST2(moveCR);
bo[index(c, lir::Negate, R, R)] = CAST2(negateRR);
to[index(c, lir::Add, R)] = CAST3(addR);
to[index(c, lir::Add, C)] = CAST3(addC);
to[index(c, lir::Subtract, R)] = CAST3(subR);
to[index(c, lir::Subtract, C)] = CAST3(subC);
to[index(c, lir::Multiply, R)] = CAST3(multiplyR);
to[index(c, lir::Divide, R)] = CAST3(divideR);
to[index(c, lir::Remainder, R)] = CAST3(remainderR);
to[index(c, lir::ShiftLeft, R)] = CAST3(shiftLeftR);
to[index(c, lir::ShiftLeft, C)] = CAST3(shiftLeftC);
to[index(c, lir::ShiftRight, R)] = CAST3(shiftRightR);
to[index(c, lir::ShiftRight, C)] = CAST3(shiftRightC);
to[index(c, lir::UnsignedShiftRight, R)] = CAST3(unsignedShiftRightR);
to[index(c, lir::UnsignedShiftRight, C)] = CAST3(unsignedShiftRightC);
to[index(c, lir::And, C)] = CAST3(andC);
to[index(c, lir::And, R)] = CAST3(andR);
to[index(c, lir::Or, C)] = CAST3(orC);
to[index(c, lir::Or, R)] = CAST3(orR);
to[index(c, lir::Xor, C)] = CAST3(xorC);
to[index(c, lir::Xor, R)] = CAST3(xorR);
bro[branchIndex(c, R, R)] = CAST_BRANCH(branchRR);
bro[branchIndex(c, C, R)] = CAST_BRANCH(branchCR);
bro[branchIndex(c, C, M)] = CAST_BRANCH(branchCM);
bro[branchIndex(c, R, M)] = CAST_BRANCH(branchRM);
}
} // namespace powerpc
} // namespace codegen
} // namespace avian

View File

@ -1,41 +0,0 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_POWERPC_MULTIMETHOD_H
#define AVIAN_CODEGEN_ASSEMBLER_POWERPC_MULTIMETHOD_H
#define CAST1(x) reinterpret_cast<UnaryOperationType>(x)
#define CAST2(x) reinterpret_cast<BinaryOperationType>(x)
#define CAST3(x) reinterpret_cast<TernaryOperationType>(x)
#define CAST_BRANCH(x) reinterpret_cast<BranchOperationType>(x)
namespace avian {
namespace codegen {
namespace powerpc {
unsigned index(ArchitectureContext*,
lir::BinaryOperation operation,
lir::OperandType operand1,
lir::OperandType operand2);
unsigned index(ArchitectureContext* c UNUSED,
lir::TernaryOperation operation,
lir::OperandType operand1);
unsigned branchIndex(ArchitectureContext* c UNUSED, lir::OperandType operand1,
lir::OperandType operand2);
void populateTables(ArchitectureContext* c);
} // namespace powerpc
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_POWERPC_MULTIMETHOD_H

File diff suppressed because it is too large

View File

@ -1,197 +0,0 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_POWERPC_OPERATIONS_H
#define AVIAN_CODEGEN_ASSEMBLER_POWERPC_OPERATIONS_H
#include "context.h"
namespace avian {
namespace codegen {
namespace powerpc {
inline void emit(Context* con, int code) { con->code.append4(vm::targetV4(code)); }
inline int newTemp(Context* con) { return con->client->acquireTemporary(); }
inline void freeTemp(Context* con, int r) { con->client->releaseTemporary(r); }
inline int64_t getValue(lir::Constant* c) { return c->value->value(); }
void andC(Context* c, unsigned size, lir::Constant* a,
lir::Register* b, lir::Register* dst);
void shiftLeftR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void moveRR(Context* c, unsigned srcSize, lir::Register* src,
unsigned dstSize, lir::Register* dst);
void shiftLeftC(Context* con, unsigned size, lir::Constant* a, lir::Register* b, lir::Register* t);
void shiftRightR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void shiftRightC(Context* con, unsigned size, lir::Constant* a, lir::Register* b, lir::Register* t);
void unsignedShiftRightR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void unsignedShiftRightC(Context* con, unsigned size, lir::Constant* a, lir::Register* b, lir::Register* t);
void jumpR(Context* c, unsigned size UNUSED, lir::Register* target);
void swapRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize, lir::Register* b);
void moveRR(Context* c, unsigned srcSize, lir::Register* src,
unsigned dstSize, lir::Register* dst);
void moveZRR(Context* c, unsigned srcSize, lir::Register* src,
unsigned, lir::Register* dst);
void moveCR2(Context* c, unsigned, lir::Constant* src,
unsigned dstSize, lir::Register* dst, unsigned promiseOffset);
void moveCR(Context* c, unsigned srcSize, lir::Constant* src,
unsigned dstSize, lir::Register* dst);
void addR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void addC(Context* con, unsigned size, lir::Constant* a, lir::Register* b, lir::Register* t);
void subR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void subC(Context* c, unsigned size, lir::Constant* a, lir::Register* b, lir::Register* t);
void multiplyR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void divideR(Context* con, unsigned size UNUSED, lir::Register* a, lir::Register* b, lir::Register* t);
void remainderR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
int
normalize(Context* c, int offset, int index, unsigned scale,
bool* preserveIndex, bool* release);
void store(Context* c, unsigned size, lir::Register* src,
int base, int offset, int index, unsigned scale, bool preserveIndex);
void moveRM(Context* c, unsigned srcSize, lir::Register* src,
unsigned dstSize UNUSED, lir::Memory* dst);
void moveAndUpdateRM(Context* c, unsigned srcSize UNUSED, lir::Register* src,
unsigned dstSize UNUSED, lir::Memory* dst);
void load(Context* c, unsigned srcSize, int base, int offset, int index,
unsigned scale, unsigned dstSize, lir::Register* dst,
bool preserveIndex, bool signExtend);
void moveMR(Context* c, unsigned srcSize, lir::Memory* src,
unsigned dstSize, lir::Register* dst);
void moveZMR(Context* c, unsigned srcSize, lir::Memory* src,
unsigned dstSize, lir::Register* dst);
void andR(Context* c, unsigned size, lir::Register* a,
lir::Register* b, lir::Register* dst);
void andC(Context* c, unsigned size, lir::Constant* a,
lir::Register* b, lir::Register* dst);
void orR(Context* c, unsigned size, lir::Register* a,
lir::Register* b, lir::Register* dst);
void orC(Context* c, unsigned size, lir::Constant* a,
lir::Register* b, lir::Register* dst);
void xorR(Context* c, unsigned size, lir::Register* a,
lir::Register* b, lir::Register* dst);
void xorC(Context* c, unsigned size, lir::Constant* a,
lir::Register* b, lir::Register* dst);
void moveAR2(Context* c, unsigned srcSize UNUSED, lir::Address* src,
unsigned dstSize, lir::Register* dst, unsigned promiseOffset);
void moveAR(Context* c, unsigned srcSize, lir::Address* src,
unsigned dstSize, lir::Register* dst);
void compareRR(Context* c, unsigned aSize UNUSED, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void compareCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b);
void compareCM(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Memory* b);
void compareRM(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize, lir::Memory* b);
void compareUnsignedRR(Context* c, unsigned aSize UNUSED, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void compareUnsignedCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b);
int32_t branch(Context* c, lir::TernaryOperation op);
void conditional(Context* c, int32_t branch, lir::Constant* target);
void branch(Context* c, lir::TernaryOperation op, lir::Constant* target);
void branchLong(Context* c, lir::TernaryOperation op, lir::Operand* al,
lir::Operand* ah, lir::Operand* bl,
lir::Operand* bh, lir::Constant* target,
BinaryOperationType compareSigned,
BinaryOperationType compareUnsigned);
void branchRR(Context* c, lir::TernaryOperation op, unsigned size,
lir::Register* a, lir::Register* b,
lir::Constant* target);
void branchCR(Context* c, lir::TernaryOperation op, unsigned size,
lir::Constant* a, lir::Register* b,
lir::Constant* target);
void branchRM(Context* c, lir::TernaryOperation op, unsigned size,
lir::Register* a, lir::Memory* b,
lir::Constant* target);
void branchCM(Context* c, lir::TernaryOperation op, unsigned size,
lir::Constant* a, lir::Memory* b,
lir::Constant* target);
void moveCM(Context* c, unsigned srcSize, lir::Constant* src,
unsigned dstSize, lir::Memory* dst);
void negateRR(Context* c, unsigned srcSize, lir::Register* src,
unsigned dstSize UNUSED, lir::Register* dst);
void callR(Context* c, unsigned size UNUSED, lir::Register* target);
void callC(Context* c, unsigned size UNUSED, lir::Constant* target);
void longCallC(Context* c, unsigned size UNUSED, lir::Constant* target);
void alignedLongCallC(Context* c, unsigned size UNUSED, lir::Constant* target);
void longJumpC(Context* c, unsigned size UNUSED, lir::Constant* target);
void alignedLongJumpC(Context* c, unsigned size UNUSED, lir::Constant* target);
void jumpC(Context* c, unsigned size UNUSED, lir::Constant* target);
void return_(Context* c);
void trap(Context* c);
void memoryBarrier(Context* c);
} // namespace powerpc
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_POWERPC_OPERATIONS_H

View File

@ -1,23 +0,0 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_POWERPC_REGISTERS_H
#define AVIAN_CODEGEN_ASSEMBLER_POWERPC_REGISTERS_H
namespace avian {
namespace codegen {
namespace powerpc {
} // namespace powerpc
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_POWERPC_REGISTERS_H

View File

@ -29,8 +29,6 @@ Architecture* makeArchitectureNative(vm::System* system, bool useNativeFeatures
return makeArchitectureX86(system, useNativeFeatures);
#elif AVIAN_TARGET_ARCH == AVIAN_ARCH_ARM
return makeArchitectureArm(system, useNativeFeatures);
#elif AVIAN_TARGET_ARCH == AVIAN_ARCH_POWERPC
return makeArchitecturePowerpc(system, useNativeFeatures);
#else
#error "Unsupported codegen target"
#endif

View File

@ -1,295 +0,0 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "avian/types.h"
#include "avian/target-fields.h"
.text
#define BYTES_PER_WORD 4
#ifdef __APPLE__
# define GLOBAL(x) _##x
# define LOCAL(x) L##x
# define LINKAGE_AREA 6
# define RETURN_ADDRESS_OFFSET 8
#else
# define GLOBAL(x) x
# define LOCAL(x) .L##x
# define LINKAGE_AREA 2
# define RETURN_ADDRESS_OFFSET 4
# include "powerpc-regs.S"
#endif
#define ARGUMENT_BASE BYTES_PER_WORD * LINKAGE_AREA
#define CONTINUATION_NEXT 4
#define CONTINUATION_ADDRESS 16
#define CONTINUATION_RETURN_ADDRESS_OFFSET 20
#define CONTINUATION_FRAME_POINTER_OFFSET 24
#define CONTINUATION_LENGTH 28
#define CONTINUATION_BODY 32
.globl GLOBAL(vmInvoke)
GLOBAL(vmInvoke):
// save return address
mflr r0
stw r0,RETURN_ADDRESS_OFFSET(r1)
// r3: thread
// r4: function
// r5: arguments
// r6: argumentFootprint
// r7: frameSize
// r8: returnType
// r9: temporary
// allocate stack space, adding room for callee-saved registers and
// return type
subfic r9,r7,-80
stwux r1,r1,r9
// save callee-saved registers
add r9,r7,r1
stw r13,0(r9)
stw r14,4(r9)
stw r15,8(r9)
stw r16,12(r9)
stw r17,16(r9)
stw r18,20(r9)
stw r19,24(r9)
stw r20,28(r9)
stw r21,32(r9)
stw r22,36(r9)
stw r23,40(r9)
stw r24,44(r9)
stw r25,48(r9)
stw r26,52(r9)
stw r27,56(r9)
stw r28,60(r9)
stw r29,64(r9)
stw r30,68(r9)
stw r31,72(r9)
// save return type
stw r8,76(r9)
// we use r13 to hold the thread pointer, by convention
mr r13,r3
// copy arguments into place
li r16,0
addi r18,r1,ARGUMENT_BASE
b LOCAL(vmInvoke_argumentTest)
LOCAL(vmInvoke_argumentLoop):
lwzx r17,r16,r5
stwx r17,r16,r18
addi r16,r16,BYTES_PER_WORD
LOCAL(vmInvoke_argumentTest):
cmplw r16,r6
blt LOCAL(vmInvoke_argumentLoop)
// load and call function address
mtctr r4
bctrl
.globl GLOBAL(vmInvoke_returnAddress)
GLOBAL(vmInvoke_returnAddress):
// restore stack pointer
lwz r1,0(r1)
// clear MyThread::stack to avoid confusing another thread calling
// java.lang.Thread.getStackTrace on this one. See
// MyProcess::getStackTrace in compile.cpp for details on how we get
// a reliable stack trace from a thread that might be interrupted at
// any point in its execution.
li r5,0
stw r5,TARGET_THREAD_STACK(r13)
.globl GLOBAL(vmInvoke_safeStack)
GLOBAL(vmInvoke_safeStack):
#ifdef AVIAN_CONTINUATIONS
// call the next continuation, if any
lwz r5,TARGET_THREAD_CONTINUATION(r13)
cmplwi r5,0
beq LOCAL(vmInvoke_exit)
lwz r6,CONTINUATION_LENGTH(r5)
slwi r6,r6,2
subfic r7,r6,-80
stwux r1,r1,r7
addi r7,r5,CONTINUATION_BODY
li r8,0
addi r10,r1,ARGUMENT_BASE
b LOCAL(vmInvoke_continuationTest)
LOCAL(vmInvoke_continuationLoop):
lwzx r9,r7,r8
stwx r9,r10,r8
addi r8,r8,4
LOCAL(vmInvoke_continuationTest):
cmplw r8,r6
ble LOCAL(vmInvoke_continuationLoop)
lwz r7,CONTINUATION_RETURN_ADDRESS_OFFSET(r5)
bl LOCAL(vmInvoke_getPC)
LOCAL(vmInvoke_getPC):
mflr r10
#ifdef __APPLE__
la r10,lo16(GLOBAL(vmInvoke_returnAddress)-LOCAL(vmInvoke_getPC))(r10)
#else
lwz r10,LOCAL(vmInvoke_returnAddress_address)-LOCAL(vmInvoke_getPC)(r10)
#endif
stwx r10,r1,r7
lwz r7,CONTINUATION_FRAME_POINTER_OFFSET(r5)
lwz r8,0(r1)
add r7,r7,r1
stw r8,0(r7)
stw r7,0(r1)
lwz r7,CONTINUATION_NEXT(r5)
stw r7,TARGET_THREAD_CONTINUATION(r13)
// call the continuation unless we're handling an exception
lwz r7,TARGET_THREAD_EXCEPTION(r13)
cmpwi r7,0
bne LOCAL(vmInvoke_handleException)
lwz r7,CONTINUATION_ADDRESS(r5)
mtctr r7
bctr
LOCAL(vmInvoke_handleException):
// we're handling an exception - call the exception handler instead
li r8,0
stw r8,TARGET_THREAD_EXCEPTION(r13)
lwz r8,TARGET_THREAD_EXCEPTIONSTACKADJUSTMENT(r13)
lwz r9,0(r1)
subfic r8,r8,0
stwux r9,r1,r8
lwz r8,TARGET_THREAD_EXCEPTIONOFFSET(r13)
stwx r7,r1,r8
lwz r7,TARGET_THREAD_EXCEPTIONHANDLER(r13)
mtctr r7
bctr
LOCAL(vmInvoke_exit):
#endif // AVIAN_CONTINUATIONS
// restore callee-saved registers
subi r9,r1,80
lwz r13,0(r9)
lwz r14,4(r9)
lwz r15,8(r9)
lwz r16,12(r9)
lwz r17,16(r9)
lwz r18,20(r9)
lwz r19,24(r9)
lwz r20,28(r9)
lwz r21,32(r9)
lwz r22,36(r9)
lwz r23,40(r9)
lwz r24,44(r9)
lwz r25,48(r9)
lwz r26,52(r9)
lwz r27,56(r9)
lwz r28,60(r9)
lwz r29,64(r9)
lwz r30,68(r9)
lwz r31,72(r9)
// handle return value based on expected type
lwz r8,76(r9)
LOCAL(vmInvoke_return):
// load return address
lwz r0,RETURN_ADDRESS_OFFSET(r1)
mtlr r0
// return
blr
#ifndef __APPLE__
LOCAL(vmInvoke_returnAddress_address):
.long GLOBAL(vmInvoke_returnAddress)
#endif
.globl GLOBAL(vmJumpAndInvoke)
GLOBAL(vmJumpAndInvoke):
#ifdef AVIAN_CONTINUATIONS
// r3: thread
// r4: address
// r5: stack
// r6: argumentFootprint
// r7: arguments
// r8: frameSize
// restore (pseudo)-stack pointer (we don't want to touch the real
// stack pointer, since we haven't copied the arguments yet)
lwz r5,0(r5)
// make everything between r1 and r5 one big stack frame while we
// shuffle things around
stw r5,0(r1)
// allocate new frame, adding room for callee-saved registers
subfic r10,r8,-80
stwux r5,r5,r10
mr r13,r3
// copy arguments into place
li r8,0
addi r11,r5,ARGUMENT_BASE
b LOCAL(vmJumpAndInvoke_argumentTest)
LOCAL(vmJumpAndInvoke_argumentLoop):
lwzx r12,r7,r8
stwx r12,r11,r8
addi r8,r8,4
LOCAL(vmJumpAndInvoke_argumentTest):
cmplw r8,r6
ble LOCAL(vmJumpAndInvoke_argumentLoop)
// the arguments have been copied, so we can set the real stack
// pointer now
mr r1,r5
// set return address to vmInvoke_returnAddress
bl LOCAL(vmJumpAndInvoke_getPC)
LOCAL(vmJumpAndInvoke_getPC):
mflr r10
#ifdef __APPLE__
la r10,lo16(GLOBAL(vmInvoke_returnAddress)-LOCAL(vmJumpAndInvoke_getPC))(r10)
#else
lwz r10,LOCAL(vmInvoke_returnAddress_address)-LOCAL(vmJumpAndInvoke_getPC)(r10)
#endif
mtlr r10
mtctr r4
bctr
#else // not AVIAN_CONTINUATIONS
// vmJumpAndInvoke should only be called when continuations are
// enabled
trap
#endif // not AVIAN_CONTINUATIONS

View File

@ -8981,10 +8981,10 @@ class MyProcessor: public Processor {
} else if (isThunk(t, ip) or isVirtualThunk(t, ip)) {
// we caught the thread in a thunk where the stack register
// indicates the most recent Java frame on the stack
// On e.g. x86, the return address will have already been
// pushed onto the stack, in which case we use getIp to
// retrieve it. On e.g. PowerPC and ARM, it will be in the
// retrieve it. On e.g. ARM, it will be in the
// link register. Note that we can't just check if the link
// argument is null here, since we use ecx/rcx as a
// pseudo-link register on x86 for the purpose of tail
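
A minimal sketch of the decision this comment describes, using invented parameters rather than Avian's interface:

// x86: the call instruction already pushed the return address, so read it
// from the top of the stack; ARM: it is still in the link register.
void* frameIp(bool returnAddressOnStack, void** stack, void* link) {
  return returnAddressOnStack ? *stack : link;
}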

View File

@ -1,259 +0,0 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "avian/types.h"
.text
#define BYTES_PER_WORD 4
#define GPR_COUNT 8
#ifdef __APPLE__
# define GLOBAL(x) _##x
# define LOCAL(x) L##x
# define LINKAGE_AREA 6
# define MEMORY_BASE BYTES_PER_WORD * (LINKAGE_AREA + GPR_COUNT)
# define RETURN_ADDRESS_OFFSET 8
#else
# define GLOBAL(x) x
# define LOCAL(x) .L##x
# define LINKAGE_AREA 2
# define MEMORY_BASE BYTES_PER_WORD * LINKAGE_AREA
# define RETURN_ADDRESS_OFFSET 4
# include "powerpc-regs.S"
#endif
.globl GLOBAL(vmNativeCall)
GLOBAL(vmNativeCall):
// save return address
mflr r0
stw r0,RETURN_ADDRESS_OFFSET(r1)
// r3 aka r13: function
// r4 : stackTotal
// r5 : memoryTable
// r6 : memoryCount
// r7 : memoryBase
// r8 : gprTable
// r9 : fprTable
// r10 aka r14: returnType
// r15 : stack frame size
// r16 : temporary
// r17 : temporary
// r18 : temporary
// allocate stack space, adding room for callee-saved registers and
// scratch space for copying a FP return value into GPRs
subfic r11,r4,-48
stwux r1,r1,r11
// save callee-saved registers used for local variables
add r11,r4,r1
stw r13,0(r11)
stw r14,4(r11)
stw r15,8(r11)
stw r16,12(r11)
stw r17,16(r11)
stw r18,20(r11)
stw r19,24(r11)
// remember where we saved the local variables
mr r19,r11
// save our argument registers so we can clobber them
mr r13,r3
mr r14,r10
li r16,0
b LOCAL(test)
LOCAL(loop):
lwzx r17,r16,r5
add r18,r16,r7
stwx r17,r18,r1
addi r16,r16,BYTES_PER_WORD
LOCAL(test):
cmplw r16,r6
blt LOCAL(loop)
// do we need to load the floating point registers?
cmpwi r9,0
beq LOCAL(gpr)
// yes, we do
lfd f1,0(r9)
lfd f2,8(r9)
lfd f3,16(r9)
lfd f4,24(r9)
lfd f5,32(r9)
lfd f6,40(r9)
lfd f7,48(r9)
lfd f8,56(r9)
#ifdef __APPLE__
lfd f9,64(r9)
lfd f10,72(r9)
lfd f11,80(r9)
lfd f12,88(r9)
lfd f13,96(r9)
#endif
LOCAL(gpr):
// do we need to load the general-purpose registers?
cmpwi r8,0
beq LOCAL(call)
// yes, we do
mr r16,r8
lwz r3,0(r16)
lwz r4,4(r16)
lwz r5,8(r16)
lwz r6,12(r16)
lwz r7,16(r16)
lwz r8,20(r16)
lwz r9,24(r16)
lwz r10,28(r16)
LOCAL(call):
// load and call function address
mtctr r13
bctrl
// handle return value based on expected type
cmpwi r14,VOID_TYPE
bne LOCAL(float)
b LOCAL(exit)
LOCAL(float):
cmpwi r14,FLOAT_TYPE
bne LOCAL(double)
stfs f1,32(r19)
lwz r4,32(r19)
b LOCAL(exit)
LOCAL(double):
cmpwi r14,DOUBLE_TYPE
bne LOCAL(int64)
stfd f1,32(r19)
lwz r3,32(r19)
lwz r4,36(r19)
b LOCAL(exit)
LOCAL(int64):
cmpwi r14,INT64_TYPE
beq LOCAL(exit)
mr r4,r3
b LOCAL(exit)
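// note: nothing in this file branches to LOCAL(copy) below; the
// LOCAL(double) case above already performs the same copy, so the
// block is unreachable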
LOCAL(copy):
// move floating point return value to GPRs via memory
stfd f1,32(r19)
lwz r3,32(r19)
lwz r4,36(r19)
b LOCAL(exit)
LOCAL(exit):
// restore callee-saved registers used for local variables
lwz r13,0(r19)
lwz r14,4(r19)
lwz r15,8(r19)
lwz r16,12(r19)
lwz r17,16(r19)
lwz r18,20(r19)
lwz r19,24(r19)
// restore stack pointer
lwz r1,0(r1)
// load return address
lwz r0,RETURN_ADDRESS_OFFSET(r1)
mtlr r0
// return
blr
.globl GLOBAL(vmJump)
GLOBAL(vmJump):
mtlr r3
mr r1,r5
mr r13,r6
mr r4,r7
mr r3,r8
blr
#define CHECKPOINT_THREAD 4
#define CHECKPOINT_STACK 24
.globl GLOBAL(vmRun)
GLOBAL(vmRun):
// r3: function
// r4: arguments
// r5: checkpoint
mflr r0
stw r0,RETURN_ADDRESS_OFFSET(r1)
stwu r1,-(MEMORY_BASE+88)(r1)
stw r13,MEMORY_BASE+0(r1)
stw r14,MEMORY_BASE+4(r1)
stw r15,MEMORY_BASE+8(r1)
stw r16,MEMORY_BASE+12(r1)
stw r17,MEMORY_BASE+16(r1)
stw r18,MEMORY_BASE+20(r1)
stw r19,MEMORY_BASE+24(r1)
stw r20,MEMORY_BASE+28(r1)
stw r21,MEMORY_BASE+32(r1)
stw r22,MEMORY_BASE+36(r1)
stw r23,MEMORY_BASE+40(r1)
stw r24,MEMORY_BASE+44(r1)
stw r25,MEMORY_BASE+48(r1)
stw r26,MEMORY_BASE+52(r1)
stw r27,MEMORY_BASE+56(r1)
stw r28,MEMORY_BASE+60(r1)
stw r29,MEMORY_BASE+64(r1)
stw r30,MEMORY_BASE+68(r1)
stw r31,MEMORY_BASE+72(r1)
stw r1,CHECKPOINT_STACK(r5)
mr r6,r3
lwz r3,CHECKPOINT_THREAD(r5)
mtctr r6
bctrl
.globl GLOBAL(vmRun_returnAddress)
GLOBAL(vmRun_returnAddress):
lwz r13,MEMORY_BASE+0(r1)
lwz r14,MEMORY_BASE+4(r1)
lwz r15,MEMORY_BASE+8(r1)
lwz r16,MEMORY_BASE+12(r1)
lwz r17,MEMORY_BASE+16(r1)
lwz r18,MEMORY_BASE+20(r1)
lwz r19,MEMORY_BASE+24(r1)
lwz r20,MEMORY_BASE+28(r1)
lwz r21,MEMORY_BASE+32(r1)
lwz r22,MEMORY_BASE+36(r1)
lwz r23,MEMORY_BASE+40(r1)
lwz r24,MEMORY_BASE+44(r1)
lwz r25,MEMORY_BASE+48(r1)
lwz r26,MEMORY_BASE+52(r1)
lwz r27,MEMORY_BASE+56(r1)
lwz r28,MEMORY_BASE+60(r1)
lwz r29,MEMORY_BASE+64(r1)
lwz r30,MEMORY_BASE+68(r1)
lwz r31,MEMORY_BASE+72(r1)
lwz r1,0(r1)
lwz r0,RETURN_ADDRESS_OFFSET(r1)
mtlr r0
blr
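
Read together with the register comments at the top of vmNativeCall, the entry point amounts to roughly this C-level signature. This is an inference for illustration only; the authoritative declaration lives in Avian's C++ sources and may differ:

#include <stdint.h>

extern "C" uint64_t vmNativeCall(void* function,       // r3: native code to call
                                 unsigned stackTotal,  // r4: bytes of stack to reserve
                                 void* memoryTable,    // r5: stack-passed argument words
                                 unsigned memoryCount, // r6: bytes in memoryTable
                                 unsigned memoryBase,  // r7: byte offset of first stack slot
                                 void* gprTable,       // r8: up to 8 GPR arguments, or null
                                 void* fprTable,       // r9: FPR arguments, or null
                                 unsigned returnType); // r10: VOID_TYPE, FLOAT_TYPE, etc.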

View File

@ -1907,9 +1907,9 @@ main(int ac, const char** av)
// todo: currently, the compiler cannot compile code with jumps or
// calls spanning more than the maximum size of an immediate value
// in a branch instruction for the target architecture (~32MB on
// PowerPC and ARM). When that limitation is removed, we'll be able
// to specify a capacity as large as we like here:
// in a branch instruction for the target architecture (~32MB on ARM).
// When that limitation is removed, we'll be able to specify a
// capacity as large as we like here:
#if (AVIAN_TARGET_ARCH == AVIAN_ARCH_X86_64) \
|| (AVIAN_TARGET_ARCH == AVIAN_ARCH_X86)
const unsigned CodeCapacity = 128 * 1024 * 1024;
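
The "~32MB" figure follows from the branch encoding: ARM's B/BL (and, previously, PowerPC's I-form branch) carries a signed 24-bit word offset that the hardware shifts left by two, so the reach is 2^25 bytes in either direction. A one-line check:

#include <cstdint>

constexpr int offsetBits = 24;  // signed immediate field in B/BL
constexpr std::int64_t branchRange = (std::int64_t(1) << (offsetBits - 1)) << 2;
static_assert(branchRange == 32 * 1024 * 1024, "reach is +/-32MB");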

View File

@ -49,7 +49,6 @@
#define EM_386 3
#define EM_X86_64 62
#define EM_ARM 40
#define EM_PPC 20
#define SHT_PROGBITS 1
#define SHT_SYMTAB 2
@ -130,8 +129,6 @@ unsigned getElfPlatform(PlatformInfo::Architecture arch) {
return EM_386;
case PlatformInfo::Arm:
return EM_ARM;
case PlatformInfo::PowerPC:
return EM_PPC;
default:
return ~0;
}
@ -375,7 +372,6 @@ public:
ElfPlatform<uint32_t> elfX86Platform(PlatformInfo::x86);
ElfPlatform<uint32_t> elfArmPlatform(PlatformInfo::Arm);
ElfPlatform<uint32_t, false> elfPowerPCPlatform(PlatformInfo::PowerPC);
ElfPlatform<uint64_t> elfX86_64Platform(PlatformInfo::x86_64);
} // namespace
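
With the PowerPC case gone, getElfPlatform maps only the surviving architectures to their ELF e_machine values; everything else hits the default and yields the ~0 sentinel. Hypothetical call sites, for illustration:

unsigned m386 = getElfPlatform(PlatformInfo::x86);  // EM_386 == 3
unsigned mArm = getElfPlatform(PlatformInfo::Arm);  // EM_ARM == 40
// any architecture without a case, PowerPC now included, yields ~0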

View File

@ -32,13 +32,11 @@
#define CPU_TYPE_I386 7
#define CPU_TYPE_X86_64 (CPU_TYPE_I386 | CPU_ARCH_ABI64)
#define CPU_TYPE_POWERPC 18
#define CPU_TYPE_ARM 12
#define CPU_SUBTYPE_I386_ALL 3
#define CPU_SUBTYPE_X86_64_ALL CPU_SUBTYPE_I386_ALL
#define CPU_SUBTYPE_POWERPC_ALL 0
#define CPU_SUBTYPE_ARM_V7 9
namespace {
@ -153,10 +151,6 @@ public:
cpuType = CPU_TYPE_I386;
cpuSubType = CPU_SUBTYPE_I386_ALL;
break;
case PlatformInfo::PowerPC:
cpuType = CPU_TYPE_POWERPC;
cpuSubType = CPU_SUBTYPE_POWERPC_ALL;
break;
case PlatformInfo::Arm:
cpuType = CPU_TYPE_ARM;
cpuSubType = CPU_SUBTYPE_ARM_V7;
@ -293,7 +287,6 @@ public:
MachOPlatform<uint32_t> darwinx86Platform(PlatformInfo::x86);
MachOPlatform<uint32_t> darwinArmPlatform(PlatformInfo::Arm);
MachOPlatform<uint32_t, false> darwinPowerPCPlatform(PlatformInfo::PowerPC);
MachOPlatform<uint64_t> darwinx86_64Platform(PlatformInfo::x86_64);
} // namespace
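
On the Mach-O side, these constants populate the cputype and cpusubtype fields at the front of the image. An abbreviated sketch of the 32-bit header layout, assuming the usual <mach-o/loader.h> field order (the real struct has seven fields):

#include <cstdint>

struct mach_header_sketch {
  std::uint32_t magic;       // MH_MAGIC
  std::int32_t cputype;      // e.g. CPU_TYPE_ARM (12); CPU_TYPE_POWERPC (18) is gone
  std::int32_t cpusubtype;   // e.g. CPU_SUBTYPE_ARM_V7 (9)
  // filetype, ncmds, sizeofcmds, flags omitted
};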

View File

@ -108,8 +108,6 @@ PlatformInfo::Architecture PlatformInfo::archFromString(const char* arch) {
return x86;
} else if(strcmp(arch, "x86_64") == 0) {
return x86_64;
} else if(strcmp(arch, "powerpc") == 0) {
return PowerPC;
} else if(strcmp(arch, "arm") == 0) {
return Arm;
} else {
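
With its branch removed, the string "powerpc" now falls through to the trailing else, which presumably reports an unknown architecture. Hypothetical call sites:

PlatformInfo::Architecture a = PlatformInfo::archFromString("arm");      // Arm
PlatformInfo::Architecture b = PlatformInfo::archFromString("powerpc");  // now unrecognized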