Merge remote-tracking branch 'jw/master'

This commit is contained in:
Joel Dice 2013-03-04 09:37:22 -07:00
commit a142a46d41
72 changed files with 12045 additions and 10005 deletions

View File

@ -0,0 +1,46 @@
/* Copyright (c) 2008-2011, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_UTIL_ARG_PARSER_H
#define AVIAN_UTIL_ARG_PARSER_H
namespace avian {
namespace util {
class Arg;
// Collects Arg instances and parses a command line against them. Arg's
// constructor takes an ArgParser& -- presumably each Arg registers itself
// onto this list at construction; verify in arg-parser.cpp.
class ArgParser {
public:
Arg* first;  // head of the singly-linked list of registered arguments
Arg** last;  // tail pointer, used to append newly registered arguments
ArgParser();
// Parse argc/argv-style arguments against the registered Args; returns
// false on malformed input -- TODO confirm exact failure conditions in
// arg-parser.cpp.
bool parse(int ac, const char* const* av);
// Print a usage summary for all registered arguments, using 'exe' as the
// program name.
void printUsage(const char* exe);
};
// A single named command-line argument. Construction takes the owning
// parser -- presumably to link this Arg into the parser's list; verify in
// arg-parser.cpp.
class Arg {
public:
Arg* next;            // next argument in the parser's list
bool required;        // whether parse() fails if this is absent -- TODO confirm
const char* name;     // option name as it appears on the command line
const char* desc;     // human-readable description for printUsage
const char* value;    // parsed value, if one was supplied
Arg(ArgParser& parser, bool required, const char* name, const char* desc);
};
} // namespace util
} // namespace avian
#endif // AVIAN_UTIL_ARG_PARSER_H

View File

@ -0,0 +1,137 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ARCHITECTURE_H
#define AVIAN_CODEGEN_ARCHITECTURE_H
namespace vm {
class Allocator;
class Zone;
}
namespace avian {
namespace codegen {
class Assembler;
class RegisterFile;
// A constraint on where an operand may live: a bitmask of permitted operand
// types plus a bitmask of permitted registers.
class OperandMask {
public:
uint8_t typeMask;
uint64_t registerMask;

// Restrict the operand to the given types and registers.
OperandMask(uint8_t type, uint64_t registers):
  typeMask(type),
  registerMask(registers)
{ }

// Fully-permissive mask: any operand type, any register.
OperandMask():
  typeMask(0xFF),
  registerMask(~static_cast<uint64_t>(0))
{ }
};
// Abstract interface describing a target CPU architecture to the code
// generator: register conventions, frame layout, call patching, and
// operand-placement planning for each kind of LIR operation.
class Architecture {
public:
virtual unsigned floatRegisterSize() = 0;
virtual const RegisterFile* registerFile() = 0;
// Well-known registers -- presumably returned as target-specific register
// indices; TODO confirm numbering against each target implementation.
virtual int scratch() = 0;
virtual int stack() = 0;
virtual int thread() = 0;
virtual int returnLow() = 0;
virtual int returnHigh() = 0;
virtual int virtualCallTarget() = 0;
virtual int virtualCallIndex() = 0;
virtual bool bigEndian() = 0;
virtual uintptr_t maximumImmediateJump() = 0;
// Whether the operation always uses a condensed (destination == first
// source) form -- NOTE(review): inferred from the name; confirm against
// the target assemblers.
virtual bool alwaysCondensed(lir::BinaryOperation op) = 0;
virtual bool alwaysCondensed(lir::TernaryOperation op) = 0;
// True if the given register is reserved and unavailable for allocation.
virtual bool reserved(int register_) = 0;
// Frame and argument size calculations.
virtual unsigned frameFootprint(unsigned footprint) = 0;
virtual unsigned argumentFootprint(unsigned footprint) = 0;
virtual bool argumentAlignment() = 0;
virtual bool argumentRegisterAlignment() = 0;
virtual unsigned argumentRegisterCount() = 0;
virtual int argumentRegister(unsigned index) = 0;
virtual bool hasLinkRegister() = 0;
virtual unsigned stackAlignmentInWords() = 0;
// Call-site inspection and patching.
virtual bool matchCall(void* returnAddress, void* target) = 0;
virtual void updateCall(lir::UnaryOperation op, void* returnAddress,
void* newTarget) = 0;
virtual void setConstant(void* dst, uint64_t constant) = 0;
virtual unsigned alignFrameSize(unsigned sizeInWords) = 0;
// Stack walking: given one frame, compute the caller's ip and stack
// pointer via the out-params 'ip' and 'stack'.
virtual void nextFrame(void* start, unsigned size, unsigned footprint,
void* link, bool mostRecent,
unsigned targetParameterFootprint, void** ip,
void** stack) = 0;
virtual void* frameIp(void* stack) = 0;
virtual unsigned frameHeaderSize() = 0;
virtual unsigned frameReturnAddressSize() = 0;
virtual unsigned frameFooterSize() = 0;
virtual int returnAddressOffset() = 0;
virtual int framePointerOffset() = 0;
// Operand planning: constrain, via OperandMask out-params, where each
// operand of an operation may live. '*thunk' is set when the operation
// cannot be emitted inline -- callers in compiler.cpp assert(not thunk)
// on paths where inline code is required.
virtual void plan
(lir::UnaryOperation op,
unsigned aSize, OperandMask& aMask,
bool* thunk) = 0;
virtual void planSource
(lir::BinaryOperation op,
unsigned aSize, OperandMask& aMask,
unsigned bSize, bool* thunk) = 0;
virtual void planDestination
(lir::BinaryOperation op,
unsigned aSize, const OperandMask& aMask,
unsigned bSize, OperandMask& bMask) = 0;
// Plan a move of 'size' bytes to a destination constrained by 'dst',
// filling in the allowed source placements and an intermediate temporary.
virtual void planMove
(unsigned size, OperandMask& src,
OperandMask& tmp,
const OperandMask& dst) = 0;
virtual void planSource
(lir::TernaryOperation op,
unsigned aSize, OperandMask& aMask,
unsigned bSize, OperandMask& bMask,
unsigned cSize, bool* thunk) = 0;
virtual void planDestination
(lir::TernaryOperation op,
unsigned aSize, const OperandMask& aMask,
unsigned bSize, const OperandMask& bMask,
unsigned cSize, OperandMask& cMask) = 0;
virtual Assembler* makeAssembler(vm::Allocator*, vm::Zone*) = 0;
// Lifetime management -- presumably reference counting for shared
// Architecture instances; TODO confirm against implementations.
virtual void acquire() = 0;
virtual void release() = 0;
};
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ARCHITECTURE_H

View File

@ -20,7 +20,7 @@
namespace avian {
namespace codegen {
class RegisterFile;
class Architecture;
class OperandInfo {
public:
@ -64,98 +64,6 @@ class Assembler {
virtual unsigned resolve(unsigned start, Block* next) = 0;
};
class Architecture {
public:
virtual unsigned floatRegisterSize() = 0;
virtual const RegisterFile* registerFile() = 0;
virtual int scratch() = 0;
virtual int stack() = 0;
virtual int thread() = 0;
virtual int returnLow() = 0;
virtual int returnHigh() = 0;
virtual int virtualCallTarget() = 0;
virtual int virtualCallIndex() = 0;
virtual bool bigEndian() = 0;
virtual uintptr_t maximumImmediateJump() = 0;
virtual bool alwaysCondensed(lir::BinaryOperation op) = 0;
virtual bool alwaysCondensed(lir::TernaryOperation op) = 0;
virtual bool reserved(int register_) = 0;
virtual unsigned frameFootprint(unsigned footprint) = 0;
virtual unsigned argumentFootprint(unsigned footprint) = 0;
virtual bool argumentAlignment() = 0;
virtual bool argumentRegisterAlignment() = 0;
virtual unsigned argumentRegisterCount() = 0;
virtual int argumentRegister(unsigned index) = 0;
virtual bool hasLinkRegister() = 0;
virtual unsigned stackAlignmentInWords() = 0;
virtual bool matchCall(void* returnAddress, void* target) = 0;
virtual void updateCall(lir::UnaryOperation op, void* returnAddress,
void* newTarget) = 0;
virtual void setConstant(void* dst, uint64_t constant) = 0;
virtual unsigned alignFrameSize(unsigned sizeInWords) = 0;
virtual void nextFrame(void* start, unsigned size, unsigned footprint,
void* link, bool mostRecent,
unsigned targetParameterFootprint, void** ip,
void** stack) = 0;
virtual void* frameIp(void* stack) = 0;
virtual unsigned frameHeaderSize() = 0;
virtual unsigned frameReturnAddressSize() = 0;
virtual unsigned frameFooterSize() = 0;
virtual int returnAddressOffset() = 0;
virtual int framePointerOffset() = 0;
virtual void plan
(lir::UnaryOperation op,
unsigned aSize, uint8_t* aTypeMask, uint64_t* aRegisterMask,
bool* thunk) = 0;
virtual void planSource
(lir::BinaryOperation op,
unsigned aSize, uint8_t* aTypeMask, uint64_t* aRegisterMask,
unsigned bSize, bool* thunk) = 0;
virtual void planDestination
(lir::BinaryOperation op,
unsigned aSize, uint8_t aTypeMask, uint64_t aRegisterMask,
unsigned bSize, uint8_t* bTypeMask, uint64_t* bRegisterMask) = 0;
virtual void planMove
(unsigned size, uint8_t* srcTypeMask, uint64_t* srcRegisterMask,
uint8_t* tmpTypeMask, uint64_t* tmpRegisterMask,
uint8_t dstTypeMask, uint64_t dstRegisterMask) = 0;
virtual void planSource
(lir::TernaryOperation op,
unsigned aSize, uint8_t* aTypeMask, uint64_t* aRegisterMask,
unsigned bSize, uint8_t* bTypeMask, uint64_t* bRegisterMask,
unsigned cSize, bool* thunk) = 0;
virtual void planDestination
(lir::TernaryOperation op,
unsigned aSize, uint8_t aTypeMask, uint64_t aRegisterMask,
unsigned bSize, uint8_t bTypeMask, uint64_t bRegisterMask,
unsigned cSize, uint8_t* cTypeMask, uint64_t* cRegisterMask) = 0;
virtual Assembler* makeAssembler(vm::Allocator*, vm::Zone*) = 0;
virtual void acquire() = 0;
virtual void release() = 0;
};
virtual void setClient(Client* client) = 0;
virtual Architecture* arch() = 0;

View File

@ -45,6 +45,28 @@ public:
{ }
};
// Iterates over the set bits of a RegisterMask within [mask.start,
// mask.limit), yielding register indices. Assumes bit 'mask.start' is set;
// otherwise the first next() returns an index whose bit is clear --
// TODO confirm callers guarantee this.
class RegisterIterator {
public:
int index;
const RegisterMask& mask;
inline RegisterIterator(const RegisterMask& mask):
  index(mask.start),
  mask(mask) {}
inline bool hasNext() {
  return index < mask.limit;
}
// Return the current register index, then advance past any clear bits to
// the next set bit (or to mask.limit).
inline int next() {
  int r = index;
  do {
    index++;
    // Shift in 64-bit arithmetic: register masks are 64 bits wide
    // elsewhere in the codegen (e.g. OperandMask::registerMask), and
    // '1 << index' on a 32-bit int is undefined behavior for index >= 31.
  } while(index < mask.limit
          && !(mask.mask & (static_cast<uint64_t>(1) << index)));
  return r;
}
};
} // namespace codegen
} // namespace avian

View File

@ -11,16 +11,20 @@
#ifndef AVIAN_CODEGEN_TARGETS_H
#define AVIAN_CODEGEN_TARGETS_H
#include <avian/vm/codegen/assembler.h>
namespace vm {
class System;
}
namespace avian {
namespace codegen {
Assembler::Architecture* makeArchitectureNative(vm::System* system, bool useNativeFeatures);
class Architecture;
Assembler::Architecture* makeArchitectureX86(vm::System* system, bool useNativeFeatures);
Assembler::Architecture* makeArchitectureArm(vm::System* system, bool useNativeFeatures);
Assembler::Architecture* makeArchitecturePowerpc(vm::System* system, bool useNativeFeatures);
Architecture* makeArchitectureNative(vm::System* system, bool useNativeFeatures);
Architecture* makeArchitectureX86(vm::System* system, bool useNativeFeatures);
Architecture* makeArchitectureArm(vm::System* system, bool useNativeFeatures);
Architecture* makeArchitecturePowerpc(vm::System* system, bool useNativeFeatures);
} // namespace codegen
} // namespace avian

144
makefile
View File

@ -1022,26 +1022,33 @@ embed-objects = $(call cpp-objects,$(embed-sources),$(src),$(build-embed))
compiler-sources = \
$(src)/codegen/compiler.cpp \
$(src)/codegen/compiler/context.cpp \
$(src)/codegen/compiler/resource.cpp \
$(src)/codegen/compiler/site.cpp \
$(src)/codegen/compiler/regalloc.cpp \
$(src)/codegen/compiler/value.cpp \
$(src)/codegen/compiler/read.cpp \
$(src)/codegen/compiler/event.cpp \
$(src)/codegen/compiler/promise.cpp \
$(src)/codegen/compiler/frame.cpp \
$(src)/codegen/compiler/ir.cpp \
$(wildcard $(src)/codegen/compiler/*.cpp) \
$(src)/codegen/registers.cpp \
$(src)/codegen/targets.cpp
compiler-objects = $(call cpp-objects,$(compiler-sources),$(src),$(build))
$(compiler-objects): $(wildcard $(src)/codegen/compiler/*.h) $(vm-depends)
x86-assembler-sources = $(wildcard $(src)/codegen/target/x86/*.cpp)
x86-assembler-objects = $(call cpp-objects,$(x86-assembler-sources),$(src),$(build))
$(x86-assembler-objects): $(wildcard $(src)/codegen/target/x86/*.h) $(vm-depends)
arm-assembler-sources = $(wildcard $(src)/codegen/target/arm/*.cpp)
arm-assembler-objects = $(call cpp-objects,$(arm-assembler-sources),$(src),$(build))
$(arm-assembler-objects): $(wildcard $(src)/codegen/target/arm/*.h) $(vm-depends)
powerpc-assembler-sources = $(wildcard $(src)/codegen/target/powerpc/*.cpp)
powerpc-assembler-objects = $(call cpp-objects,$(powerpc-assembler-sources),$(src),$(build))
$(powerpc-assembler-objects): $(wildcard $(src)/codegen/target/powerpc/*.h) $(vm-depends)
all-assembler-sources = \
$(src)/codegen/x86/assembler.cpp \
$(src)/codegen/arm/assembler.cpp \
$(src)/codegen/powerpc/assembler.cpp
$(x86-assembler-sources) \
$(arm-assembler-sources) \
$(powerpc-assembler-sources)
native-assembler-sources = \
$(src)/codegen/$(target-asm)/assembler.cpp
native-assembler-sources = $($(target-asm)-assembler-sources)
native-assembler-objects = $($(target-asm)-assembler-objects)
audit-codegen-sources = $(wildcard $(src)/tools/audit-codegen/*.cpp)
all-codegen-target-sources = \
$(compiler-sources) \
@ -1090,7 +1097,8 @@ ifeq ($(continuations),true)
asmflags += -DAVIAN_CONTINUATIONS
endif
bootimage-generator-sources = $(src)/tools/bootimage-generator/main.cpp
bootimage-generator-sources = $(src)/tools/bootimage-generator/main.cpp $(src)/util/arg-parser.cpp
ifneq ($(lzma),)
bootimage-generator-sources += $(src)/lzma-encode.cpp
endif
@ -1205,6 +1213,7 @@ dynamic-library = $(build)/$(so-prefix)jvm$(so-suffix)
executable-dynamic = $(build)/$(name)-dynamic$(exe-suffix)
unittest-executable = $(build)/$(name)-unittest${exe-suffix}
audit-codegen-executable = $(build)/audit-codegen${exe-suffix}
ifneq ($(classpath),avian)
# Assembler, ConstantPool, and Stream are not technically needed for a
@ -1275,6 +1284,7 @@ test-extra-dep = $(test-build)-extra.dep
unittest-sources = \
$(wildcard $(unittest)/*.cpp) \
$(wildcard $(unittest)/util/*.cpp) \
$(wildcard $(unittest)/codegen/*.cpp)
unittest-depends = \
@ -1364,6 +1374,14 @@ else
ssh -p$(remote-test-port) $(remote-test-user)@$(remote-test-host) sh "$(remote-test-dir)/$(platform)-$(arch)$(options)/run-tests.sh"
endif
.PHONY: audit-baseline
audit-baseline: $(audit-codegen-executable)
$(<) -output $(build)/codegen-audit-output/baseline.o -format macho
.PHONY: audit
audit: $(audit-codegen-executable)
$(<) -output $(build)/codegen-audit-output/baseline.o -format macho
.PHONY: tarball
tarball:
@echo "creating build/avian-$(version).tar.bz2"
@ -1503,6 +1521,9 @@ endif
$(unittest-objects): $(build)/unittest/%.o: $(unittest)/%.cpp $(vm-depends) $(unittest-depends)
$(compile-unittest-object)
$(build)/tools/audit-codegen/main.o: $(build)/%.o: $(src)/%.cpp $(vm-depends)
$(compile-object)
$(test-cpp-objects): $(test-build)/%.o: $(test)/%.cpp $(vm-depends)
$(compile-object)
@ -1542,19 +1563,18 @@ $(embed-loader-o): $(embed-loader) $(converter)
$(converter) $(<) $(@) _binary_loader_start \
_binary_loader_end $(target-format) $(arch)
$(embed-loader): $(embed-loader-objects) $(static-library)
@mkdir -p $(dir $(@))
cd $(dir $(@)) && $(ar) x ../../../$(static-library)
$(embed-loader): $(embed-loader-objects) $(vm-objects) $(classpath-objects) $(vm-heapwalk-objects) \
$(javahome-object) $(boot-javahome-object) $(lzma-decode-objects)
ifdef ms_cl_compiler
$(ld) $(lflags) $(dir $(@))/*.o -out:$(@) \
$(ld) $(lflags) $(^) -out:$(@) \
-debug -PDB:$(subst $(exe-suffix),.pdb,$(@)) $(manifest-flags)
ifdef mt
$(mt) -nologo -manifest $(@).manifest -outputresource:"$(@);1"
endif
else
$(dlltool) -z $(addsuffix .def,$(basename $(@))) $(dir $(@))/*.o
$(dlltool) -z $(addsuffix .def,$(basename $(@))) $(^)
$(dlltool) -d $(addsuffix .def,$(basename $(@))) -e $(addsuffix .exp,$(basename $(@)))
$(ld) $(addsuffix .exp,$(basename $(@))) $(dir $(@))/*.o \
$(ld) $(addsuffix .exp,$(basename $(@))) $(^) \
$(lflags) $(bootimage-lflags) -o $(@)
endif
$(strip) $(strip-all) $(@)
@ -1677,49 +1697,59 @@ executable-objects = $(vm-objects) $(classpath-objects) $(driver-object) \
$(javahome-object) $(boot-javahome-object) $(lzma-decode-objects)
unittest-executable-objects = $(unittest-objects) $(vm-objects) \
$(classpath-objects)
$(classpath-objects) $(build)/util/arg-parser.o
ifeq ($(process),interpret)
unittest-executable-objects += $(all-codegen-target-objects)
endif
$(executable): $(executable-objects)
@echo "linking $(@)"
ifeq ($(platform),windows)
ifdef ms_cl_compiler
$(ld) $(lflags) $(executable-objects) -out:$(@) \
-debug -PDB:$(subst $(exe-suffix),.pdb,$(@)) $(manifest-flags)
ifdef mt
$(mt) -nologo -manifest $(@).manifest -outputresource:"$(@);1"
endif
else
$(dlltool) -z $(@).def $(executable-objects)
$(dlltool) -d $(@).def -e $(@).exp
$(ld) $(@).exp $(executable-objects) $(lflags) -o $(@)
endif
else
$(ld) $(executable-objects) $(rdynamic) $(lflags) $(bootimage-lflags) -o $(@)
endif
$(strip) $(strip-all) $(@)
audit-codegen-objects = $(call cpp-objects,$(audit-codegen-sources),$(src),$(build))
audit-codegen-executable-objects = $(audit-codegen-objects) $(vm-objects) $(build)/util/arg-parser.o
.PHONY: print
print:
@echo $(audit-codegen-objects)
# apparently, make does poorly with ifs inside of defines, and indented defines.
# I suggest re-indenting the following before making edits (and unindenting afterwards):
ifneq ($(platform),windows)
define link-executable
@echo linking $(@)
$(ld) $(^) $(rdynamic) $(lflags) $(bootimage-lflags) -o $(@)
endef
else
ifdef ms_cl_compiler
ifdef mt
define link-executable
@echo linking $(@)
$(ld) $(lflags) $(^) -out:$(@) \
-debug -PDB:$(subst $(exe-suffix),.pdb,$(@)) $(manifest-flags)
$(mt) -nologo -manifest $(@).manifest -outputresource:"$(@);1"
endef
else
define link-executable
@echo linking $(@)
$(mt) -nologo -manifest $(@).manifest -outputresource:"$(@);1"
endef
endif
else
define link-executable
@echo linking $(@)
$(dlltool) -z $(@).def $(^)
$(dlltool) -d $(@).def -e $(@).exp
$(ld) $(@).exp $(^) $(lflags) -o $(@)
endef
endif
endif
$(executable): $(executable-objects)
$(link-executable)
$(unittest-executable): $(unittest-executable-objects)
@echo "linking $(@)"
ifeq ($(platform),windows)
ifdef ms_cl_compiler
$(ld) $(lflags) $(unittest-executable-objects) -out:$(@) \
-debug -PDB:$(subst $(exe-suffix),.pdb,$(@)) $(manifest-flags)
ifdef mt
$(mt) -nologo -manifest $(@).manifest -outputresource:"$(@);1"
endif
else
$(dlltool) -z $(@).def $(unittest-executable-objects)
$(dlltool) -d $(@).def -e $(@).exp
$(ld) $(@).exp $(unittest-executable-objects) $(lflags) -o $(@)
endif
else
$(ld) $(unittest-executable-objects) $(rdynamic) $(lflags) $(bootimage-lflags) -o $(@)
endif
$(link-executable)
$(audit-codegen-executable): $(audit-codegen-executable-objects)
$(link-executable)
$(bootimage-generator): $(bootimage-generator-objects)
echo building $(bootimage-generator) arch=$(build-arch) platform=$(bootimage-platform)

View File

@ -8,8 +8,8 @@
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef COMMON_H
#define COMMON_H
#ifndef AVIAN_COMMON_H
#define AVIAN_COMMON_H
#ifndef __STDC_CONSTANT_MACROS
# define __STDC_CONSTANT_MACROS
@ -27,6 +27,14 @@
#include "float.h"
#ifdef powerpc
# undef powerpc
#endif
#ifdef linux
# undef linux
#endif
// don't complain about using 'this' in member initializers:
# pragma warning(disable:4355)
@ -113,7 +121,7 @@ typedef intptr_t intptr_alias_t;
#else // not _MSC_VER
# include "stdint.h"
# include <stdint.h>
# define BYTES_PER_WORD __SIZEOF_POINTER__
@ -320,6 +328,17 @@ padWord(uintptr_t n)
return padWord(n, BytesPerWord);
}
inline bool fitsInInt8(int64_t v) {
return v == static_cast<int8_t>(v);
}
inline bool fitsInInt16(int64_t v) {
return v == static_cast<int16_t>(v);
}
inline bool fitsInInt32(int64_t v) {
return v == static_cast<int32_t>(v);
}
template <class T>
inline unsigned
wordOf(unsigned i)
@ -466,6 +485,12 @@ hash(const uint16_t* s, unsigned length)
return h;
}
inline void
write4(uint8_t* dst, uint32_t v)
{
memcpy(dst, &v, 4);
}
inline uint32_t
floatToBits(float f)
{
@ -537,4 +562,4 @@ equal(const void* a, unsigned al, const void* b, unsigned bl)
} // namespace vm
#endif // COMMON_H
#endif // AVIAN_COMMON_H

File diff suppressed because it is too large Load Diff

View File

@ -14,6 +14,7 @@
#include <avian/vm/codegen/compiler.h>
#include <avian/vm/codegen/assembler.h>
#include <avian/vm/codegen/architecture.h>
#include <avian/vm/codegen/promise.h>
#include "codegen/compiler/regalloc.h"
@ -334,16 +335,13 @@ maybeMove(Context* c, Read* read, bool intersectRead, bool includeNextWord,
virtual unsigned cost(Context* c, SiteMask dstMask)
{
uint8_t srcTypeMask;
uint64_t srcRegisterMask;
uint8_t tmpTypeMask;
uint64_t tmpRegisterMask;
OperandMask src;
OperandMask tmp;
c->arch->planMove
(size, &srcTypeMask, &srcRegisterMask,
&tmpTypeMask, &tmpRegisterMask,
dstMask.typeMask, dstMask.registerMask);
(size, src, tmp,
OperandMask(dstMask.typeMask, dstMask.registerMask));
SiteMask srcMask(srcTypeMask, srcRegisterMask, AnyFrameIndex);
SiteMask srcMask = SiteMask::lowPart(src);
for (SiteIterator it(c, value, true, includeNextWord); it.hasMore();) {
Site* s = it.next();
if (s->match(c, srcMask) or s->match(c, dstMask)) {
@ -359,26 +357,23 @@ maybeMove(Context* c, Read* read, bool intersectRead, bool includeNextWord,
bool includeNextWord;
} costCalculator(value, size, includeNextWord);
Site* dst = pickTargetSite
Site* dstSite = pickTargetSite
(c, read, intersectRead, registerReserveCount, &costCalculator);
uint8_t srcTypeMask;
uint64_t srcRegisterMask;
uint8_t tmpTypeMask;
uint64_t tmpRegisterMask;
OperandMask src;
OperandMask tmp;
c->arch->planMove
(size, &srcTypeMask, &srcRegisterMask,
&tmpTypeMask, &tmpRegisterMask,
1 << dst->type(c), dst->registerMask(c));
(size, src, tmp,
OperandMask(1 << dstSite->type(c), dstSite->registerMask(c)));
SiteMask srcMask(srcTypeMask, srcRegisterMask, AnyFrameIndex);
SiteMask srcMask = SiteMask::lowPart(src);
unsigned cost = 0xFFFFFFFF;
Site* src = 0;
Site* srcSite = 0;
for (SiteIterator it(c, value, true, includeNextWord); it.hasMore();) {
Site* s = it.next();
unsigned v = s->copyCost(c, dst);
unsigned v = s->copyCost(c, dstSite);
if (v == 0) {
src = s;
srcSite = s;
cost = 0;
break;
}
@ -386,50 +381,50 @@ maybeMove(Context* c, Read* read, bool intersectRead, bool includeNextWord,
v += CopyPenalty;
}
if (v < cost) {
src = s;
srcSite = s;
cost = v;
}
}
if (cost) {
if (DebugMoves) {
char srcb[256]; src->toString(c, srcb, 256);
char dstb[256]; dst->toString(c, dstb, 256);
char srcb[256]; srcSite->toString(c, srcb, 256);
char dstb[256]; dstSite->toString(c, dstb, 256);
fprintf(stderr, "maybe move %s to %s for %p to %p\n",
srcb, dstb, value, value);
}
src->freeze(c, value);
srcSite->freeze(c, value);
value->addSite(c, dst);
value->addSite(c, dstSite);
src->thaw(c, value);
srcSite->thaw(c, value);
if (not src->match(c, srcMask)) {
src->freeze(c, value);
dst->freeze(c, value);
if (not srcSite->match(c, srcMask)) {
srcSite->freeze(c, value);
dstSite->freeze(c, value);
SiteMask tmpMask(tmpTypeMask, tmpRegisterMask, AnyFrameIndex);
SiteMask tmpMask = SiteMask::lowPart(tmp);
SingleRead tmpRead(tmpMask, 0);
tmpRead.value = value;
tmpRead.successor_ = value;
Site* tmp = pickTargetSite(c, &tmpRead, true);
Site* tmpSite = pickTargetSite(c, &tmpRead, true);
value->addSite(c, tmp);
value->addSite(c, tmpSite);
move(c, value, src, tmp);
move(c, value, srcSite, tmpSite);
dst->thaw(c, value);
src->thaw(c, value);
dstSite->thaw(c, value);
srcSite->thaw(c, value);
src = tmp;
srcSite = tmpSite;
}
move(c, value, src, dst);
move(c, value, srcSite, dstSite);
}
return dst;
return dstSite;
}
Site*
@ -757,145 +752,143 @@ saveLocals(Context* c, Event* e)
void
maybeMove(Context* c, lir::BinaryOperation type, unsigned srcSize,
unsigned srcSelectSize, Value* src, unsigned dstSize, Value* dst,
unsigned srcSelectSize, Value* srcValue, unsigned dstSize, Value* dstValue,
const SiteMask& dstMask)
{
Read* read = live(c, dst);
Read* read = live(c, dstValue);
bool isStore = read == 0;
Site* target;
if (dst->target) {
target = dst->target;
if (dstValue->target) {
target = dstValue->target;
} else if (isStore) {
return;
} else {
target = pickTargetSite(c, read);
}
unsigned cost = src->source->copyCost(c, target);
unsigned cost = srcValue->source->copyCost(c, target);
if (srcSelectSize < dstSize) cost = 1;
if (cost) {
// todo: let c->arch->planMove decide this:
bool useTemporary = ((target->type(c) == lir::MemoryOperand
and src->source->type(c) == lir::MemoryOperand)
and srcValue->source->type(c) == lir::MemoryOperand)
or (srcSelectSize < dstSize
and target->type(c) != lir::RegisterOperand));
src->source->freeze(c, src);
srcValue->source->freeze(c, srcValue);
dst->addSite(c, target);
dstValue->addSite(c, target);
src->source->thaw(c, src);
srcValue->source->thaw(c, srcValue);
bool addOffset = srcSize != srcSelectSize
and c->arch->bigEndian()
and src->source->type(c) == lir::MemoryOperand;
and srcValue->source->type(c) == lir::MemoryOperand;
if (addOffset) {
static_cast<MemorySite*>(src->source)->offset
static_cast<MemorySite*>(srcValue->source)->offset
+= (srcSize - srcSelectSize);
}
target->freeze(c, dst);
target->freeze(c, dstValue);
if (target->match(c, dstMask) and not useTemporary) {
if (DebugMoves) {
char srcb[256]; src->source->toString(c, srcb, 256);
char srcb[256]; srcValue->source->toString(c, srcb, 256);
char dstb[256]; target->toString(c, dstb, 256);
fprintf(stderr, "move %s to %s for %p to %p\n",
srcb, dstb, src, dst);
srcb, dstb, srcValue, dstValue);
}
src->source->freeze(c, src);
srcValue->source->freeze(c, srcValue);
apply(c, type, min(srcSelectSize, dstSize), src->source, src->source,
apply(c, type, min(srcSelectSize, dstSize), srcValue->source, srcValue->source,
dstSize, target, target);
src->source->thaw(c, src);
srcValue->source->thaw(c, srcValue);
} else {
// pick a temporary register which is valid as both a
// destination and a source for the moves we need to perform:
dst->removeSite(c, target);
dstValue->removeSite(c, target);
bool thunk;
uint8_t srcTypeMask;
uint64_t srcRegisterMask;
OperandMask src;
c->arch->planSource(type, dstSize, &srcTypeMask, &srcRegisterMask,
dstSize, &thunk);
c->arch->planSource(type, dstSize, src, dstSize, &thunk);
if (src->type == lir::ValueGeneral) {
srcRegisterMask &= c->regFile->generalRegisters.mask;
if (srcValue->type == lir::ValueGeneral) {
src.registerMask &= c->regFile->generalRegisters.mask;
}
assert(c, thunk == 0);
assert(c, dstMask.typeMask & srcTypeMask & (1 << lir::RegisterOperand));
assert(c, dstMask.typeMask & src.typeMask & (1 << lir::RegisterOperand));
Site* tmpTarget = freeRegisterSite
(c, dstMask.registerMask & srcRegisterMask);
(c, dstMask.registerMask & src.registerMask);
src->source->freeze(c, src);
srcValue->source->freeze(c, srcValue);
dst->addSite(c, tmpTarget);
dstValue->addSite(c, tmpTarget);
tmpTarget->freeze(c, dst);
tmpTarget->freeze(c, dstValue);
if (DebugMoves) {
char srcb[256]; src->source->toString(c, srcb, 256);
char srcb[256]; srcValue->source->toString(c, srcb, 256);
char dstb[256]; tmpTarget->toString(c, dstb, 256);
fprintf(stderr, "move %s to %s for %p to %p\n",
srcb, dstb, src, dst);
srcb, dstb, srcValue, dstValue);
}
apply(c, type, srcSelectSize, src->source, src->source,
apply(c, type, srcSelectSize, srcValue->source, srcValue->source,
dstSize, tmpTarget, tmpTarget);
tmpTarget->thaw(c, dst);
tmpTarget->thaw(c, dstValue);
src->source->thaw(c, src);
srcValue->source->thaw(c, srcValue);
if (useTemporary or isStore) {
if (DebugMoves) {
char srcb[256]; tmpTarget->toString(c, srcb, 256);
char dstb[256]; target->toString(c, dstb, 256);
fprintf(stderr, "move %s to %s for %p to %p\n",
srcb, dstb, src, dst);
srcb, dstb, srcValue, dstValue);
}
dst->addSite(c, target);
dstValue->addSite(c, target);
tmpTarget->freeze(c, dst);
tmpTarget->freeze(c, dstValue);
apply(c, lir::Move, dstSize, tmpTarget, tmpTarget, dstSize, target, target);
tmpTarget->thaw(c, dst);
tmpTarget->thaw(c, dstValue);
if (isStore) {
dst->removeSite(c, tmpTarget);
dstValue->removeSite(c, tmpTarget);
}
}
}
target->thaw(c, dst);
target->thaw(c, dstValue);
if (addOffset) {
static_cast<MemorySite*>(src->source)->offset
static_cast<MemorySite*>(srcValue->source)->offset
-= (srcSize - srcSelectSize);
}
} else {
target = src->source;
target = srcValue->source;
if (DebugMoves) {
char dstb[256]; target->toString(c, dstb, 256);
fprintf(stderr, "null move in %s for %p to %p\n", dstb, src, dst);
fprintf(stderr, "null move in %s for %p to %p\n", dstb, srcValue, dstValue);
}
}
if (isStore) {
dst->removeSite(c, target);
dstValue->removeSite(c, target);
}
}

View File

@ -11,6 +11,8 @@
#include "codegen/compiler/context.h"
#include "codegen/compiler/resource.h"
#include <avian/vm/codegen/architecture.h>
namespace avian {
namespace codegen {
namespace compiler {

View File

@ -75,7 +75,7 @@ class Context {
vm::System* system;
Assembler* assembler;
Assembler::Architecture* arch;
Architecture* arch;
vm::Zone* zone;
Compiler::Client* client;
Stack* stack;

View File

@ -267,16 +267,15 @@ class CallEvent: public Event {
}
{ bool thunk;
uint8_t typeMask;
uint64_t planRegisterMask;
OperandMask op;
c->arch->plan
((flags & Compiler::Aligned) ? lir::AlignedCall : lir::Call, vm::TargetBytesPerWord,
&typeMask, &planRegisterMask, &thunk);
op, &thunk);
assert(c, not thunk);
this->addRead(c, address, SiteMask
(typeMask, registerMask & planRegisterMask, AnyFrameIndex));
(op.typeMask, registerMask & op.registerMask, AnyFrameIndex));
}
Stack* stack = stackBefore;
@ -546,26 +545,26 @@ void appendReturn(Context* c, unsigned size, Value* value) {
class MoveEvent: public Event {
public:
MoveEvent(Context* c, lir::BinaryOperation type, unsigned srcSize,
unsigned srcSelectSize, Value* src, unsigned dstSize, Value* dst,
unsigned srcSelectSize, Value* srcValue, unsigned dstSize, Value* dstValue,
const SiteMask& srcLowMask, const SiteMask& srcHighMask):
Event(c), type(type), srcSize(srcSize), srcSelectSize(srcSelectSize),
src(src), dstSize(dstSize), dst(dst)
srcValue(srcValue), dstSize(dstSize), dstValue(dstValue)
{
assert(c, srcSelectSize <= srcSize);
bool noop = srcSelectSize >= dstSize;
if (dstSize > vm::TargetBytesPerWord) {
dst->grow(c);
dstValue->grow(c);
}
if (srcSelectSize > vm::TargetBytesPerWord) {
src->maybeSplit(c);
srcValue->maybeSplit(c);
}
this->addReads(c, src, srcSelectSize, srcLowMask, noop ? dst : 0,
this->addReads(c, srcValue, srcSelectSize, srcLowMask, noop ? dstValue : 0,
srcHighMask,
noop and dstSize > vm::TargetBytesPerWord ? dst->nextWord : 0);
noop and dstSize > vm::TargetBytesPerWord ? dstValue->nextWord : 0);
}
virtual const char* name() {
@ -573,118 +572,116 @@ class MoveEvent: public Event {
}
virtual void compile(Context* c) {
uint8_t dstTypeMask;
uint64_t dstRegisterMask;
OperandMask dst;
c->arch->planDestination
(type,
srcSelectSize,
1 << src->source->type(c),
(static_cast<uint64_t>(src->nextWord->source->registerMask(c)) << 32)
| static_cast<uint64_t>(src->source->registerMask(c)),
dstSize,
&dstTypeMask,
&dstRegisterMask);
OperandMask(
1 << srcValue->source->type(c),
(static_cast<uint64_t>(srcValue->nextWord->source->registerMask(c)) << 32)
| static_cast<uint64_t>(srcValue->source->registerMask(c))),
dstSize, dst);
SiteMask dstLowMask(dstTypeMask, dstRegisterMask, AnyFrameIndex);
SiteMask dstHighMask(dstTypeMask, dstRegisterMask >> 32, AnyFrameIndex);
SiteMask dstLowMask = SiteMask::lowPart(dst);
SiteMask dstHighMask = SiteMask::highPart(dst);
if (srcSelectSize >= vm::TargetBytesPerWord
and dstSize >= vm::TargetBytesPerWord
and srcSelectSize >= dstSize)
{
if (dst->target) {
if (dstValue->target) {
if (dstSize > vm::TargetBytesPerWord) {
if (src->source->registerSize(c) > vm::TargetBytesPerWord) {
apply(c, lir::Move, srcSelectSize, src->source, src->source,
dstSize, dst->target, dst->target);
if (srcValue->source->registerSize(c) > vm::TargetBytesPerWord) {
apply(c, lir::Move, srcSelectSize, srcValue->source, srcValue->source,
dstSize, dstValue->target, dstValue->target);
if (live(c, dst) == 0) {
dst->removeSite(c, dst->target);
if (live(c, dstValue) == 0) {
dstValue->removeSite(c, dstValue->target);
if (dstSize > vm::TargetBytesPerWord) {
dst->nextWord->removeSite(c, dst->nextWord->target);
dstValue->nextWord->removeSite(c, dstValue->nextWord->target);
}
}
} else {
src->nextWord->source->freeze(c, src->nextWord);
srcValue->nextWord->source->freeze(c, srcValue->nextWord);
maybeMove(c, lir::Move, vm::TargetBytesPerWord, vm::TargetBytesPerWord, src,
vm::TargetBytesPerWord, dst, dstLowMask);
maybeMove(c, lir::Move, vm::TargetBytesPerWord, vm::TargetBytesPerWord, srcValue,
vm::TargetBytesPerWord, dstValue, dstLowMask);
src->nextWord->source->thaw(c, src->nextWord);
srcValue->nextWord->source->thaw(c, srcValue->nextWord);
maybeMove
(c, lir::Move, vm::TargetBytesPerWord, vm::TargetBytesPerWord, src->nextWord,
vm::TargetBytesPerWord, dst->nextWord, dstHighMask);
(c, lir::Move, vm::TargetBytesPerWord, vm::TargetBytesPerWord, srcValue->nextWord,
vm::TargetBytesPerWord, dstValue->nextWord, dstHighMask);
}
} else {
maybeMove(c, lir::Move, vm::TargetBytesPerWord, vm::TargetBytesPerWord, src,
vm::TargetBytesPerWord, dst, dstLowMask);
maybeMove(c, lir::Move, vm::TargetBytesPerWord, vm::TargetBytesPerWord, srcValue,
vm::TargetBytesPerWord, dstValue, dstLowMask);
}
} else {
Site* low = pickSiteOrMove(c, src, dst, 0, 0);
Site* low = pickSiteOrMove(c, srcValue, dstValue, 0, 0);
if (dstSize > vm::TargetBytesPerWord) {
pickSiteOrMove(c, src->nextWord, dst->nextWord, low, 1);
pickSiteOrMove(c, srcValue->nextWord, dstValue->nextWord, low, 1);
}
}
} else if (srcSelectSize <= vm::TargetBytesPerWord
and dstSize <= vm::TargetBytesPerWord)
{
maybeMove(c, type, srcSize, srcSelectSize, src, dstSize, dst,
maybeMove(c, type, srcSize, srcSelectSize, srcValue, dstSize, dstValue,
dstLowMask);
} else {
assert(c, srcSize == vm::TargetBytesPerWord);
assert(c, srcSelectSize == vm::TargetBytesPerWord);
if (dst->nextWord->target or live(c, dst->nextWord)) {
if (dstValue->nextWord->target or live(c, dstValue->nextWord)) {
assert(c, dstLowMask.typeMask & (1 << lir::RegisterOperand));
Site* low = freeRegisterSite(c, dstLowMask.registerMask);
src->source->freeze(c, src);
srcValue->source->freeze(c, srcValue);
dst->addSite(c, low);
dstValue->addSite(c, low);
low->freeze(c, dst);
low->freeze(c, dstValue);
if (DebugMoves) {
char srcb[256]; src->source->toString(c, srcb, 256);
char srcb[256]; srcValue->source->toString(c, srcb, 256);
char dstb[256]; low->toString(c, dstb, 256);
fprintf(stderr, "move %s to %s for %p\n",
srcb, dstb, src);
srcb, dstb, srcValue);
}
apply(c, lir::Move, vm::TargetBytesPerWord, src->source, src->source,
apply(c, lir::Move, vm::TargetBytesPerWord, srcValue->source, srcValue->source,
vm::TargetBytesPerWord, low, low);
low->thaw(c, dst);
low->thaw(c, dstValue);
src->source->thaw(c, src);
srcValue->source->thaw(c, srcValue);
assert(c, dstHighMask.typeMask & (1 << lir::RegisterOperand));
Site* high = freeRegisterSite(c, dstHighMask.registerMask);
low->freeze(c, dst);
low->freeze(c, dstValue);
dst->nextWord->addSite(c, high);
dstValue->nextWord->addSite(c, high);
high->freeze(c, dst->nextWord);
high->freeze(c, dstValue->nextWord);
if (DebugMoves) {
char srcb[256]; low->toString(c, srcb, 256);
char dstb[256]; high->toString(c, dstb, 256);
fprintf(stderr, "extend %s to %s for %p %p\n",
srcb, dstb, dst, dst->nextWord);
srcb, dstb, dstValue, dstValue->nextWord);
}
apply(c, lir::Move, vm::TargetBytesPerWord, low, low, dstSize, low, high);
high->thaw(c, dst->nextWord);
high->thaw(c, dstValue->nextWord);
low->thaw(c, dst);
low->thaw(c, dstValue);
} else {
pickSiteOrMove(c, src, dst, 0, 0);
pickSiteOrMove(c, srcValue, dstValue, 0, 0);
}
}
@ -696,29 +693,28 @@ class MoveEvent: public Event {
lir::BinaryOperation type;
unsigned srcSize;
unsigned srcSelectSize;
Value* src;
Value* srcValue;
unsigned dstSize;
Value* dst;
Value* dstValue;
};
void
appendMove(Context* c, lir::BinaryOperation type, unsigned srcSize,
unsigned srcSelectSize, Value* src, unsigned dstSize, Value* dst)
unsigned srcSelectSize, Value* srcValue, unsigned dstSize, Value* dstValue)
{
bool thunk;
uint8_t srcTypeMask;
uint64_t srcRegisterMask;
OperandMask src;
c->arch->planSource
(type, srcSelectSize, &srcTypeMask, &srcRegisterMask, dstSize, &thunk);
(type, srcSelectSize, src, dstSize, &thunk);
assert(c, not thunk);
append(c, new(c->zone)
MoveEvent
(c, type, srcSize, srcSelectSize, src, dstSize, dst,
SiteMask(srcTypeMask, srcRegisterMask, AnyFrameIndex),
SiteMask(srcTypeMask, srcRegisterMask >> 32, AnyFrameIndex)));
(c, type, srcSize, srcSelectSize, srcValue, dstSize, dstValue,
SiteMask::lowPart(src),
SiteMask::highPart(src)));
}
@ -794,28 +790,28 @@ Site* getTarget(Context* c, Value* value, Value* result, const SiteMask& resultM
class CombineEvent: public Event {
public:
CombineEvent(Context* c, lir::TernaryOperation type,
unsigned firstSize, Value* first,
unsigned secondSize, Value* second,
unsigned resultSize, Value* result,
unsigned firstSize, Value* firstValue,
unsigned secondSize, Value* secondValue,
unsigned resultSize, Value* resultValue,
const SiteMask& firstLowMask,
const SiteMask& firstHighMask,
const SiteMask& secondLowMask,
const SiteMask& secondHighMask):
Event(c), type(type), firstSize(firstSize), first(first),
secondSize(secondSize), second(second), resultSize(resultSize),
result(result)
Event(c), type(type), firstSize(firstSize), firstValue(firstValue),
secondSize(secondSize), secondValue(secondValue), resultSize(resultSize),
resultValue(resultValue)
{
this->addReads(c, first, firstSize, firstLowMask, firstHighMask);
this->addReads(c, firstValue, firstSize, firstLowMask, firstHighMask);
if (resultSize > vm::TargetBytesPerWord) {
result->grow(c);
resultValue->grow(c);
}
bool condensed = c->arch->alwaysCondensed(type);
this->addReads(c, second, secondSize,
secondLowMask, condensed ? result : 0,
secondHighMask, condensed ? result->nextWord : 0);
this->addReads(c, secondValue, secondSize,
secondLowMask, condensed ? resultValue : 0,
secondHighMask, condensed ? resultValue->nextWord : 0);
}
virtual const char* name() {
@ -823,99 +819,99 @@ class CombineEvent: public Event {
}
virtual void compile(Context* c) {
assert(c, first->source->type(c) == first->nextWord->source->type(c));
assert(c, firstValue->source->type(c) == firstValue->nextWord->source->type(c));
// if (second->source->type(c) != second->nextWord->source->type(c)) {
// if (secondValue->source->type(c) != secondValue->nextWord->source->type(c)) {
// fprintf(stderr, "%p %p %d : %p %p %d\n",
// second, second->source, second->source->type(c),
// second->nextWord, second->nextWord->source,
// second->nextWord->source->type(c));
// secondValue, secondValue->source, secondValue->source->type(c),
// secondValue->nextWord, secondValue->nextWord->source,
// secondValue->nextWord->source->type(c));
// }
assert(c, second->source->type(c) == second->nextWord->source->type(c));
assert(c, secondValue->source->type(c) == secondValue->nextWord->source->type(c));
freezeSource(c, firstSize, first);
freezeSource(c, firstSize, firstValue);
uint8_t cTypeMask;
uint64_t cRegisterMask;
OperandMask cMask;
c->arch->planDestination
(type,
firstSize,
1 << first->source->type(c),
(static_cast<uint64_t>(first->nextWord->source->registerMask(c)) << 32)
| static_cast<uint64_t>(first->source->registerMask(c)),
OperandMask(
1 << firstValue->source->type(c),
(static_cast<uint64_t>(firstValue->nextWord->source->registerMask(c)) << 32)
| static_cast<uint64_t>(firstValue->source->registerMask(c))),
secondSize,
1 << second->source->type(c),
(static_cast<uint64_t>(second->nextWord->source->registerMask(c)) << 32)
| static_cast<uint64_t>(second->source->registerMask(c)),
OperandMask(
1 << secondValue->source->type(c),
(static_cast<uint64_t>(secondValue->nextWord->source->registerMask(c)) << 32)
| static_cast<uint64_t>(secondValue->source->registerMask(c))),
resultSize,
&cTypeMask,
&cRegisterMask);
cMask);
SiteMask resultLowMask(cTypeMask, cRegisterMask, AnyFrameIndex);
SiteMask resultHighMask(cTypeMask, cRegisterMask >> 32, AnyFrameIndex);
SiteMask resultLowMask = SiteMask::lowPart(cMask);
SiteMask resultHighMask = SiteMask::highPart(cMask);
Site* low = getTarget(c, second, result, resultLowMask);
Site* low = getTarget(c, secondValue, resultValue, resultLowMask);
unsigned lowSize = low->registerSize(c);
Site* high
= (resultSize > lowSize
? getTarget(c, second->nextWord, result->nextWord, resultHighMask)
? getTarget(c, secondValue->nextWord, resultValue->nextWord, resultHighMask)
: low);
// fprintf(stderr, "combine %p:%p and %p:%p into %p:%p\n",
// first, first->nextWord,
// second, second->nextWord,
// result, result->nextWord);
// firstValue, firstValue->nextWord,
// secondValue, secondValue->nextWord,
// resultValue, resultValue->nextWord);
apply(c, type,
firstSize, first->source, first->nextWord->source,
secondSize, second->source, second->nextWord->source,
firstSize, firstValue->source, firstValue->nextWord->source,
secondSize, secondValue->source, secondValue->nextWord->source,
resultSize, low, high);
thawSource(c, firstSize, first);
thawSource(c, firstSize, firstValue);
for (Read* r = reads; r; r = r->eventNext) {
popRead(c, this, r->value);
}
low->thaw(c, second);
low->thaw(c, secondValue);
if (resultSize > lowSize) {
high->thaw(c, second->nextWord);
high->thaw(c, secondValue->nextWord);
}
if (live(c, result)) {
result->addSite(c, low);
if (resultSize > lowSize and live(c, result->nextWord)) {
result->nextWord->addSite(c, high);
if (live(c, resultValue)) {
resultValue->addSite(c, low);
if (resultSize > lowSize and live(c, resultValue->nextWord)) {
resultValue->nextWord->addSite(c, high);
}
}
}
lir::TernaryOperation type;
unsigned firstSize;
Value* first;
Value* firstValue;
unsigned secondSize;
Value* second;
Value* secondValue;
unsigned resultSize;
Value* result;
Value* resultValue;
};
void
appendCombine(Context* c, lir::TernaryOperation type,
unsigned firstSize, Value* first,
unsigned secondSize, Value* second,
unsigned resultSize, Value* result)
unsigned firstSize, Value* firstValue,
unsigned secondSize, Value* secondValue,
unsigned resultSize, Value* resultValue)
{
bool thunk;
uint8_t firstTypeMask;
uint64_t firstRegisterMask;
uint8_t secondTypeMask;
uint64_t secondRegisterMask;
OperandMask firstMask;
OperandMask secondMask;
c->arch->planSource(type, firstSize, &firstTypeMask, &firstRegisterMask,
secondSize, &secondTypeMask, &secondRegisterMask,
resultSize, &thunk);
c->arch->planSource(type,
firstSize, firstMask,
secondSize, secondMask,
resultSize,
&thunk);
if (thunk) {
Stack* oldStack = c->stack;
@ -927,8 +923,8 @@ appendCombine(Context* c, lir::TernaryOperation type,
unsigned stackSize = ceilingDivide(secondSize, vm::TargetBytesPerWord)
+ ceilingDivide(firstSize, vm::TargetBytesPerWord);
compiler::push(c, ceilingDivide(secondSize, vm::TargetBytesPerWord), second);
compiler::push(c, ceilingDivide(firstSize, vm::TargetBytesPerWord), first);
compiler::push(c, ceilingDivide(secondSize, vm::TargetBytesPerWord), secondValue);
compiler::push(c, ceilingDivide(firstSize, vm::TargetBytesPerWord), firstValue);
if (threadParameter) {
++ stackSize;
@ -940,40 +936,40 @@ appendCombine(Context* c, lir::TernaryOperation type,
c->stack = oldStack;
appendCall
(c, value(c, lir::ValueGeneral, constantSite(c, handler)), 0, 0, result,
(c, value(c, lir::ValueGeneral, constantSite(c, handler)), 0, 0, resultValue,
resultSize, argumentStack, stackSize, 0);
} else {
append
(c, new(c->zone)
CombineEvent
(c, type,
firstSize, first,
secondSize, second,
resultSize, result,
SiteMask(firstTypeMask, firstRegisterMask, AnyFrameIndex),
SiteMask(firstTypeMask, firstRegisterMask >> 32, AnyFrameIndex),
SiteMask(secondTypeMask, secondRegisterMask, AnyFrameIndex),
SiteMask(secondTypeMask, secondRegisterMask >> 32, AnyFrameIndex)));
firstSize, firstValue,
secondSize, secondValue,
resultSize, resultValue,
SiteMask::lowPart(firstMask),
SiteMask::highPart(firstMask),
SiteMask::lowPart(secondMask),
SiteMask::highPart(secondMask)));
}
}
class TranslateEvent: public Event {
public:
TranslateEvent(Context* c, lir::BinaryOperation type, unsigned valueSize,
Value* value, unsigned resultSize, Value* result,
Value* value, unsigned resultSize, Value* resultValue,
const SiteMask& valueLowMask,
const SiteMask& valueHighMask):
Event(c), type(type), valueSize(valueSize), resultSize(resultSize),
value(value), result(result)
value(value), resultValue(resultValue)
{
bool condensed = c->arch->alwaysCondensed(type);
if (resultSize > vm::TargetBytesPerWord) {
result->grow(c);
resultValue->grow(c);
}
this->addReads(c, value, valueSize, valueLowMask, condensed ? result : 0,
valueHighMask, condensed ? result->nextWord : 0);
this->addReads(c, value, valueSize, valueLowMask, condensed ? resultValue : 0,
valueHighMask, condensed ? resultValue->nextWord : 0);
}
virtual const char* name() {
@ -983,27 +979,26 @@ class TranslateEvent: public Event {
virtual void compile(Context* c) {
assert(c, value->source->type(c) == value->nextWord->source->type(c));
uint8_t bTypeMask;
uint64_t bRegisterMask;
OperandMask bMask;
c->arch->planDestination
(type,
valueSize,
1 << value->source->type(c),
(static_cast<uint64_t>(value->nextWord->source->registerMask(c)) << 32)
| static_cast<uint64_t>(value->source->registerMask(c)),
OperandMask(
1 << value->source->type(c),
(static_cast<uint64_t>(value->nextWord->source->registerMask(c)) << 32)
| static_cast<uint64_t>(value->source->registerMask(c))),
resultSize,
&bTypeMask,
&bRegisterMask);
bMask);
SiteMask resultLowMask(bTypeMask, bRegisterMask, AnyFrameIndex);
SiteMask resultHighMask(bTypeMask, bRegisterMask >> 32, AnyFrameIndex);
SiteMask resultLowMask = SiteMask::lowPart(bMask);
SiteMask resultHighMask = SiteMask::highPart(bMask);
Site* low = getTarget(c, value, result, resultLowMask);
Site* low = getTarget(c, value, resultValue, resultLowMask);
unsigned lowSize = low->registerSize(c);
Site* high
= (resultSize > lowSize
? getTarget(c, value->nextWord, result->nextWord, resultHighMask)
? getTarget(c, value->nextWord, resultValue->nextWord, resultHighMask)
: low);
apply(c, type, valueSize, value->source, value->nextWord->source,
@ -1018,10 +1013,10 @@ class TranslateEvent: public Event {
high->thaw(c, value->nextWord);
}
if (live(c, result)) {
result->addSite(c, low);
if (resultSize > lowSize and live(c, result->nextWord)) {
result->nextWord->addSite(c, high);
if (live(c, resultValue)) {
resultValue->addSite(c, low);
if (resultSize > lowSize and live(c, resultValue->nextWord)) {
resultValue->nextWord->addSite(c, high);
}
}
}
@ -1030,7 +1025,7 @@ class TranslateEvent: public Event {
unsigned valueSize;
unsigned resultSize;
Value* value;
Value* result;
Value* resultValue;
Read* resultRead;
SiteMask resultLowMask;
SiteMask resultHighMask;
@ -1038,19 +1033,18 @@ class TranslateEvent: public Event {
void
appendTranslate(Context* c, lir::BinaryOperation type, unsigned firstSize,
Value* first, unsigned resultSize, Value* result)
Value* firstValue, unsigned resultSize, Value* resultValue)
{
bool thunk;
uint8_t firstTypeMask;
uint64_t firstRegisterMask;
OperandMask first;
c->arch->planSource(type, firstSize, &firstTypeMask, &firstRegisterMask,
c->arch->planSource(type, firstSize, first,
resultSize, &thunk);
if (thunk) {
Stack* oldStack = c->stack;
compiler::push(c, ceilingDivide(firstSize, vm::TargetBytesPerWord), first);
compiler::push(c, ceilingDivide(firstSize, vm::TargetBytesPerWord), firstValue);
Stack* argumentStack = c->stack;
c->stack = oldStack;
@ -1059,14 +1053,14 @@ appendTranslate(Context* c, lir::BinaryOperation type, unsigned firstSize,
(c, value
(c, lir::ValueGeneral, constantSite
(c, c->client->getThunk(type, firstSize, resultSize))),
0, 0, result, resultSize, argumentStack,
0, 0, resultValue, resultSize, argumentStack,
ceilingDivide(firstSize, vm::TargetBytesPerWord), 0);
} else {
append(c, new(c->zone)
TranslateEvent
(c, type, firstSize, first, resultSize, result,
SiteMask(firstTypeMask, firstRegisterMask, AnyFrameIndex),
SiteMask(firstTypeMask, firstRegisterMask >> 32, AnyFrameIndex)));
(c, type, firstSize, firstValue, resultSize, resultValue,
SiteMask::lowPart(first),
SiteMask::highPart(first)));
}
}
@ -1309,23 +1303,24 @@ thunkBranch(Context* c, lir::TernaryOperation type)
class BranchEvent: public Event {
public:
BranchEvent(Context* c, lir::TernaryOperation type, unsigned size,
Value* first, Value* second, Value* address,
Value* firstValue, Value* secondValue, Value* addressValue,
const SiteMask& firstLowMask,
const SiteMask& firstHighMask,
const SiteMask& secondLowMask,
const SiteMask& secondHighMask):
Event(c), type(type), size(size), first(first), second(second),
address(address)
Event(c), type(type), size(size), firstValue(firstValue), secondValue(secondValue),
addressValue(addressValue)
{
this->addReads(c, first, size, firstLowMask, firstHighMask);
this->addReads(c, second, size, secondLowMask, secondHighMask);
this->addReads(c, firstValue, size, firstLowMask, firstHighMask);
this->addReads(c, secondValue, size, secondLowMask, secondHighMask);
uint8_t typeMask;
uint64_t registerMask;
c->arch->planDestination(type, size, 0, 0, size, 0, 0, vm::TargetBytesPerWord,
&typeMask, &registerMask);
OperandMask dstMask;
c->arch->planDestination(type,
size, OperandMask(0, 0),
size, OperandMask(0, 0),
vm::TargetBytesPerWord, dstMask);
this->addRead(c, address, SiteMask(typeMask, registerMask, AnyFrameIndex));
this->addRead(c, addressValue, SiteMask::lowPart(dstMask));
}
virtual const char* name() {
@ -1333,8 +1328,8 @@ class BranchEvent: public Event {
}
virtual void compile(Context* c) {
ConstantSite* firstConstant = findConstantSite(c, first);
ConstantSite* secondConstant = findConstantSite(c, second);
ConstantSite* firstConstant = findConstantSite(c, firstValue);
ConstantSite* secondConstant = findConstantSite(c, secondValue);
if (not this->isUnreachable()) {
if (firstConstant
@ -1342,31 +1337,31 @@ class BranchEvent: public Event {
and firstConstant->value->resolved()
and secondConstant->value->resolved())
{
int64_t firstValue = firstConstant->value->value();
int64_t secondValue = secondConstant->value->value();
int64_t firstConstVal = firstConstant->value->value();
int64_t secondConstVal = secondConstant->value->value();
if (size > vm::TargetBytesPerWord) {
firstValue |= findConstantSite
(c, first->nextWord)->value->value() << 32;
secondValue |= findConstantSite
(c, second->nextWord)->value->value() << 32;
firstConstVal |= findConstantSite
(c, firstValue->nextWord)->value->value() << 32;
secondConstVal |= findConstantSite
(c, secondValue->nextWord)->value->value() << 32;
}
if (shouldJump(c, type, size, firstValue, secondValue)) {
apply(c, lir::Jump, vm::TargetBytesPerWord, address->source, address->source);
if (shouldJump(c, type, size, firstConstVal, secondConstVal)) {
apply(c, lir::Jump, vm::TargetBytesPerWord, addressValue->source, addressValue->source);
}
} else {
freezeSource(c, size, first);
freezeSource(c, size, second);
freezeSource(c, vm::TargetBytesPerWord, address);
freezeSource(c, size, firstValue);
freezeSource(c, size, secondValue);
freezeSource(c, vm::TargetBytesPerWord, addressValue);
apply(c, type, size, first->source, first->nextWord->source,
size, second->source, second->nextWord->source,
vm::TargetBytesPerWord, address->source, address->source);
apply(c, type, size, firstValue->source, firstValue->nextWord->source,
size, secondValue->source, secondValue->nextWord->source,
vm::TargetBytesPerWord, addressValue->source, addressValue->source);
thawSource(c, vm::TargetBytesPerWord, address);
thawSource(c, size, second);
thawSource(c, size, first);
thawSource(c, vm::TargetBytesPerWord, addressValue);
thawSource(c, size, secondValue);
thawSource(c, size, firstValue);
}
}
@ -1379,24 +1374,23 @@ class BranchEvent: public Event {
lir::TernaryOperation type;
unsigned size;
Value* first;
Value* second;
Value* address;
Value* firstValue;
Value* secondValue;
Value* addressValue;
};
void
appendBranch(Context* c, lir::TernaryOperation type, unsigned size, Value* first,
Value* second, Value* address)
appendBranch(Context* c, lir::TernaryOperation type, unsigned size, Value* firstValue,
Value* secondValue, Value* addressValue)
{
bool thunk;
uint8_t firstTypeMask;
uint64_t firstRegisterMask;
uint8_t secondTypeMask;
uint64_t secondRegisterMask;
OperandMask firstMask;
OperandMask secondMask;
c->arch->planSource(type, size, &firstTypeMask, &firstRegisterMask,
size, &secondTypeMask, &secondRegisterMask,
vm::TargetBytesPerWord, &thunk);
c->arch->planSource(type,
size, firstMask,
size, secondMask,
vm::TargetBytesPerWord, &thunk);
if (thunk) {
Stack* oldStack = c->stack;
@ -1407,8 +1401,8 @@ appendBranch(Context* c, lir::TernaryOperation type, unsigned size, Value* first
assert(c, not threadParameter);
compiler::push(c, ceilingDivide(size, vm::TargetBytesPerWord), second);
compiler::push(c, ceilingDivide(size, vm::TargetBytesPerWord), first);
compiler::push(c, ceilingDivide(size, vm::TargetBytesPerWord), secondValue);
compiler::push(c, ceilingDivide(size, vm::TargetBytesPerWord), firstValue);
Stack* argumentStack = c->stack;
c->stack = oldStack;
@ -1421,16 +1415,16 @@ appendBranch(Context* c, lir::TernaryOperation type, unsigned size, Value* first
appendBranch(c, thunkBranch(c, type), 4, value
(c, lir::ValueGeneral, constantSite(c, static_cast<int64_t>(0))),
result, address);
result, addressValue);
} else {
append
(c, new(c->zone)
BranchEvent
(c, type, size, first, second, address,
SiteMask(firstTypeMask, firstRegisterMask, AnyFrameIndex),
SiteMask(firstTypeMask, firstRegisterMask >> 32, AnyFrameIndex),
SiteMask(secondTypeMask, secondRegisterMask, AnyFrameIndex),
SiteMask(secondTypeMask, secondRegisterMask >> 32, AnyFrameIndex)));
(c, type, size, firstValue, secondValue, addressValue,
SiteMask::lowPart(firstMask),
SiteMask::highPart(firstMask),
SiteMask::lowPart(secondMask),
SiteMask::highPart(secondMask)));
}
}
@ -1478,13 +1472,12 @@ class JumpEvent: public Event {
cleanLocals(cleanLocals)
{
bool thunk;
uint8_t typeMask;
uint64_t registerMask;
c->arch->plan(type, vm::TargetBytesPerWord, &typeMask, &registerMask, &thunk);
OperandMask mask;
c->arch->plan(type, vm::TargetBytesPerWord, mask, &thunk);
assert(c, not thunk);
this->addRead(c, address, SiteMask(typeMask, registerMask, AnyFrameIndex));
this->addRead(c, address, SiteMask::lowPart(mask));
}
virtual const char* name() {

View File

@ -13,6 +13,8 @@
#include "codegen/compiler/context.h"
#include "codegen/compiler/frame.h"
#include <avian/vm/codegen/architecture.h>
namespace avian {
namespace codegen {
namespace compiler {

View File

@ -11,6 +11,8 @@
#ifndef AVIAN_CODEGEN_COMPILER_SITE_H
#define AVIAN_CODEGEN_COMPILER_SITE_H
#include <avian/vm/codegen/architecture.h>
#include "codegen/compiler/value.h"
#include "codegen/compiler/context.h"
@ -40,6 +42,14 @@ class SiteMask {
return SiteMask(1 << lir::RegisterOperand, 1 << number, NoFrameIndex);
}
static SiteMask lowPart(const OperandMask& mask) {
return SiteMask(mask.typeMask, mask.registerMask, AnyFrameIndex);
}
static SiteMask highPart(const OperandMask& mask) {
return SiteMask(mask.typeMask, mask.registerMask >> 32, AnyFrameIndex);
}
uint8_t typeMask;
uint32_t registerMask;
int frameIndex;

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,962 @@
/* Copyright (c) 2010-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include <avian/util/runtime-array.h>
#include <avian/vm/codegen/assembler.h>
#include <avian/vm/codegen/architecture.h>
#include <avian/vm/codegen/registers.h>
#include "context.h"
#include "block.h"
#include "fixup.h"
#include "multimethod.h"
#include "encode.h"
#include "operations.h"
#include "registers.h"
#include "../multimethod.h"
#include "avian/alloc-vector.h"
#include <avian/util/abort.h>
using namespace vm;
using namespace avian::codegen;
using namespace avian::util;
namespace avian {
namespace codegen {
namespace arm {
namespace isa {
// HARDWARE FLAGS
// Reports whether VFP (hardware floating-point) instructions may be
// emitted.  Decided at compile time from the float ABI the build
// targets.
// TODO: use runtime detection instead.
bool vfpSupported() {
#ifdef __ARM_PCS_VFP
  // Hard-float ABI (armhf): VFP is guaranteed present.
  return true;
#else
  // Soft-float ABI (armel): avoid VFP for compatibility.
  // TODO: allow VFP use for -mfloat-abi=softfp armel builds.
  // GCC's -mfloat-abi=softfp allows use of VFP while remaining
  // compatible with soft-float code.
  return false;
#endif
}
} // namespace isa
inline unsigned lo8(int64_t i) { return (unsigned)(i&MASK_LO8); }
// Register sets offered to the register allocator: general-purpose
// registers always, floating-point registers only when VFP is usable.
const RegisterFile MyRegisterFileWithoutFloats(GPR_MASK, 0);
const RegisterFile MyRegisterFileWithFloats(GPR_MASK, FPR_MASK);

// Size, in words, of the per-frame bookkeeping header.
const unsigned FrameHeaderSize = 1;

// AAPCS requires 8-byte stack alignment at public interfaces.
const unsigned StackAlignmentInBytes = 8;
const unsigned StackAlignmentInWords
= StackAlignmentInBytes / TargetBytesPerWord;

// Forward declarations; definitions appear later in this file.
void resolve(MyBlock*);

unsigned padding(MyBlock*, unsigned);

class ConstantPoolEntry;
// BEGIN OPERATION COMPILERS
using namespace isa;
// END OPERATION COMPILERS
// Stack footprint, in words, of an argument area holding `footprint`
// words: rounded up to the stack alignment, and never smaller than
// one alignment unit.
unsigned
argumentFootprint(unsigned footprint)
{
  unsigned padded = pad(footprint, StackAlignmentInWords);
  return padded > StackAlignmentInWords ? padded : StackAlignmentInWords;
}
// Unwind one frame of compiled ARM code: given the current *ip and
// *stack for a frame whose code starts at `start`, update them to
// refer to the caller's frame.  `link` is the return address to use
// when execution has not yet progressed past the prologue (the saved
// value of the link register, presumably -- TODO confirm with caller).
void
nextFrame(ArchitectureContext* con, uint32_t* start, unsigned size UNUSED,
          unsigned footprint, void* link, bool,
          unsigned targetParameterFootprint UNUSED, void** ip, void** stack)
{
  assert(con, *ip >= start);
  assert(con, *ip <= start + (size / TargetBytesPerWord));

  uint32_t* instruction = static_cast<uint32_t*>(*ip);

  // Top bits 0xe59 correspond to an ldr-immediate instruction;
  // assumed here to mark a three-word stack-overflow-check prologue --
  // TODO confirm against the code that emits the prologue.
  if ((*start >> 20) == 0xe59) {
    // skip stack overflow check
    start += 3;
  }

  // Not yet past the first prologue instruction: the frame has not
  // been set up, so the caller's ip is `link` and the stack pointer
  // is unchanged.
  if (instruction <= start) {
    *ip = link;
    return;
  }

  // Words between this frame's stack pointer and the caller's.
  unsigned offset = footprint + FrameHeaderSize;

  // Past the frame allocation but before the return address was
  // stored: pop the frame, but take the return address from `link`.
  if (instruction <= start + 2) {
    *ip = link;
    *stack = static_cast<void**>(*stack) + offset;
    return;
  }

  // 0xe12fff1e is "bx lr": sitting on the return instruction itself,
  // the frame has already been popped.
  if (*instruction == 0xe12fff1e) { // return
    *ip = link;
    return;
  }

  if (TailCalls) {
    // Account for any extra argument area the callee was given beyond
    // the minimum alignment unit.
    if (argumentFootprint(targetParameterFootprint) > StackAlignmentInWords) {
      offset += argumentFootprint(targetParameterFootprint)
        - StackAlignmentInWords;
    }

    // check for post-non-tail-call stack adjustment of the form "add
    // sp, sp, #offset":
    if ((*instruction >> 12) == 0xe24dd) {
      unsigned value = *instruction & 0xff;
      unsigned rotation = (*instruction >> 8) & 0xf;
      switch (rotation) {
      // rotation 0: immediate is a byte count
      case  0: offset -= value / TargetBytesPerWord; break;
      // rotation 15: rotate-right by 30 == value << 2 bytes, i.e.
      // `value` words when TargetBytesPerWord is 4
      case 15: offset -= value; break;
      default: abort(con);
      }
    }

    // todo: check for and handle tail calls
  }

  // Normal case: return address was saved at the top of the frame.
  *ip = static_cast<void**>(*stack)[offset - 1];
  *stack = static_cast<void**>(*stack) + offset;
}
// ARM implementation of the codegen Architecture interface: describes
// register conventions, frame layout, call-site patching, and operand
// placement constraints (the plan* methods) to the generic compiler.
class MyArchitecture: public Architecture {
 public:
  MyArchitecture(System* system): con(system), referenceCount(0) {
    populateTables(&con);
  }

  // VFP double registers are 8 bytes; 0 disables float registers.
  virtual unsigned floatRegisterSize() {
    return vfpSupported() ? 8 : 0;
  }

  virtual const RegisterFile* registerFile() {
    return vfpSupported() ? &MyRegisterFileWithFloats : &MyRegisterFileWithoutFloats;
  }

  // Register 5 is used as the scratch register.
  virtual int scratch() {
    return 5;
  }

  virtual int stack() {
    return StackRegister;
  }

  virtual int thread() {
    return ThreadRegister;
  }

  // 64-bit return values live in the register pair 0 (low) / 1 (high).
  virtual int returnLow() {
    return 0;
  }

  virtual int returnHigh() {
    return 1;
  }

  // Registers used by the virtual-call dispatch protocol.
  virtual int virtualCallTarget() {
    return 4;
  }

  virtual int virtualCallIndex() {
    return 3;
  }

  virtual bool bigEndian() {
    return false;
  }

  // Maximum reach of a branch-immediate instruction, in bytes.
  virtual uintptr_t maximumImmediateJump() {
    return 0x1FFFFFF;
  }

  // Registers the allocator must never hand out.
  virtual bool reserved(int register_) {
    switch (register_) {
    case LinkRegister:
    case StackRegister:
    case ThreadRegister:
    case ProgramCounter:
      return true;

    default:
      return false;
    }
  }

  virtual unsigned frameFootprint(unsigned footprint) {
    return max(footprint, StackAlignmentInWords);
  }

  virtual unsigned argumentFootprint(unsigned footprint) {
    return arm::argumentFootprint(footprint);
  }

  // iOS uses a tighter argument-packing convention than AAPCS.
  virtual bool argumentAlignment() {
#ifdef __APPLE__
    return false;
#else
    return true;
#endif
  }

  virtual bool argumentRegisterAlignment() {
#ifdef __APPLE__
    return false;
#else
    return true;
#endif
  }

  // Arguments are passed in registers 0-3.
  virtual unsigned argumentRegisterCount() {
    return 4;
  }

  virtual int argumentRegister(unsigned index) {
    assert(&con, index < argumentRegisterCount());

    return index;
  }

  virtual bool hasLinkRegister() {
    return true;
  }

  virtual unsigned stackAlignmentInWords() {
    return StackAlignmentInWords;
  }

  // True if the instruction preceding returnAddress is a bl to target.
  virtual bool matchCall(void* returnAddress, void* target) {
    uint32_t* instruction = static_cast<uint32_t*>(returnAddress) - 1;

    return *instruction == static_cast<uint32_t>
      (bl(static_cast<uint8_t*>(target)
          - reinterpret_cast<uint8_t*>(instruction)));
  }

  // Patch an already-emitted call/jump site to point at newTarget.
  virtual void updateCall(lir::UnaryOperation op UNUSED,
                          void* returnAddress,
                          void* newTarget)
  {
    switch (op) {
    case lir::Call:
    case lir::Jump:
    case lir::AlignedCall:
    case lir::AlignedJump: {
      // short form: rewrite the branch offset in place
      updateOffset(con.s, static_cast<uint8_t*>(returnAddress) - 4,
                   reinterpret_cast<intptr_t>(newTarget));
    } break;

    case lir::LongCall:
    case lir::LongJump:
    case lir::AlignedLongCall:
    case lir::AlignedLongJump: {
      // long form: the target address lives in the constant pool;
      // locate the pool slot via the load's offset field and rewrite it
      uint32_t* p = static_cast<uint32_t*>(returnAddress) - 2;
      *reinterpret_cast<void**>(p + (((*p & PoolOffsetMask) + 8) / 4))
        = newTarget;
    } break;

    default: abort(&con);
    }
  }

  virtual unsigned constantCallSize() {
    return 4;
  }

  virtual void setConstant(void* dst, uint64_t constant) {
    *static_cast<target_uintptr_t*>(dst) = constant;
  }

  // Round the frame size up so header + body is stack-aligned.
  virtual unsigned alignFrameSize(unsigned sizeInWords) {
    return pad(sizeInWords + FrameHeaderSize, StackAlignmentInWords)
      - FrameHeaderSize;
  }

  virtual void nextFrame(void* start, unsigned size, unsigned footprint,
                         void* link, bool mostRecent,
                         unsigned targetParameterFootprint, void** ip,
                         void** stack)
  {
    arm::nextFrame(&con, static_cast<uint32_t*>(start), size, footprint, link,
                   mostRecent, targetParameterFootprint, ip, stack);
  }

  virtual void* frameIp(void* stack) {
    return stack ? static_cast<void**>(stack)[returnAddressOffset()] : 0;
  }

  virtual unsigned frameHeaderSize() {
    return FrameHeaderSize;
  }

  virtual unsigned frameReturnAddressSize() {
    return 0;
  }

  virtual unsigned frameFooterSize() {
    return 0;
  }

  // The return address is stored one word below the stack pointer.
  virtual int returnAddressOffset() {
    return -1;
  }

  virtual int framePointerOffset() {
    return 0;
  }

  virtual bool alwaysCondensed(lir::BinaryOperation) {
    return false;
  }

  virtual bool alwaysCondensed(lir::TernaryOperation) {
    return false;
  }

  // Operand constraints for unary operations: register or constant.
  virtual void plan
  (lir::UnaryOperation,
   unsigned, OperandMask& aMask,
   bool* thunk)
  {
    aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
    aMask.registerMask = ~static_cast<uint64_t>(0);
    *thunk = false;
  }

  // Source-operand constraints for binary operations; sets *thunk when
  // the operation must be performed by a runtime helper instead.
  virtual void planSource
  (lir::BinaryOperation op,
   unsigned aSize, OperandMask& aMask,
   unsigned bSize, bool* thunk)
  {
    *thunk = false;
    aMask.typeMask = ~0;
    aMask.registerMask = GPR_MASK64;

    switch (op) {
    case lir::Negate:
      aMask.typeMask = (1 << lir::RegisterOperand);
      aMask.registerMask = GPR_MASK64;
      break;

    case lir::Absolute:
      *thunk = true;
      break;

    case lir::FloatAbsolute:
    case lir::FloatSquareRoot:
    case lir::FloatNegate:
    case lir::Float2Float:
      if (vfpSupported()) {
        aMask.typeMask = (1 << lir::RegisterOperand);
        aMask.registerMask = FPR_MASK64;
      } else {
        *thunk = true;
      }
      break;

    case lir::Float2Int:
      // todo: Java requires different semantics than SSE for
      // converting floats to integers, so we need to either use
      // thunks or produce inline machine code which handles edge
      // cases properly.
      if (false && vfpSupported() && bSize == 4) {
        aMask.typeMask = (1 << lir::RegisterOperand);
        aMask.registerMask = FPR_MASK64;
      } else {
        *thunk = true;
      }
      break;

    case lir::Int2Float:
      if (vfpSupported() && aSize == 4) {
        aMask.typeMask = (1 << lir::RegisterOperand);
        aMask.registerMask = GPR_MASK64;
      } else {
        *thunk = true;
      }
      break;

    default:
      break;
    }
  }

  // Destination-operand constraints for binary operations.
  virtual void planDestination
  (lir::BinaryOperation op,
   unsigned, const OperandMask& aMask,
   unsigned, OperandMask& bMask)
  {
    bMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
    bMask.registerMask = GPR_MASK64;

    switch (op) {
    case lir::Negate:
      bMask.typeMask = (1 << lir::RegisterOperand);
      bMask.registerMask = GPR_MASK64;
      break;

    case lir::FloatAbsolute:
    case lir::FloatSquareRoot:
    case lir::FloatNegate:
    case lir::Float2Float:
    case lir::Int2Float:
      bMask.typeMask = (1 << lir::RegisterOperand);
      bMask.registerMask = FPR_MASK64;
      break;

    case lir::Float2Int:
      bMask.typeMask = (1 << lir::RegisterOperand);
      bMask.registerMask = GPR_MASK64;
      break;

    case lir::Move:
      // if the source can't be a register, force the destination to be
      // one so the move is encodable
      if (!(aMask.typeMask & 1 << lir::RegisterOperand)) {
        bMask.typeMask = 1 << lir::RegisterOperand;
      }
      break;

    default:
      break;
    }
  }

  // Constraints for a move: which source forms are acceptable and
  // whether a temporary register is required.
  virtual void planMove
  (unsigned, OperandMask& srcMask,
   OperandMask& tmpMask,
   const OperandMask& dstMask)
  {
    srcMask.typeMask = ~0;
    srcMask.registerMask = ~static_cast<uint64_t>(0);

    tmpMask.typeMask = 0;
    tmpMask.registerMask = 0;

    if (dstMask.typeMask & (1 << lir::MemoryOperand)) {
      // can't move directly from memory or constant to memory
      srcMask.typeMask = 1 << lir::RegisterOperand;
      tmpMask.typeMask = 1 << lir::RegisterOperand;
      tmpMask.registerMask = GPR_MASK64;
    } else if (vfpSupported() &&
               dstMask.typeMask & 1 << lir::RegisterOperand &&
               dstMask.registerMask & FPR_MASK) {
      // moves into a float register must come from a register or
      // memory, possibly via a temporary
      srcMask.typeMask = tmpMask.typeMask = 1 << lir::RegisterOperand |
                                            1 << lir::MemoryOperand;
      tmpMask.registerMask = ~static_cast<uint64_t>(0);
    }
  }

  // Source-operand constraints for ternary operations; sets *thunk for
  // operations with no inline implementation (e.g. integer division).
  virtual void planSource
  (lir::TernaryOperation op,
   unsigned, OperandMask& aMask,
   unsigned bSize, OperandMask& bMask,
   unsigned, bool* thunk)
  {
    aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
    aMask.registerMask = GPR_MASK64;

    bMask.typeMask = (1 << lir::RegisterOperand);
    bMask.registerMask = GPR_MASK64;

    *thunk = false;

    switch (op) {
    case lir::ShiftLeft:
    case lir::ShiftRight:
    case lir::UnsignedShiftRight:
      // 64-bit shifts need both operands in registers
      if (bSize == 8) aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
      break;

    case lir::Add:
    case lir::Subtract:
    case lir::Or:
    case lir::Xor:
    case lir::Multiply:
      aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
      break;

    case lir::Divide:
    case lir::Remainder:
    case lir::FloatRemainder:
      *thunk = true;
      break;

    case lir::FloatAdd:
    case lir::FloatSubtract:
    case lir::FloatMultiply:
    case lir::FloatDivide:
      if (vfpSupported()) {
        aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
        aMask.registerMask = bMask.registerMask = FPR_MASK64;
      } else {
        *thunk = true;
      }
      break;

    case lir::JumpIfFloatEqual:
    case lir::JumpIfFloatNotEqual:
    case lir::JumpIfFloatLess:
    case lir::JumpIfFloatGreater:
    case lir::JumpIfFloatLessOrEqual:
    case lir::JumpIfFloatGreaterOrEqual:
    case lir::JumpIfFloatLessOrUnordered:
    case lir::JumpIfFloatGreaterOrUnordered:
    case lir::JumpIfFloatLessOrEqualOrUnordered:
    case lir::JumpIfFloatGreaterOrEqualOrUnordered:
      if (vfpSupported()) {
        aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
        aMask.registerMask = bMask.registerMask = FPR_MASK64;
      } else {
        *thunk = true;
      }
      break;

    default:
      break;
    }
  }

  // Destination constraints for ternary operations: branches take a
  // constant target; everything else produces a register result.
  virtual void planDestination
  (lir::TernaryOperation op,
   unsigned, const OperandMask& aMask UNUSED,
   unsigned, const OperandMask& bMask,
   unsigned, OperandMask& cMask)
  {
    if (isBranch(op)) {
      cMask.typeMask = (1 << lir::ConstantOperand);
      cMask.registerMask = 0;
    } else {
      cMask.typeMask = (1 << lir::RegisterOperand);
      cMask.registerMask = bMask.registerMask;
    }
  }

  virtual Assembler* makeAssembler(Allocator* allocator, Zone* zone);

  // Manual reference counting; release() frees this object via the
  // system allocator when the count drops to zero.
  virtual void acquire() {
    ++ referenceCount;
  }

  virtual void release() {
    if (-- referenceCount == 0) {
      con.s->free(this);
    }
  }

  ArchitectureContext con;
  unsigned referenceCount;
};
// ARM implementation of the Assembler interface.  Code is emitted into
// con.code, partitioned into MyBlocks; write() copies the blocks into
// their final destination, interleaving constant-pool data at the
// recorded pool events and patching the referring instructions.
class MyAssembler: public Assembler {
 public:
  MyAssembler(System* s, Allocator* a, Zone* zone, MyArchitecture* arch):
    con(s, a, zone), arch_(arch)
  { }

  // Register the client used by the compiler; may only be set once.
  virtual void setClient(Client* client) {
    assert(&con, con.client == 0);
    con.client = client;
  }

  virtual Architecture* arch() {
    return arch_;
  }

  // Emit a comparison of the stack pointer against the thread's stack
  // limit, branching to `handler` on overflow (exact comparison
  // direction is determined by branchRM's operand convention).
  virtual void checkStackOverflow(uintptr_t handler,
                                  unsigned stackLimitOffsetFromThread)
  {
    lir::Register stack(StackRegister);
    lir::Memory stackLimit(ThreadRegister, stackLimitOffsetFromThread);
    lir::Constant handlerConstant(new(con.zone) ResolvedPromise(handler));
    branchRM(&con, lir::JumpIfGreaterOrEqual, TargetBytesPerWord, &stack, &stackLimit,
             &handlerConstant);
  }

  // Store the return address (link register) and stack pointer into the
  // thread structure at the given offsets.
  virtual void saveFrame(unsigned stackOffset, unsigned ipOffset) {
    lir::Register link(LinkRegister);
    lir::Memory linkDst(ThreadRegister, ipOffset);
    moveRM(&con, TargetBytesPerWord, &link, TargetBytesPerWord, &linkDst);

    lir::Register stack(StackRegister);
    lir::Memory stackDst(ThreadRegister, stackOffset);
    moveRM(&con, TargetBytesPerWord, &stack, TargetBytesPerWord, &stackDst);
  }

  // Allocate a frame and copy the given (size, type, operand) argument
  // triples into argument registers, spilling the remainder to stack
  // slots.  Note: `offset` advances for register-passed arguments as
  // well, so every argument occupies a slot in the frame layout.
  virtual void pushFrame(unsigned argumentCount, ...) {
    // Per-argument descriptor gathered from the varargs.
    struct Argument {
      unsigned size;
      lir::OperandType type;
      lir::Operand* operand;
    };
    RUNTIME_ARRAY(Argument, arguments, argumentCount);

    va_list a; va_start(a, argumentCount);
    unsigned footprint = 0;
    for (unsigned i = 0; i < argumentCount; ++i) {
      RUNTIME_ARRAY_BODY(arguments)[i].size = va_arg(a, unsigned);
      RUNTIME_ARRAY_BODY(arguments)[i].type = static_cast<lir::OperandType>(va_arg(a, int));
      RUNTIME_ARRAY_BODY(arguments)[i].operand = va_arg(a, lir::Operand*);
      footprint += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size, TargetBytesPerWord);
    }
    va_end(a);

    allocateFrame(arch_->alignFrameSize(footprint));

    unsigned offset = 0;
    for (unsigned i = 0; i < argumentCount; ++i) {
      if (i < arch_->argumentRegisterCount()) {
        // This argument travels in a register.
        lir::Register dst(arch_->argumentRegister(i));

        apply(lir::Move,
              OperandInfo(
                RUNTIME_ARRAY_BODY(arguments)[i].size,
                RUNTIME_ARRAY_BODY(arguments)[i].type,
                RUNTIME_ARRAY_BODY(arguments)[i].operand),
              OperandInfo(
                pad(RUNTIME_ARRAY_BODY(arguments)[i].size, TargetBytesPerWord), lir::RegisterOperand, &dst));

        offset += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size, TargetBytesPerWord);
      } else {
        // Remaining arguments are stored to the freshly allocated frame.
        lir::Memory dst(StackRegister, offset * TargetBytesPerWord);

        apply(lir::Move,
              OperandInfo(
                RUNTIME_ARRAY_BODY(arguments)[i].size,
                RUNTIME_ARRAY_BODY(arguments)[i].type,
                RUNTIME_ARRAY_BODY(arguments)[i].operand),
              OperandInfo(
                pad(RUNTIME_ARRAY_BODY(arguments)[i].size, TargetBytesPerWord), lir::MemoryOperand, &dst));

        offset += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size, TargetBytesPerWord);
      }
    }
  }

  // Reserve `footprint` (plus the frame header) words of stack and store
  // the return address into the frame's top slot.
  virtual void allocateFrame(unsigned footprint) {
    footprint += FrameHeaderSize;

    // larger frames may require multiple subtract/add instructions
    // to allocate/deallocate, and nextFrame will need to be taught
    // how to handle them:
    assert(&con, footprint < 256);

    lir::Register stack(StackRegister);
    ResolvedPromise footprintPromise(footprint * TargetBytesPerWord);
    lir::Constant footprintConstant(&footprintPromise);
    subC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack);

    lir::Register returnAddress(LinkRegister);
    lir::Memory returnAddressDst
      (StackRegister, (footprint - 1) * TargetBytesPerWord);
    moveRM(&con, TargetBytesPerWord, &returnAddress, TargetBytesPerWord,
           &returnAddressDst);
  }

  // Grow the current frame by `difference` words (stack grows downward,
  // hence the subtract).
  virtual void adjustFrame(unsigned difference) {
    lir::Register stack(StackRegister);
    ResolvedPromise differencePromise(difference * TargetBytesPerWord);
    lir::Constant differenceConstant(&differencePromise);
    subC(&con, TargetBytesPerWord, &differenceConstant, &stack, &stack);
  }

  // Reload the return address from the frame's top slot and release the
  // frame (header included).
  virtual void popFrame(unsigned footprint) {
    footprint += FrameHeaderSize;

    lir::Register returnAddress(LinkRegister);
    lir::Memory returnAddressSrc
      (StackRegister, (footprint - 1) * TargetBytesPerWord);
    moveMR(&con, TargetBytesPerWord, &returnAddressSrc, TargetBytesPerWord,
           &returnAddress);

    lir::Register stack(StackRegister);
    ResolvedPromise footprintPromise(footprint * TargetBytesPerWord);
    lir::Constant footprintConstant(&footprintPromise);
    addC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack);
  }

  // Tear down the frame in preparation for a tail call.  When `offset`
  // is non-zero, only part of the frame is released so the callee's
  // arguments can be laid down in place; the return address (and its
  // optional surrogate register) are repositioned accordingly.  ARM has
  // no frame pointer here, so framePointerSurrogate must be NoRegister.
  virtual void popFrameForTailCall(unsigned footprint,
                                   int offset,
                                   int returnAddressSurrogate,
                                   int framePointerSurrogate UNUSED)
  {
    assert(&con, framePointerSurrogate == lir::NoRegister);

    if (TailCalls) {
      if (offset) {
        footprint += FrameHeaderSize;

        // Recover the return address before moving the stack pointer.
        lir::Register link(LinkRegister);
        lir::Memory returnAddressSrc
          (StackRegister, (footprint - 1) * TargetBytesPerWord);
        moveMR(&con, TargetBytesPerWord, &returnAddressSrc, TargetBytesPerWord,
               &link);

        lir::Register stack(StackRegister);
        ResolvedPromise footprintPromise
          ((footprint - offset) * TargetBytesPerWord);
        lir::Constant footprintConstant(&footprintPromise);
        addC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack);

        if (returnAddressSurrogate != lir::NoRegister) {
          assert(&con, offset > 0);

          lir::Register ras(returnAddressSurrogate);
          lir::Memory dst(StackRegister, (offset - 1) * TargetBytesPerWord);
          moveRM(&con, TargetBytesPerWord, &ras, TargetBytesPerWord, &dst);
        }
      } else {
        popFrame(footprint);
      }
    } else {
      abort(&con);
    }
  }

  // Pop the frame, discard the caller-pushed arguments beyond one stack
  // alignment unit, and return.
  virtual void popFrameAndPopArgumentsAndReturn(unsigned frameFootprint,
                                                unsigned argumentFootprint)
  {
    popFrame(frameFootprint);

    assert(&con, argumentFootprint >= StackAlignmentInWords);
    assert(&con, (argumentFootprint % StackAlignmentInWords) == 0);

    unsigned offset;
    if (TailCalls and argumentFootprint > StackAlignmentInWords) {
      offset = argumentFootprint - StackAlignmentInWords;

      lir::Register stack(StackRegister);
      ResolvedPromise adjustmentPromise(offset * TargetBytesPerWord);
      lir::Constant adjustment(&adjustmentPromise);
      addC(&con, TargetBytesPerWord, &adjustment, &stack, &stack);
    } else {
      offset = 0;
    }

    return_(&con);
  }

  // Pop the frame, reload the stack pointer from the thread structure,
  // and return.
  virtual void popFrameAndUpdateStackAndReturn(unsigned frameFootprint,
                                               unsigned stackOffsetFromThread)
  {
    popFrame(frameFootprint);

    lir::Register stack(StackRegister);
    lir::Memory newStackSrc(ThreadRegister, stackOffsetFromThread);
    moveMR(&con, TargetBytesPerWord, &newStackSrc, TargetBytesPerWord, &stack);

    return_(&con);
  }

  // The apply overloads dispatch through the architecture's operation
  // tables, indexed by operation and operand type(s).
  virtual void apply(lir::Operation op) {
    arch_->con.operations[op](&con);
  }

  virtual void apply(lir::UnaryOperation op, OperandInfo a)
  {
    arch_->con.unaryOperations[Multimethod::index(op, a.type)]
      (&con, a.size, a.operand);
  }

  virtual void apply(lir::BinaryOperation op, OperandInfo a, OperandInfo b)
  {
    arch_->con.binaryOperations[index(&(arch_->con), op, a.type, b.type)]
      (&con, a.size, a.operand, b.size, b.operand);
  }

  virtual void apply(lir::TernaryOperation op, OperandInfo a, OperandInfo b, OperandInfo c)
  {
    if (isBranch(op)) {
      // Branches compare a and b and jump to the constant target c.
      assert(&con, a.size == b.size);
      assert(&con, c.size == TargetBytesPerWord);
      assert(&con, c.type == lir::ConstantOperand);

      arch_->con.branchOperations[branchIndex(&(arch_->con), a.type, b.type)]
        (&con, op, a.size, a.operand, b.operand, c.operand);
    } else {
      // Non-branch ternary ops write a register result.
      assert(&con, b.size == c.size);
      assert(&con, b.type == lir::RegisterOperand);
      assert(&con, c.type == lir::RegisterOperand);

      arch_->con.ternaryOperations[index(&(arch_->con), op, a.type)]
        (&con, b.size, a.operand, b.operand, c.operand);
    }
  }

  virtual void setDestination(uint8_t* dst) {
    con.result = dst;
  }

  // Copy the buffered code to its final location.  Within each block,
  // code is copied up to each pool event, then the event's constant-pool
  // entries are emitted (recording each entry's final address and
  // patching the referring instruction's offset field), optionally
  // preceded by a jump over the pool data.  Finally pending tasks run
  // and resolved constants are written into their pool slots.
  virtual void write() {
    uint8_t* dst = con.result;
    unsigned dstOffset = 0;
    for (MyBlock* b = con.firstBlock; b; b = b->next) {
      if (DebugPool) {
        fprintf(stderr, "write block %p\n", b);
      }

      unsigned blockOffset = 0;
      for (PoolEvent* e = b->poolEventHead; e; e = e->next) {
        // Copy the code that precedes this pool event.
        unsigned size = e->offset - blockOffset;
        memcpy(dst + dstOffset, con.code.data + b->offset + blockOffset, size);
        blockOffset = e->offset;
        dstOffset += size;

        unsigned poolSize = 0;
        for (PoolOffset* o = e->poolOffsetHead; o; o = o->next) {
          if (DebugPool) {
            fprintf(stderr, "visit pool offset %p %d in block %p\n",
                    o, o->offset, b);
          }

          unsigned entry = dstOffset + poolSize;

          if (needJump(b)) {
            entry += TargetBytesPerWord;
          }

          o->entry->address = dst + entry;

          unsigned instruction = o->block->start
            + padding(o->block, o->offset) + o->offset;

          // PC-relative displacement; ARM reads PC 8 bytes ahead.
          int32_t v = (entry - 8) - instruction;
          expect(&con, v == (v & PoolOffsetMask));

          int32_t* p = reinterpret_cast<int32_t*>(dst + instruction);
          *p = (v & PoolOffsetMask) | ((~PoolOffsetMask) & *p);

          poolSize += TargetBytesPerWord;
        }

        // Jump over the pool data when execution can fall into it.
        bool jump = needJump(b);
        if (jump) {
          write4
            (dst + dstOffset, isa::b((poolSize + TargetBytesPerWord - 8) >> 2));
        }

        dstOffset += poolSize + (jump ? TargetBytesPerWord : 0);
      }

      // Copy the remainder of the block after the last pool event.
      unsigned size = b->size - blockOffset;

      memcpy(dst + dstOffset,
             con.code.data + b->offset + blockOffset,
             size);

      dstOffset += size;
    }

    for (Task* t = con.tasks; t; t = t->next) {
      t->run(&con);
    }

    for (ConstantPoolEntry* e = con.constantPool; e; e = e->next) {
      if (e->constant->resolved()) {
        *static_cast<target_uintptr_t*>(e->address) = e->constant->value();
      } else {
        // Defer: write the value into the pool slot once it resolves.
        new (e->constant->listen(sizeof(ConstantPoolListener)))
          ConstantPoolListener(con.s, static_cast<target_uintptr_t*>(e->address),
                               e->callOffset
                               ? dst + e->callOffset->value() + 8
                               : 0);
      }
      // fprintf(stderr, "constant %p at %p\n", reinterpret_cast<void*>(e->constant->value()), e->address);
    }
  }

  virtual Promise* offset(bool forTrace) {
    return arm::offsetPromise(&con, forTrace);
  }

  // Close the current block, optionally opening a fresh one at the
  // current buffer position.
  virtual Block* endBlock(bool startNew) {
    MyBlock* b = con.lastBlock;
    b->size = con.code.length() - b->offset;
    if (startNew) {
      con.lastBlock = new (con.zone) MyBlock(&con, con.code.length());
    } else {
      con.lastBlock = 0;
    }
    return b;
  }

  // Called at instruction-event boundaries: if the oldest pending pool
  // reference would fall out of the reach of its offset field
  // (PoolOffsetMask), schedule a pool flush at the previous event and
  // carry the remaining offsets forward.
  virtual void endEvent() {
    MyBlock* b = con.lastBlock;
    unsigned thisEventOffset = con.code.length() - b->offset;
    if (b->poolOffsetHead) {
      int32_t v = (thisEventOffset + TargetBytesPerWord - 8)
        - b->poolOffsetHead->offset;

      if (v > 0 and v != (v & PoolOffsetMask)) {
        appendPoolEvent
          (&con, b, b->lastEventOffset, b->poolOffsetHead,
           b->lastPoolOffsetTail);

        if (DebugPool) {
          for (PoolOffset* o = b->poolOffsetHead;
               o != b->lastPoolOffsetTail->next; o = o->next)
          {
            fprintf(stderr,
                    "in endEvent, include %p %d in pool event %p at offset %d "
                    "in block %p\n",
                    o, o->offset, b->poolEventTail, b->lastEventOffset, b);
          }
        }

        b->poolOffsetHead = b->lastPoolOffsetTail->next;
        b->lastPoolOffsetTail->next = 0;
        if (b->poolOffsetHead == 0) {
          b->poolOffsetTail = 0;
        }
      }
    }
    b->lastEventOffset = thisEventOffset;
    b->lastPoolOffsetTail = b->poolOffsetTail;
  }

  virtual unsigned length() {
    return con.code.length();
  }

  virtual unsigned footerSize() {
    return 0;
  }

  virtual void dispose() {
    con.code.dispose();
  }

  Context con;
  MyArchitecture* arch_;
};
// Construct a new assembler in the given zone, sharing this
// architecture's system instance.
Assembler* MyArchitecture::makeAssembler(Allocator* allocator, Zone* zone) {
  return new (zone) MyAssembler(con.s, allocator, zone, this);
}
} // namespace arm
// Factory entry point for the ARM backend; the bool parameter is
// ignored on this architecture.
Architecture*
makeArchitectureArm(System* system, bool)
{
  void* storage = allocate(system, sizeof(arm::MyArchitecture));
  return new (storage) arm::MyArchitecture(system);
}
} // namespace codegen
} // namespace avian

View File

@ -0,0 +1,39 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "context.h"
#include "block.h"
namespace avian {
namespace codegen {
namespace arm {
void resolve(MyBlock*);
unsigned padding(MyBlock*, unsigned);
// A block starts life with an unresolved start address (~0); resolve()
// assigns the final address once layout is known.
MyBlock::MyBlock(Context* context, unsigned offset):
  context(context), next(0), poolOffsetHead(0), poolOffsetTail(0),
  lastPoolOffsetTail(0), poolEventHead(0), poolEventTail(0),
  lastEventOffset(0), offset(offset), start(~0), size(0)
{ }

// Fix this block's start address, link it to its successor, run the
// backend's per-block resolve pass (arm::resolve), and return the
// address where the next block begins, including pool padding.
unsigned MyBlock::resolve(unsigned start, Assembler::Block* next) {
  this->start = start;
  this->next = static_cast<MyBlock*>(next);

  arm::resolve(this);

  return start + size + padding(this, size);
}
} // namespace arm
} // namespace codegen
} // namespace avian

View File

@ -0,0 +1,46 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_ARM_BLOCK_H
#define AVIAN_CODEGEN_ASSEMBLER_ARM_BLOCK_H
#include <avian/vm/codegen/lir.h>
#include <avian/vm/codegen/assembler.h>
namespace avian {
namespace codegen {
namespace arm {
class PoolEvent;
// One contiguous chunk of emitted code, together with the constant-pool
// offsets and pool-flush events recorded inside it.
class MyBlock: public Assembler::Block {
 public:
  MyBlock(Context* context, unsigned offset);

  // Assign this block's final start address and return where the next
  // block begins.
  virtual unsigned resolve(unsigned start, Assembler::Block* next);

  Context* context;
  MyBlock* next;
  PoolOffset* poolOffsetHead;      // pending constant-pool references
  PoolOffset* poolOffsetTail;
  PoolOffset* lastPoolOffsetTail;  // tail as of the previous event
  PoolEvent* poolEventHead;        // scheduled pool emission points
  PoolEvent* poolEventTail;
  unsigned lastEventOffset;
  unsigned offset;                 // position of this block in the code buffer
  unsigned start;                  // final address; ~0 until resolved
  unsigned size;
};
} // namespace arm
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_ARM_BLOCK_H

View File

@ -0,0 +1,27 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "context.h"
#include "block.h"
namespace avian {
namespace codegen {
namespace arm {
// Initialize per-assembly state: a growable code buffer (1K initial
// capacity) and a single initial block at offset zero.
Context::Context(vm::System* s, vm::Allocator* a, vm::Zone* zone):
  s(s), zone(zone), client(0), code(s, a, 1024), tasks(0), result(0),
  firstBlock(new(zone) MyBlock(this, 0)),
  lastBlock(firstBlock), poolOffsetHead(0), poolOffsetTail(0),
  constantPool(0), constantPoolCount(0)
{ }
} // namespace arm
} // namespace codegen
} // namespace avian

View File

@ -0,0 +1,99 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_ARM_CONTEXT_H
#define AVIAN_CODEGEN_ASSEMBLER_ARM_CONTEXT_H
#include <avian/vm/codegen/lir.h>
#include <avian/vm/codegen/assembler.h>
#include "avian/alloc-vector.h"
namespace vm {
class System;
class Allocator;
class Zone;
} // namespace vm
namespace avian {
namespace util {
class Aborter;
} // namespace util
namespace codegen {
namespace arm {
class Task;
class MyBlock;
class PoolOffset;
class ConstantPoolEntry;
// Mutable state for one assembly session: the code buffer, its block
// list, deferred patch tasks, and the pending constant pool.
class Context {
 public:
  Context(vm::System* s, vm::Allocator* a, vm::Zone* zone);

  vm::System* s;
  vm::Zone* zone;
  Assembler::Client* client;       // set once via MyAssembler::setClient
  vm::Vector code;                 // buffered machine code
  Task* tasks;                     // patches run after write()
  uint8_t* result;                 // final output location
  MyBlock* firstBlock;
  MyBlock* lastBlock;
  PoolOffset* poolOffsetHead;
  PoolOffset* poolOffsetTail;
  ConstantPoolEntry* constantPool; // linked list of pending constants
  unsigned constantPoolCount;
};
// Signatures for the dispatch tables held in ArchitectureContext.
typedef void (*OperationType)(Context*);

typedef void (*UnaryOperationType)(Context*, unsigned, lir::Operand*);

typedef void (*BinaryOperationType)
(Context*, unsigned, lir::Operand*, unsigned, lir::Operand*);

typedef void (*TernaryOperationType)
(Context*, unsigned, lir::Operand*, lir::Operand*,
 lir::Operand*);

typedef void (*BranchOperationType)
(Context*, lir::TernaryOperation, unsigned, lir::Operand*,
 lir::Operand*, lir::Operand*);
// Architecture-wide state shared by all assemblers: the operation
// dispatch tables, indexed by operation and operand type(s).
class ArchitectureContext {
 public:
  ArchitectureContext(vm::System* s): s(s) { }

  vm::System* s;
  OperationType operations[lir::OperationCount];
  UnaryOperationType unaryOperations[lir::UnaryOperationCount
                                     * lir::OperandTypeCount];
  BinaryOperationType binaryOperations
  [lir::BinaryOperationCount * lir::OperandTypeCount * lir::OperandTypeCount];
  TernaryOperationType ternaryOperations
  [lir::NonBranchTernaryOperationCount * lir::OperandTypeCount];
  BranchOperationType branchOperations
  [lir::BranchOperationCount * lir::OperandTypeCount * lir::OperandTypeCount];
};
// Both context types expose the System as the Aborter used by the
// assert/expect macros.
inline avian::util::Aborter* getAborter(Context* c) {
  return c->s;
}

inline avian::util::Aborter* getAborter(ArchitectureContext* c) {
  return c->s;
}
} // namespace arm
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_ARM_CONTEXT_H

View File

@ -0,0 +1,184 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_ARM_ENCODE_H
#define AVIAN_CODEGEN_ASSEMBLER_ARM_ENCODE_H
#include <avian/vm/codegen/lir.h>
#include <avian/vm/codegen/assembler.h>
namespace avian {
namespace codegen {
namespace arm {
// Raw ARM (and VFP) instruction encoders.  Each function returns one
// fully-encoded 32-bit instruction word; field names follow the ARM
// Architecture Reference Manual.
namespace isa {
// SYSTEM REGISTERS
const int FPSID = 0x0;
const int FPSCR = 0x1;
const int FPEXC = 0x8;

// INSTRUCTION OPTIONS
enum CONDITION { EQ, NE, CS, CC, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV };
enum SHIFTOP { LSL, LSR, ASR, ROR };

// INSTRUCTION FORMATS
// Data-processing, register operand with immediate shift:
inline int DATA(int cond, int opcode, int S, int Rn, int Rd, int shift, int Sh, int Rm)
{ return cond<<28 | opcode<<21 | S<<20 | Rn<<16 | Rd<<12 | shift<<7 | Sh<<5 | Rm; }
// Data-processing, register operand with register-specified shift:
inline int DATAS(int cond, int opcode, int S, int Rn, int Rd, int Rs, int Sh, int Rm)
{ return cond<<28 | opcode<<21 | S<<20 | Rn<<16 | Rd<<12 | Rs<<8 | Sh<<5 | 1<<4 | Rm; }
// Data-processing, rotated 8-bit immediate operand:
inline int DATAI(int cond, int opcode, int S, int Rn, int Rd, int rot, int imm)
{ return cond<<28 | 1<<25 | opcode<<21 | S<<20 | Rn<<16 | Rd<<12 | rot<<8 | (imm&0xff); }
// Branch with 24-bit word displacement (L = link):
inline int BRANCH(int cond, int L, int offset)
{ return cond<<28 | 5<<25 | L<<24 | (offset&0xffffff); }
// Branch-and-exchange to register:
inline int BRANCHX(int cond, int L, int Rm)
{ return cond<<28 | 0x4bffc<<6 | L<<5 | 1<<4 | Rm; }
inline int MULTIPLY(int cond, int mul, int S, int Rd, int Rn, int Rs, int Rm)
{ return cond<<28 | mul<<21 | S<<20 | Rd<<16 | Rn<<12 | Rs<<8 | 9<<4 | Rm; }
// Word/byte load-store, register offset:
inline int XFER(int cond, int P, int U, int B, int W, int L, int Rn, int Rd, int shift, int Sh, int Rm)
{ return cond<<28 | 3<<25 | P<<24 | U<<23 | B<<22 | W<<21 | L<<20 | Rn<<16 | Rd<<12 | shift<<7 | Sh<<5 | Rm; }
// Word/byte load-store, 12-bit immediate offset:
inline int XFERI(int cond, int P, int U, int B, int W, int L, int Rn, int Rd, int offset)
{ return cond<<28 | 2<<25 | P<<24 | U<<23 | B<<22 | W<<21 | L<<20 | Rn<<16 | Rd<<12 | (offset&0xfff); }
// Halfword/signed-byte load-store, register offset:
inline int XFER2(int cond, int P, int U, int W, int L, int Rn, int Rd, int S, int H, int Rm)
{ return cond<<28 | P<<24 | U<<23 | W<<21 | L<<20 | Rn<<16 | Rd<<12 | 1<<7 | S<<6 | H<<5 | 1<<4 | Rm; }
// Halfword/signed-byte load-store, split 8-bit immediate offset:
inline int XFER2I(int cond, int P, int U, int W, int L, int Rn, int Rd, int offsetH, int S, int H, int offsetL)
{ return cond<<28 | P<<24 | U<<23 | 1<<22 | W<<21 | L<<20 | Rn<<16 | Rd<<12 | offsetH<<8 | 1<<7 | S<<6 | H<<5 | 1<<4 | (offsetL&0xf); }
// Coprocessor data operation:
inline int COOP(int cond, int opcode_1, int CRn, int CRd, int cp_num, int opcode_2, int CRm)
{ return cond<<28 | 0xe<<24 | opcode_1<<20 | CRn<<16 | CRd<<12 | cp_num<<8 | opcode_2<<5 | CRm; }
inline int COXFER(int cond, int P, int U, int N, int W, int L, int Rn, int CRd, int cp_num, int offset) // offset is in words, not bytes
{ return cond<<28 | 0x6<<25 | P<<24 | U<<23 | N<<22 | W<<21 | L<<20 | Rn<<16 | CRd<<12 | cp_num<<8 | (offset&0xff)>>2; }
// Coprocessor register transfer (MCR/MRC):
inline int COREG(int cond, int opcode_1, int L, int CRn, int Rd, int cp_num, int opcode_2, int CRm)
{ return cond<<28 | 0xe<<24 | opcode_1<<21 | L<<20 | CRn<<16 | Rd<<12 | cp_num<<8 | opcode_2<<5 | 1<<4 | CRm; }
// Two-register coprocessor transfer (MCRR/MRRC):
inline int COREG2(int cond, int L, int Rn, int Rd, int cp_num, int opcode, int CRm)
{ return cond<<28 | 0xc4<<20 | L<<20 | Rn<<16 | Rd<<12 | cp_num<<8 | opcode<<4 | CRm;}

// FIELD CALCULATORS
// U (add/subtract offset) bit from the sign of an immediate offset.
inline int calcU(int imm) { return imm >= 0 ? 1 : 0; }

// INSTRUCTIONS
// The "cond" and "S" fields are set using the SETCOND() and SETS() functions
inline int b(int offset) { return BRANCH(AL, 0, offset); }
inline int bl(int offset) { return BRANCH(AL, 1, offset); }
inline int bx(int Rm) { return BRANCHX(AL, 0, Rm); }
inline int blx(int Rm) { return BRANCHX(AL, 1, Rm); }
inline int and_(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0x0, 0, Rn, Rd, shift, Sh, Rm); }
inline int eor(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0x1, 0, Rn, Rd, shift, Sh, Rm); }
inline int rsb(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0x3, 0, Rn, Rd, shift, Sh, Rm); }
inline int add(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0x4, 0, Rn, Rd, shift, Sh, Rm); }
inline int adc(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0x5, 0, Rn, Rd, shift, Sh, Rm); }
inline int rsc(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0x7, 0, Rn, Rd, shift, Sh, Rm); }
inline int cmp(int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0xa, 1, Rn, 0, shift, Sh, Rm); }
inline int orr(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0xc, 0, Rn, Rd, shift, Sh, Rm); }
inline int mov(int Rd, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0xd, 0, 0, Rd, shift, Sh, Rm); }
inline int mvn(int Rd, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0xf, 0, 0, Rd, shift, Sh, Rm); }
inline int andi(int Rd, int Rn, int imm, int rot=0) { return DATAI(AL, 0x0, 0, Rn, Rd, rot, imm); }
inline int subi(int Rd, int Rn, int imm, int rot=0) { return DATAI(AL, 0x2, 0, Rn, Rd, rot, imm); }
inline int rsbi(int Rd, int Rn, int imm, int rot=0) { return DATAI(AL, 0x3, 0, Rn, Rd, rot, imm); }
inline int addi(int Rd, int Rn, int imm, int rot=0) { return DATAI(AL, 0x4, 0, Rn, Rd, rot, imm); }
inline int adci(int Rd, int Rn, int imm, int rot=0) { return DATAI(AL, 0x5, 0, Rn, Rd, rot, imm); }
inline int bici(int Rd, int Rn, int imm, int rot=0) { return DATAI(AL, 0xe, 0, Rn, Rd, rot, imm); }
inline int cmpi(int Rn, int imm, int rot=0) { return DATAI(AL, 0xa, 1, Rn, 0, rot, imm); }
inline int movi(int Rd, int imm, int rot=0) { return DATAI(AL, 0xd, 0, 0, Rd, rot, imm); }
inline int orrsh(int Rd, int Rn, int Rm, int Rs, int Sh) { return DATAS(AL, 0xc, 0, Rn, Rd, Rs, Sh, Rm); }
inline int movsh(int Rd, int Rm, int Rs, int Sh) { return DATAS(AL, 0xd, 0, 0, Rd, Rs, Sh, Rm); }
inline int mul(int Rd, int Rm, int Rs) { return MULTIPLY(AL, 0, 0, Rd, 0, Rs, Rm); }
inline int mla(int Rd, int Rm, int Rs, int Rn) { return MULTIPLY(AL, 1, 0, Rd, Rn, Rs, Rm); }
inline int umull(int RdLo, int RdHi, int Rm, int Rs) { return MULTIPLY(AL, 4, 0, RdHi, RdLo, Rs, Rm); }
inline int ldr(int Rd, int Rn, int Rm, int W=0) { return XFER(AL, 1, 1, 0, W, 1, Rn, Rd, 0, 0, Rm); }
inline int ldri(int Rd, int Rn, int imm, int W=0) { return XFERI(AL, 1, calcU(imm), 0, W, 1, Rn, Rd, abs(imm)); }
inline int ldrb(int Rd, int Rn, int Rm) { return XFER(AL, 1, 1, 1, 0, 1, Rn, Rd, 0, 0, Rm); }
inline int ldrbi(int Rd, int Rn, int imm) { return XFERI(AL, 1, calcU(imm), 1, 0, 1, Rn, Rd, abs(imm)); }
inline int str(int Rd, int Rn, int Rm, int W=0) { return XFER(AL, 1, 1, 0, W, 0, Rn, Rd, 0, 0, Rm); }
inline int stri(int Rd, int Rn, int imm, int W=0) { return XFERI(AL, 1, calcU(imm), 0, W, 0, Rn, Rd, abs(imm)); }
inline int strb(int Rd, int Rn, int Rm) { return XFER(AL, 1, 1, 1, 0, 0, Rn, Rd, 0, 0, Rm); }
inline int strbi(int Rd, int Rn, int imm) { return XFERI(AL, 1, calcU(imm), 1, 0, 0, Rn, Rd, abs(imm)); }
inline int ldrh(int Rd, int Rn, int Rm) { return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 0, 1, Rm); }
inline int ldrhi(int Rd, int Rn, int imm) { return XFER2I(AL, 1, calcU(imm), 0, 1, Rn, Rd, abs(imm)>>4 & 0xf, 0, 1, abs(imm)&0xf); }
inline int strh(int Rd, int Rn, int Rm) { return XFER2(AL, 1, 1, 0, 0, Rn, Rd, 0, 1, Rm); }
inline int strhi(int Rd, int Rn, int imm) { return XFER2I(AL, 1, calcU(imm), 0, 0, Rn, Rd, abs(imm)>>4 & 0xf, 0, 1, abs(imm)&0xf); }
inline int ldrsh(int Rd, int Rn, int Rm) { return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 1, 1, Rm); }
inline int ldrshi(int Rd, int Rn, int imm) { return XFER2I(AL, 1, calcU(imm), 0, 1, Rn, Rd, abs(imm)>>4 & 0xf, 1, 1, abs(imm)&0xf); }
inline int ldrsb(int Rd, int Rn, int Rm) { return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 1, 0, Rm); }
inline int ldrsbi(int Rd, int Rn, int imm) { return XFER2I(AL, 1, calcU(imm), 0, 1, Rn, Rd, abs(imm)>>4 & 0xf, 1, 0, abs(imm)&0xf); }
// breakpoint instruction, this really has its own instruction format
inline int bkpt(int16_t immed) { return 0xe1200070 | (((unsigned)immed & 0xffff) >> 4 << 8) | (immed & 0xf); }
// COPROCESSOR INSTRUCTIONS
inline int mcr(int coproc, int opcode_1, int Rd, int CRn, int CRm, int opcode_2=0) { return COREG(AL, opcode_1, 0, CRn, Rd, coproc, opcode_2, CRm); }
inline int mcrr(int coproc, int opcode, int Rd, int Rn, int CRm) { return COREG2(AL, 0, Rn, Rd, coproc, opcode, CRm); }
inline int mrc(int coproc, int opcode_1, int Rd, int CRn, int CRm, int opcode_2=0) { return COREG(AL, opcode_1, 1, CRn, Rd, coproc, opcode_2, CRm); }
inline int mrrc(int coproc, int opcode, int Rd, int Rn, int CRm) { return COREG2(AL, 1, Rn, Rd, coproc, opcode, CRm); }
// VFP FLOATING-POINT INSTRUCTIONS
// Note: single-precision register numbers are split across the
// encoding -- the upper bits go in the register field (Sx>>1) and the
// low bit into an opcode extension (Sx&1).
inline int fmuls(int Sd, int Sn, int Sm) { return COOP(AL, (Sd&1)<<2|2, Sn>>1, Sd>>1, 10, (Sn&1)<<2|(Sm&1), Sm>>1); }
inline int fadds(int Sd, int Sn, int Sm) { return COOP(AL, (Sd&1)<<2|3, Sn>>1, Sd>>1, 10, (Sn&1)<<2|(Sm&1), Sm>>1); }
inline int fsubs(int Sd, int Sn, int Sm) { return COOP(AL, (Sd&1)<<2|3, Sn>>1, Sd>>1, 10, (Sn&1)<<2|(Sm&1)|2, Sm>>1); }
inline int fdivs(int Sd, int Sn, int Sm) { return COOP(AL, (Sd&1)<<2|8, Sn>>1, Sd>>1, 10, (Sn&1)<<2|(Sm&1), Sm>>1); }
inline int fmuld(int Dd, int Dn, int Dm) { return COOP(AL, 2, Dn, Dd, 11, 0, Dm); }
inline int faddd(int Dd, int Dn, int Dm) { return COOP(AL, 3, Dn, Dd, 11, 0, Dm); }
inline int fsubd(int Dd, int Dn, int Dm) { return COOP(AL, 3, Dn, Dd, 11, 2, Dm); }
inline int fdivd(int Dd, int Dn, int Dm) { return COOP(AL, 8, Dn, Dd, 11, 0, Dm); }
inline int fcpys(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 0, Sd>>1, 10, 2|(Sm&1), Sm>>1); }
inline int fabss(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 0, Sd>>1, 10, 6|(Sm&1), Sm>>1); }
inline int fnegs(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 1, Sd>>1, 10, 2|(Sm&1), Sm>>1); }
inline int fsqrts(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 1, Sd>>1, 10, 6|(Sm&1), Sm>>1); }
inline int fcmps(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 4, Sd>>1, 10, 2|(Sm&1), Sm>>1); }
inline int fcvtds(int Dd, int Sm) { return COOP(AL, 0xb, 7, Dd, 10, 6|(Sm&1), Sm>>1); }
inline int fsitos(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 8, Sd>>1, 10, 6|(Sm&1), Sm>>1); }
inline int ftosizs(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 0xd, Sd>>1, 10, 6|(Sm&1), Sm>>1); }
inline int fcpyd(int Dd, int Dm) { return COOP(AL, 0xb, 0, Dd, 11, 2, Dm); }
inline int fabsd(int Dd, int Dm) { return COOP(AL, 0xb, 0, Dd, 11, 6, Dm); }
inline int fnegd(int Dd, int Dm) { return COOP(AL, 0xb, 1, Dd, 11, 2, Dm); }
inline int fsqrtd(int Dd, int Dm) { return COOP(AL, 0xb, 1, Dd, 11, 6, Dm); }
// double-precision comparison instructions
inline int fcmpd(int Dd, int Dm) { return COOP(AL, 0xb, 4, Dd, 11, 2, Dm); }
// double-precision conversion instructions
inline int fcvtsd(int Sd, int Dm) { return COOP(AL, 0xb|(Sd&1)<<2, 7, Sd>>1, 11, 6, Dm); }
inline int fsitod(int Dd, int Sm) { return COOP(AL, 0xb, 8, Dd, 11, 6|(Sm&1), Sm>>1); }
inline int ftosizd(int Sd, int Dm) { return COOP(AL, 0xb|(Sd&1)<<2, 0xd, Sd>>1, 11, 6, Dm); }
// single load/store instructions for both precision types
inline int flds(int Sd, int Rn, int offset=0) { return COXFER(AL, 1, 1, Sd&1, 0, 1, Rn, Sd>>1, 10, offset); };
inline int fldd(int Dd, int Rn, int offset=0) { return COXFER(AL, 1, 1, 0, 0, 1, Rn, Dd, 11, offset); };
inline int fsts(int Sd, int Rn, int offset=0) { return COXFER(AL, 1, 1, Sd&1, 0, 0, Rn, Sd>>1, 10, offset); };
inline int fstd(int Dd, int Rn, int offset=0) { return COXFER(AL, 1, 1, 0, 0, 0, Rn, Dd, 11, offset); };
// move between GPRs and FPRs
inline int fmsr(int Sn, int Rd) { return mcr(10, 0, Rd, Sn>>1, 0, (Sn&1)<<2); }
inline int fmrs(int Rd, int Sn) { return mrc(10, 0, Rd, Sn>>1, 0, (Sn&1)<<2); }
// move to/from VFP system registers
inline int fmrx(int Rd, int reg) { return mrc(10, 7, Rd, reg, 0); }
// these move around pairs of single-precision registers
inline int fmdrr(int Dm, int Rd, int Rn) { return mcrr(11, 1, Rd, Rn, Dm); }
inline int fmrrd(int Rd, int Rn, int Dm) { return mrrc(11, 1, Rd, Rn, Dm); }
// FLAG SETTERS
inline int SETCOND(int ins, int cond) { return ((ins&0x0fffffff) | (cond<<28)); }
inline int SETS(int ins) { return ins | 1<<20; }
// PSEUDO-INSTRUCTIONS
// Shifts are MOV with a shifted operand; conditional branches are B
// with a rewritten condition field.
inline int lsl(int Rd, int Rm, int Rs) { return movsh(Rd, Rm, Rs, LSL); }
inline int lsli(int Rd, int Rm, int imm) { return mov(Rd, Rm, LSL, imm); }
inline int lsr(int Rd, int Rm, int Rs) { return movsh(Rd, Rm, Rs, LSR); }
inline int lsri(int Rd, int Rm, int imm) { return mov(Rd, Rm, LSR, imm); }
inline int asr(int Rd, int Rm, int Rs) { return movsh(Rd, Rm, Rs, ASR); }
inline int asri(int Rd, int Rm, int imm) { return mov(Rd, Rm, ASR, imm); }
inline int beq(int offset) { return SETCOND(b(offset), EQ); }
inline int bne(int offset) { return SETCOND(b(offset), NE); }
inline int bls(int offset) { return SETCOND(b(offset), LS); }
inline int bhi(int offset) { return SETCOND(b(offset), HI); }
inline int blt(int offset) { return SETCOND(b(offset), LT); }
inline int bgt(int offset) { return SETCOND(b(offset), GT); }
inline int ble(int offset) { return SETCOND(b(offset), LE); }
inline int bge(int offset) { return SETCOND(b(offset), GE); }
inline int blo(int offset) { return SETCOND(b(offset), CC); }
inline int bhs(int offset) { return SETCOND(b(offset), CS); }
inline int bpl(int offset) { return SETCOND(b(offset), PL); }
inline int fmstat() { return fmrx(15, FPSCR); }
} // namespace isa
// Append one 32-bit instruction word to the code buffer.
inline void emit(Context* con, int code) { con->code.append4(code); }
} // namespace arm
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_ARM_ENCODE_H

View File

@ -0,0 +1,177 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "context.h"
#include "fixup.h"
#include "block.h"
namespace avian {
namespace codegen {
namespace arm {
using namespace util;
unsigned padding(MyBlock*, unsigned);
// Promise for the final address of a given offset within a block; it
// becomes resolvable once block layout is complete.
OffsetPromise::OffsetPromise(Context* con, MyBlock* block, unsigned offset, bool forTrace):
  con(con), block(block), offset(offset), forTrace(forTrace)
{ }

// Resolved once the enclosing block has been assigned a start address
// (start is ~0 until MyBlock::resolve runs).
bool OffsetPromise::resolved() {
  return block->start != static_cast<unsigned>(~0);
}

int64_t OffsetPromise::value() {
  assert(con, resolved());

  unsigned o = offset - block->offset;
  // Account for constant-pool padding inserted before this point; trace
  // offsets refer to the preceding instruction, hence the one-word
  // adjustment.
  return block->start + padding
    (block, forTrace ? o - vm::TargetBytesPerWord : o) + o;
}

// Create a promise for the current end of the code buffer.
Promise* offsetPromise(Context* con, bool forTrace) {
  return new(con->zone) OffsetPromise(con, con->lastBlock, con->code.length(), forTrace);
}
// Listener that patches the branch at `instruction` once the awaited
// value becomes known.
OffsetListener::OffsetListener(vm::System* s, uint8_t* instruction):
  s(s),
  instruction(instruction)
{ }

bool OffsetListener::resolve(int64_t value, void** location) {
  void* p = updateOffset(s, instruction, value);
  if (location) *location = p;
  return false;
}

// Deferred patch of a branch whose target may not be known until after
// assembly.
OffsetTask::OffsetTask(Task* next, Promise* promise, Promise* instructionOffset):
  Task(next),
  promise(promise),
  instructionOffset(instructionOffset)
{ }

void OffsetTask::run(Context* con) {
  if (promise->resolved()) {
    // Target already known: patch the instruction immediately.
    updateOffset
      (con->s, con->result + instructionOffset->value(), promise->value());
  } else {
    // Otherwise register a listener to patch once it resolves.
    new (promise->listen(sizeof(OffsetListener)))
      OffsetListener(con->s, con->result + instructionOffset->value());
  }
}

// Prepend a new offset-patch task to the context's task list.
void appendOffsetTask(Context* con, Promise* promise, Promise* instructionOffset) {
  con->tasks = new(con->zone) OffsetTask(con->tasks, promise, instructionOffset);
}
// True when v survives a round trip through both shifts: dropping the
// top `left` bits (sign-extension check) and the bottom `right` bits
// (alignment check) must each leave the value unchanged.
bool bounded(int right, int left, int32_t v) {
  int32_t topRoundTrip = (v << left) >> left;
  int32_t bottomRoundTrip = (v >> right) << right;
  return topRoundTrip == v and bottomRoundTrip == v;
}
// Patch the 24-bit branch displacement at `instruction` so it reaches
// `value`; returns the address of the following instruction.
void* updateOffset(vm::System* s, uint8_t* instruction, int64_t value) {
  // ARM's PC is two words ahead, and branches drop the bottom 2 bits.
  int32_t v = (reinterpret_cast<uint8_t*>(value) - (instruction + 8)) >> 2;

  int32_t mask;
  // The displacement must fit in the 24-bit branch offset field.
  expect(s, bounded(0, 8, v));
  mask = 0xFFFFFF;

  int32_t* p = reinterpret_cast<int32_t*>(instruction);
  *p = (v & mask) | ((~mask) & *p);

  return instruction + 4;
}
// One constant-pool slot: resolves to the address where the constant was
// emitted.  `address` stays null until the pool is laid out.
ConstantPoolEntry::ConstantPoolEntry(Context* con, Promise* constant, ConstantPoolEntry* next,
Promise* callOffset):
con(con), constant(constant), next(next), callOffset(callOffset),
address(0)
{ }
// The promise's value is the pool slot's final address.
int64_t ConstantPoolEntry::value() {
assert(con, resolved());
return reinterpret_cast<int64_t>(address);
}
bool ConstantPoolEntry::resolved() {
return address != 0;
}
// Listener that stores a resolved constant directly into its pool slot
// (`address`) once the constant's value becomes known.
ConstantPoolListener::ConstantPoolListener(vm::System* s, vm::target_uintptr_t* address,
uint8_t* returnAddress):
s(s),
address(address),
returnAddress(returnAddress)
{ }
// Write the resolved value into the pool slot.  Reports `returnAddress` (if
// any) or the slot itself via *location.  NOTE(review): returns true where
// OffsetListener::resolve returns false — appears to signal the caller that
// the written location is significant; confirm against the
// Promise::Listener contract.
bool ConstantPoolListener::resolve(int64_t value, void** location) {
*address = value;
if (location) {
*location = returnAddress ? static_cast<void*>(returnAddress) : address;
}
return true;
}
// Records one reference to a constant-pool entry, at `offset` bytes into
// `block`; linked into the block's pool-offset list.
PoolOffset::PoolOffset(MyBlock* block, ConstantPoolEntry* entry, unsigned offset):
block(block), entry(entry), next(0), offset(offset)
{ }
// Marks a point (at `offset`) where a range of pool offsets
// [poolOffsetHead, poolOffsetTail] must be flushed into the code stream.
PoolEvent::PoolEvent(PoolOffset* poolOffsetHead, PoolOffset* poolOffsetTail,
unsigned offset):
poolOffsetHead(poolOffsetHead), poolOffsetTail(poolOffsetTail), next(0),
offset(offset)
{ }
// Add a constant to the pending constant pool and record, on the current
// block, the code position that references it.
void appendConstantPoolEntry(Context* con, Promise* constant, Promise* callOffset) {
if (constant->resolved()) {
// make a copy, since the original might be allocated on the
// stack, and we need our copy to live until assembly is complete
constant = new(con->zone) ResolvedPromise(constant->value());
}
// Push the entry onto the context's pool list (most-recent first).
con->constantPool = new(con->zone) ConstantPoolEntry(con, constant, con->constantPool, callOffset);
++ con->constantPoolCount;
// Remember where, relative to the current block, this entry is referenced.
PoolOffset* o = new(con->zone) PoolOffset(con->lastBlock, con->constantPool, con->code.length() - con->lastBlock->offset);
if (DebugPool) {
fprintf(stderr, "add pool offset %p %d to block %p\n",
o, o->offset, con->lastBlock);
}
// Append to the block's pool-offset list, maintaining head and tail.
if (con->lastBlock->poolOffsetTail) {
con->lastBlock->poolOffsetTail->next = o;
} else {
con->lastBlock->poolOffsetHead = o;
}
con->lastBlock->poolOffsetTail = o;
}
// Record a pool-flush event at `offset` within block `b`, covering the pool
// offsets [head, tail]; the event is appended to the block's event list.
void appendPoolEvent(Context* con, MyBlock* b, unsigned offset, PoolOffset* head,
                     PoolOffset* tail)
{
  PoolEvent* event = new(con->zone) PoolEvent(head, tail, offset);
  if (b->poolEventTail == 0) {
    // first event for this block
    b->poolEventHead = event;
  } else {
    b->poolEventTail->next = event;
  }
  b->poolEventTail = event;
}
} // namespace arm
} // namespace codegen
} // namespace avian

View File

@ -0,0 +1,140 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_ARM_PROMISE_H
#define AVIAN_CODEGEN_ASSEMBLER_ARM_PROMISE_H
#include "avian/target.h"
#include <avian/vm/codegen/lir.h>
#include <avian/vm/codegen/assembler.h>
#include "avian/alloc-vector.h"
namespace vm {
class System;
}
namespace avian {
namespace codegen {
namespace arm {
const bool DebugPool = false;
const int32_t PoolOffsetMask = 0xFFF;
// A deferred assembly action, run after code layout; tasks form a singly
// linked list on the Context.
class Task {
public:
Task(Task* next): next(next) { }
virtual void run(Context* con) = 0;
Task* next;
};
// Promise for the final offset of a position inside a MyBlock (see
// fixup.cpp for resolution semantics).
class OffsetPromise: public Promise {
public:
OffsetPromise(Context* con, MyBlock* block, unsigned offset, bool forTrace);
virtual bool resolved();
virtual int64_t value();
Context* con;
MyBlock* block;
unsigned offset;
bool forTrace;
};
Promise* offsetPromise(Context* con, bool forTrace = false);
// Patches a branch instruction once its target offset resolves.
class OffsetListener: public Promise::Listener {
public:
OffsetListener(vm::System* s, uint8_t* instruction);
virtual bool resolve(int64_t value, void** location);
vm::System* s;
uint8_t* instruction;
};
// Task that patches one branch, either immediately (target known) or via an
// OffsetListener.
class OffsetTask: public Task {
public:
OffsetTask(Task* next, Promise* promise, Promise* instructionOffset);
virtual void run(Context* con);
Promise* promise;
Promise* instructionOffset;
};
void appendOffsetTask(Context* con, Promise* promise, Promise* instructionOffset);
void* updateOffset(vm::System* s, uint8_t* instruction, int64_t value);
// One slot of the out-of-line constant pool; resolves to the address at
// which the constant is emitted.
class ConstantPoolEntry: public Promise {
public:
ConstantPoolEntry(Context* con, Promise* constant, ConstantPoolEntry* next,
Promise* callOffset);
virtual int64_t value();
virtual bool resolved();
Context* con;
Promise* constant;
ConstantPoolEntry* next;
Promise* callOffset;
void* address;
// NOTE(review): never read or written in fixup.cpp — Context carries the
// pool count; confirm this member is dead before removing.
unsigned constantPoolCount;
};
// Writes a late-resolved constant value into its pool slot.
class ConstantPoolListener: public Promise::Listener {
public:
ConstantPoolListener(vm::System* s, vm::target_uintptr_t* address,
uint8_t* returnAddress);
virtual bool resolve(int64_t value, void** location);
vm::System* s;
vm::target_uintptr_t* address;
uint8_t* returnAddress;
};
// A single reference from code (at `offset` within `block`) to a pool entry.
class PoolOffset {
public:
PoolOffset(MyBlock* block, ConstantPoolEntry* entry, unsigned offset);
MyBlock* block;
ConstantPoolEntry* entry;
PoolOffset* next;
unsigned offset;
};
// A point in a block where the pool offsets [head, tail] must be flushed.
class PoolEvent {
public:
PoolEvent(PoolOffset* poolOffsetHead, PoolOffset* poolOffsetTail,
unsigned offset);
PoolOffset* poolOffsetHead;
PoolOffset* poolOffsetTail;
PoolEvent* next;
unsigned offset;
};
void appendConstantPoolEntry(Context* con, Promise* constant, Promise* callOffset);
void appendPoolEvent(Context* con, MyBlock* b, unsigned offset, PoolOffset* head,
PoolOffset* tail);
} // namespace arm
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_ARM_PROMISE_H

View File

@ -0,0 +1,141 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "context.h"
#include "operations.h"
#include "multimethod.h"
#include "../multimethod.h"
namespace avian {
namespace codegen {
namespace arm {
using namespace util;
// Dense table index for a binary-operation handler, keyed by
// (operation, operand1 type, operand2 type).
unsigned index(ArchitectureContext*,
               lir::BinaryOperation operation,
               lir::OperandType operand1,
               lir::OperandType operand2)
{
  unsigned result = operation;
  result += lir::BinaryOperationCount * operand1;
  result += lir::BinaryOperationCount * lir::OperandTypeCount * operand2;
  return result;
}
// Dense table index for a non-branch ternary-operation handler, keyed by
// (operation, first operand type).  Branch operations use branchIndex.
unsigned index(ArchitectureContext* con UNUSED,
               lir::TernaryOperation operation,
               lir::OperandType operand1)
{
  assert(con, not isBranch(operation));
  const unsigned base = lir::NonBranchTernaryOperationCount * operand1;
  return base + operation;
}
// Dense table index for a branch handler, keyed by the two compared operand
// types.
unsigned branchIndex(ArchitectureContext* con UNUSED, lir::OperandType operand1,
                     lir::OperandType operand2)
{
  const unsigned stride = lir::OperandTypeCount;
  return operand1 + stride * operand2;
}
// Fill the ArchitectureContext dispatch tables, mapping each
// (operation, operand types) combination to its ARM emitter function.
void populateTables(ArchitectureContext* con) {
const lir::OperandType C = lir::ConstantOperand;
const lir::OperandType A = lir::AddressOperand;
const lir::OperandType R = lir::RegisterOperand;
const lir::OperandType M = lir::MemoryOperand;
OperationType* zo = con->operations;
UnaryOperationType* uo = con->unaryOperations;
BinaryOperationType* bo = con->binaryOperations;
TernaryOperationType* to = con->ternaryOperations;
BranchOperationType* bro = con->branchOperations;
// nullary operations
zo[lir::Return] = return_;
zo[lir::LoadBarrier] = memoryBarrier;
zo[lir::StoreStoreBarrier] = memoryBarrier;
zo[lir::StoreLoadBarrier] = memoryBarrier;
zo[lir::Trap] = trap;
// unary operations (calls and jumps); aligned variants share the same
// emitters as their unaligned counterparts
uo[Multimethod::index(lir::LongCall, C)] = CAST1(longCallC);
uo[Multimethod::index(lir::AlignedLongCall, C)] = CAST1(longCallC);
uo[Multimethod::index(lir::LongJump, C)] = CAST1(longJumpC);
uo[Multimethod::index(lir::AlignedLongJump, C)] = CAST1(longJumpC);
uo[Multimethod::index(lir::Jump, R)] = CAST1(jumpR);
uo[Multimethod::index(lir::Jump, C)] = CAST1(jumpC);
uo[Multimethod::index(lir::AlignedJump, R)] = CAST1(jumpR);
uo[Multimethod::index(lir::AlignedJump, C)] = CAST1(jumpC);
uo[Multimethod::index(lir::Call, C)] = CAST1(callC);
uo[Multimethod::index(lir::Call, R)] = CAST1(callR);
uo[Multimethod::index(lir::AlignedCall, C)] = CAST1(callC);
uo[Multimethod::index(lir::AlignedCall, R)] = CAST1(callR);
// binary operations (moves, conversions, unary arithmetic)
bo[index(con, lir::Move, R, R)] = CAST2(moveRR);
bo[index(con, lir::Move, C, R)] = CAST2(moveCR);
bo[index(con, lir::Move, C, M)] = CAST2(moveCM);
bo[index(con, lir::Move, M, R)] = CAST2(moveMR);
bo[index(con, lir::Move, R, M)] = CAST2(moveRM);
bo[index(con, lir::Move, A, R)] = CAST2(moveAR);
bo[index(con, lir::MoveZ, R, R)] = CAST2(moveZRR);
bo[index(con, lir::MoveZ, M, R)] = CAST2(moveZMR);
bo[index(con, lir::MoveZ, C, R)] = CAST2(moveCR);
bo[index(con, lir::Negate, R, R)] = CAST2(negateRR);
bo[index(con, lir::FloatAbsolute, R, R)] = CAST2(floatAbsoluteRR);
bo[index(con, lir::FloatNegate, R, R)] = CAST2(floatNegateRR);
bo[index(con, lir::Float2Float, R, R)] = CAST2(float2FloatRR);
bo[index(con, lir::Float2Int, R, R)] = CAST2(float2IntRR);
bo[index(con, lir::Int2Float, R, R)] = CAST2(int2FloatRR);
bo[index(con, lir::FloatSquareRoot, R, R)] = CAST2(floatSqrtRR);
// ternary operations (binary arithmetic with destination)
to[index(con, lir::Add, R)] = CAST3(addR);
to[index(con, lir::Subtract, R)] = CAST3(subR);
to[index(con, lir::Multiply, R)] = CAST3(multiplyR);
to[index(con, lir::FloatAdd, R)] = CAST3(floatAddR);
to[index(con, lir::FloatSubtract, R)] = CAST3(floatSubtractR);
to[index(con, lir::FloatMultiply, R)] = CAST3(floatMultiplyR);
to[index(con, lir::FloatDivide, R)] = CAST3(floatDivideR);
to[index(con, lir::ShiftLeft, R)] = CAST3(shiftLeftR);
to[index(con, lir::ShiftLeft, C)] = CAST3(shiftLeftC);
to[index(con, lir::ShiftRight, R)] = CAST3(shiftRightR);
to[index(con, lir::ShiftRight, C)] = CAST3(shiftRightC);
to[index(con, lir::UnsignedShiftRight, R)] = CAST3(unsignedShiftRightR);
to[index(con, lir::UnsignedShiftRight, C)] = CAST3(unsignedShiftRightC);
to[index(con, lir::And, R)] = CAST3(andR);
to[index(con, lir::And, C)] = CAST3(andC);
to[index(con, lir::Or, R)] = CAST3(orR);
to[index(con, lir::Xor, R)] = CAST3(xorR);
// compare-and-branch operations, keyed by compared operand types
bro[branchIndex(con, R, R)] = CAST_BRANCH(branchRR);
bro[branchIndex(con, C, R)] = CAST_BRANCH(branchCR);
bro[branchIndex(con, C, M)] = CAST_BRANCH(branchCM);
bro[branchIndex(con, R, M)] = CAST_BRANCH(branchRM);
}
} // namespace arm
} // namespace codegen
} // namespace avian

View File

@ -0,0 +1,44 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_ARM_MULTIMETHOD_H
#define AVIAN_CODEGEN_ASSEMBLER_ARM_MULTIMETHOD_H
#include <avian/vm/codegen/lir.h>
#include <avian/vm/codegen/assembler.h>
#define CAST1(x) reinterpret_cast<UnaryOperationType>(x)
#define CAST2(x) reinterpret_cast<BinaryOperationType>(x)
#define CAST3(x) reinterpret_cast<TernaryOperationType>(x)
#define CAST_BRANCH(x) reinterpret_cast<BranchOperationType>(x)
namespace avian {
namespace codegen {
namespace arm {
unsigned index(ArchitectureContext*,
lir::BinaryOperation operation,
lir::OperandType operand1,
lir::OperandType operand2);
unsigned index(ArchitectureContext* con UNUSED,
lir::TernaryOperation operation,
lir::OperandType operand1);
unsigned branchIndex(ArchitectureContext* con UNUSED, lir::OperandType operand1,
lir::OperandType operand2);
void populateTables(ArchitectureContext* con);
} // namespace arm
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_ARM_MULTIMETHOD_H

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,240 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_ARM_OPERATIONS_H
#define AVIAN_CODEGEN_ASSEMBLER_ARM_OPERATIONS_H
#include "registers.h"
namespace vm {
class System;
}
namespace avian {
namespace codegen {
namespace arm {
class Context;
// shortcut functions for temporary-register management and constant access
// Acquire a temporary general-purpose register from the register allocator.
inline int newTemp(Context* con) {
return con->client->acquireTemporary(GPR_MASK);
}
// Acquire a temporary register restricted to the given register mask.
inline int newTemp(Context* con, unsigned mask) {
return con->client->acquireTemporary(mask);
}
// Release a temporary register back to the allocator.
inline void freeTemp(Context* con, int r) {
con->client->releaseTemporary(r);
}
// Extract the resolved integer value of a constant operand.
inline int64_t getValue(lir::Constant* con) {
return con->value->value();
}
// Allocate a single-register temporary operand.
inline lir::Register makeTemp(Context* con) {
lir::Register tmp(newTemp(con));
return tmp;
}
// Allocate a register-pair temporary operand (for 64-bit values on this
// 32-bit target).
inline lir::Register makeTemp64(Context* con) {
lir::Register tmp(newTemp(con), newTemp(con));
return tmp;
}
// Release both halves of a (possibly paired) temporary operand.
inline void freeTemp(Context* con, const lir::Register& tmp) {
if (tmp.low != lir::NoRegister) freeTemp(con, tmp.low);
if (tmp.high != lir::NoRegister) freeTemp(con, tmp.high);
}
void shiftLeftR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void moveRR(Context* con, unsigned srcSize, lir::Register* src,
unsigned dstSize, lir::Register* dst);
void shiftLeftC(Context* con, unsigned size UNUSED, lir::Constant* a, lir::Register* b, lir::Register* t);
void shiftRightR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void shiftRightC(Context* con, unsigned size UNUSED, lir::Constant* a, lir::Register* b, lir::Register* t);
void unsignedShiftRightR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void unsignedShiftRightC(Context* con, unsigned size UNUSED, lir::Constant* a, lir::Register* b, lir::Register* t);
bool needJump(MyBlock* b);
unsigned padding(MyBlock* b, unsigned offset);
void resolve(MyBlock* b);
void jumpR(Context* con, unsigned size UNUSED, lir::Register* target);
void swapRR(Context* con, unsigned aSize, lir::Register* a,
unsigned bSize, lir::Register* b);
void moveRR(Context* con, unsigned srcSize, lir::Register* src,
unsigned dstSize, lir::Register* dst);
void moveZRR(Context* con, unsigned srcSize, lir::Register* src,
unsigned, lir::Register* dst);
void moveCR(Context* con, unsigned size, lir::Constant* src,
unsigned, lir::Register* dst);
void moveCR2(Context* con, unsigned size, lir::Constant* src,
lir::Register* dst, Promise* callOffset);
void moveCR(Context* con, unsigned size, lir::Constant* src,
unsigned, lir::Register* dst);
void addR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void subR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void addC(Context* con, unsigned size, lir::Constant* a,
lir::Register* b, lir::Register* dst);
void subC(Context* con, unsigned size, lir::Constant* a,
lir::Register* b, lir::Register* dst);
void multiplyR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void floatAbsoluteRR(Context* con, unsigned size, lir::Register* a, unsigned, lir::Register* b);
void floatNegateRR(Context* con, unsigned size, lir::Register* a, unsigned, lir::Register* b);
void float2FloatRR(Context* con, unsigned size, lir::Register* a, unsigned, lir::Register* b);
void float2IntRR(Context* con, unsigned size, lir::Register* a, unsigned, lir::Register* b);
void int2FloatRR(Context* con, unsigned, lir::Register* a, unsigned size, lir::Register* b);
void floatSqrtRR(Context* con, unsigned size, lir::Register* a, unsigned, lir::Register* b);
void floatAddR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void floatSubtractR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void floatMultiplyR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void floatDivideR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
int normalize(Context* con, int offset, int index, unsigned scale,
bool* preserveIndex, bool* release);
void store(Context* con, unsigned size, lir::Register* src,
int base, int offset, int index, unsigned scale, bool preserveIndex);
void moveRM(Context* con, unsigned srcSize, lir::Register* src,
unsigned dstSize UNUSED, lir::Memory* dst);
void load(Context* con, unsigned srcSize, int base, int offset, int index,
unsigned scale, unsigned dstSize, lir::Register* dst,
bool preserveIndex, bool signExtend);
void moveMR(Context* con, unsigned srcSize, lir::Memory* src,
unsigned dstSize, lir::Register* dst);
void moveZMR(Context* con, unsigned srcSize, lir::Memory* src,
unsigned dstSize, lir::Register* dst);
void andR(Context* con, unsigned size, lir::Register* a,
lir::Register* b, lir::Register* dst);
void andC(Context* con, unsigned size, lir::Constant* a,
lir::Register* b, lir::Register* dst);
void orR(Context* con, unsigned size, lir::Register* a,
lir::Register* b, lir::Register* dst);
void xorR(Context* con, unsigned size, lir::Register* a,
lir::Register* b, lir::Register* dst);
void moveAR2(Context* con, unsigned srcSize, lir::Address* src,
unsigned dstSize, lir::Register* dst);
void moveAR(Context* con, unsigned srcSize, lir::Address* src,
unsigned dstSize, lir::Register* dst);
void compareRR(Context* con, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void compareCR(Context* con, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b);
void compareCM(Context* con, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Memory* b);
void compareRM(Context* con, unsigned aSize, lir::Register* a,
unsigned bSize, lir::Memory* b);
int32_t
branch(Context* con, lir::TernaryOperation op);
void conditional(Context* con, int32_t branch, lir::Constant* target);
void branch(Context* con, lir::TernaryOperation op, lir::Constant* target);
void branchLong(Context* con, lir::TernaryOperation op, lir::Operand* al,
lir::Operand* ah, lir::Operand* bl,
lir::Operand* bh, lir::Constant* target,
BinaryOperationType compareSigned,
BinaryOperationType compareUnsigned);
void branchRR(Context* con, lir::TernaryOperation op, unsigned size,
lir::Register* a, lir::Register* b,
lir::Constant* target);
void branchCR(Context* con, lir::TernaryOperation op, unsigned size,
lir::Constant* a, lir::Register* b,
lir::Constant* target);
void branchRM(Context* con, lir::TernaryOperation op, unsigned size,
lir::Register* a, lir::Memory* b,
lir::Constant* target);
void branchCM(Context* con, lir::TernaryOperation op, unsigned size,
lir::Constant* a, lir::Memory* b,
lir::Constant* target);
ShiftMaskPromise*
shiftMaskPromise(Context* con, Promise* base, unsigned shift, int64_t mask);
void moveCM(Context* con, unsigned srcSize, lir::Constant* src,
unsigned dstSize, lir::Memory* dst);
void negateRR(Context* con, unsigned srcSize, lir::Register* src,
unsigned dstSize UNUSED, lir::Register* dst);
void callR(Context* con, unsigned size UNUSED, lir::Register* target);
void callC(Context* con, unsigned size UNUSED, lir::Constant* target);
void longCallC(Context* con, unsigned size UNUSED, lir::Constant* target);
void longJumpC(Context* con, unsigned size UNUSED, lir::Constant* target);
void jumpC(Context* con, unsigned size UNUSED, lir::Constant* target);
void return_(Context* con);
void trap(Context* con);
void memoryBarrier(Context*);
} // namespace arm
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_ARM_OPERATIONS_H

View File

@ -0,0 +1,52 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_ARM_REGISTERS_H
#define AVIAN_CODEGEN_ASSEMBLER_ARM_REGISTERS_H
#include <avian/vm/codegen/lir.h>
#include <avian/vm/codegen/assembler.h>
namespace avian {
namespace codegen {
namespace arm {
// Masks for extracting the low bits of a word.
const uint64_t MASK_LO32 = 0xffffffff;
const unsigned MASK_LO16 = 0xffff;
const unsigned MASK_LO8 = 0xff;
// Register-file dimensions: 16 general-purpose and 16 floating-point
// registers.
const int N_GPRS = 16;
const int N_FPRS = 16;
// Allocation masks: GPRs occupy bits 0-15, FPRs bits 16-31.
const uint32_t GPR_MASK = 0xffff;
const uint32_t FPR_MASK = 0xffff0000;
// 64-bit variants replicate the mask into the high word (register pairs).
const uint64_t GPR_MASK64 = GPR_MASK | (uint64_t)GPR_MASK << 32;
const uint64_t FPR_MASK64 = FPR_MASK | (uint64_t)FPR_MASK << 32;
// Register indices >= N_GPRS denote floating-point registers.
inline bool isFpr(lir::Register* reg) {
return reg->low >= N_GPRS;
}
// Map an allocator register index to a 64-bit FP register number.
inline int fpr64(int reg) { return reg - N_GPRS; }
inline int fpr64(lir::Register* reg) { return fpr64(reg->low); }
// Map to a 32-bit FP register number (two 32-bit registers per 64-bit one).
inline int fpr32(int reg) { return fpr64(reg) << 1; }
inline int fpr32(lir::Register* reg) { return fpr64(reg) << 1; }
// Fixed register assignments for this port.
const int ThreadRegister = 8;
const int StackRegister = 13;
const int LinkRegister = 14;
const int ProgramCounter = 15;
} // namespace arm
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_ARM_REGISTERS_H

View File

@ -0,0 +1,29 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_TARGET_MULTIMETHOD_H
#define AVIAN_CODEGEN_TARGET_MULTIMETHOD_H
namespace avian {
namespace codegen {
// Shared helper for computing dense dispatch-table indices for unary
// operations, used by the per-target multimethod tables.
// NOTE(review): this header uses lir:: names but includes nothing itself —
// it appears to rely on the including file having pulled in codegen/lir.h
// first; consider adding the include.
class Multimethod {
public:
// Index for a (unary operation, operand type) pair.
inline static unsigned index(lir::UnaryOperation operation, lir::OperandType operand) {
return operation + (lir::UnaryOperationCount * operand);
}
};
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_TARGET_MULTIMETHOD_H

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,42 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "context.h"
#include "block.h"
#include "avian/common.h"
namespace avian {
namespace codegen {
namespace powerpc {
void resolve(MyBlock*);
unsigned padding(MyBlock*, unsigned);
// A contiguous run of code within the output buffer; `start` stays ~0 until
// layout places the block.
MyBlock::MyBlock(Context* context, unsigned offset):
context(context), next(0), jumpOffsetHead(0), jumpOffsetTail(0),
lastJumpOffsetTail(0), jumpEventHead(0), jumpEventTail(0),
lastEventOffset(0), offset(offset), start(~0), size(0), resolved(false)
{ }
// Place this block at `start`, process its jump offsets, and return the
// offset at which the next block begins (including any padding).
unsigned MyBlock::resolve(unsigned start, Assembler::Block* next) {
this->start = start;
this->next = static_cast<MyBlock*>(next);
// explicitly qualified: calls the free function above, not this method
powerpc::resolve(this);
this->resolved = true;
return start + size + padding(this, size);
}
} // namespace powerpc
} // namespace codegen
} // namespace avian

View File

@ -0,0 +1,44 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_POWERPC_BLOCK_H
#define AVIAN_CODEGEN_ASSEMBLER_POWERPC_BLOCK_H
namespace avian {
namespace codegen {
namespace powerpc {
class JumpEvent;
// PowerPC assembler code block: a contiguous region of emitted code plus
// the jump offsets/events that must be fixed up when it is placed.
class MyBlock: public Assembler::Block {
public:
MyBlock(Context* context, unsigned offset);
virtual unsigned resolve(unsigned start, Assembler::Block* next);
Context* context;
MyBlock* next;
JumpOffset* jumpOffsetHead;
JumpOffset* jumpOffsetTail;
JumpOffset* lastJumpOffsetTail;
JumpEvent* jumpEventHead;
JumpEvent* jumpEventTail;
unsigned lastEventOffset;
// offset of this block within the code buffer
unsigned offset;
// final placed offset; ~0 until resolve() runs
unsigned start;
unsigned size;
bool resolved;
};
} // namespace powerpc
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_POWERPC_BLOCK_H

View File

@ -0,0 +1,29 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "context.h"
#include "block.h"
#include "avian/common.h"
namespace avian {
namespace codegen {
namespace powerpc {
// Per-assembly state for the PowerPC assembler: starts with a single empty
// block and a code vector with an initial capacity of 1024 bytes.
Context::Context(vm::System* s, vm::Allocator* a, vm::Zone* zone):
s(s), zone(zone), client(0), code(s, a, 1024), tasks(0), result(0),
firstBlock(new(zone) MyBlock(this, 0)),
lastBlock(firstBlock), jumpOffsetHead(0), jumpOffsetTail(0),
constantPool(0), constantPoolCount(0)
{ }
} // namespace powerpc
} // namespace codegen
} // namespace avian

View File

@ -0,0 +1,98 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_POWERPC_CONTEXT_H
#define AVIAN_CODEGEN_ASSEMBLER_POWERPC_CONTEXT_H
#include <avian/vm/codegen/assembler.h>
#include "avian/alloc-vector.h"
#ifdef powerpc
#undef powerpc
#endif
namespace vm {
class System;
class Allocator;
class Zone;
} // namespace vm
namespace avian {
namespace codegen {
namespace powerpc {
class Task;
class JumpOffset;
class ConstantPoolEntry;
class MyBlock;
// Per-assembly state for the PowerPC assembler: the code buffer, block
// list, deferred fixup tasks, and pending constant pool.
class Context {
public:
Context(vm::System* s, vm::Allocator* a, vm::Zone* zone);
vm::System* s;
vm::Zone* zone;
Assembler::Client* client;
// emitted machine code
vm::Vector code;
// deferred fixups, run after layout
Task* tasks;
// final output buffer address
uint8_t* result;
MyBlock* firstBlock;
MyBlock* lastBlock;
JumpOffset* jumpOffsetHead;
JumpOffset* jumpOffsetTail;
ConstantPoolEntry* constantPool;
unsigned constantPoolCount;
};
// Emitter function signatures, by operand count; branch operations carry
// the comparison operation as an extra argument.
typedef void (*OperationType)(Context*);
typedef void (*UnaryOperationType)(Context*, unsigned, lir::Operand*);
typedef void (*BinaryOperationType)
(Context*, unsigned, lir::Operand*, unsigned, lir::Operand*);
typedef void (*TernaryOperationType)
(Context*, unsigned, lir::Operand*, lir::Operand*,
lir::Operand*);
typedef void (*BranchOperationType)
(Context*, lir::TernaryOperation, unsigned, lir::Operand*,
lir::Operand*, lir::Operand*);
// Per-architecture dispatch tables mapping (operation, operand types) to
// emitter functions; indexed by the multimethod index helpers.
class ArchitectureContext {
public:
ArchitectureContext(vm::System* s): s(s) { }
vm::System* s;
OperationType operations[lir::OperationCount];
UnaryOperationType unaryOperations[lir::UnaryOperationCount
* lir::OperandTypeCount];
BinaryOperationType binaryOperations
[lir::BinaryOperationCount * lir::OperandTypeCount * lir::OperandTypeCount];
TernaryOperationType ternaryOperations
[lir::NonBranchTernaryOperationCount * lir::OperandTypeCount];
BranchOperationType branchOperations
[lir::BranchOperationCount * lir::OperandTypeCount * lir::OperandTypeCount];
};
// Aborter accessors used by the assert/expect macros.
inline avian::util::Aborter* getAborter(Context* con) {
return con->s;
}
inline avian::util::Aborter* getAborter(ArchitectureContext* con) {
return con->s;
}
} // namespace powerpc
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_POWERPC_CONTEXT_H

View File

@ -0,0 +1,141 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_POWERPC_ENCODE_H
#define AVIAN_CODEGEN_ASSEMBLER_POWERPC_ENCODE_H
#ifdef powerpc
#undef powerpc
#endif
namespace avian {
namespace codegen {
namespace powerpc {
namespace isa {
// INSTRUCTION FORMATS
//
// Each helper packs the fields of one PowerPC instruction format into a
// 32-bit word; the primary opcode always occupies the top six bits.
inline int D(int op, int rt, int ra, int d) {
  return (op << 26) | (rt << 21) | (ra << 16) | (d & 0xFFFF);
}
// inline int DS(int op, int rt, int ra, int ds, int xo) { return op<<26|rt<<21|ra<<16|ds<<2|xo; }
inline int I(int op, int li, int aa, int lk) {
  return (op << 26) | (li & 0x3FFFFFC) | (aa << 1) | lk;
}
inline int B(int op, int bo, int bi, int bd, int aa, int lk) {
  return (op << 26) | (bo << 21) | (bi << 16) | (bd & 0xFFFC) | (aa << 1) | lk;
}
// inline int SC(int op, int lev) { return op<<26|lev<<5|2; }
inline int X(int op, int rt, int ra, int rb, int xo, int rc) {
  return (op << 26) | (rt << 21) | (ra << 16) | (rb << 11) | (xo << 1) | rc;
}
inline int XL(int op, int bt, int ba, int bb, int xo, int lk) {
  return (op << 26) | (bt << 21) | (ba << 16) | (bb << 11) | (xo << 1) | lk;
}
inline int XFX(int op, int rt, int spr, int xo) {
  // the 10-bit SPR number is encoded with its two 5-bit halves swapped
  return (op << 26) | (rt << 21) | (((spr >> 5) | ((spr << 5) & 0x3E0)) << 11) | (xo << 1);
}
// inline int XFL(int op, int flm, int frb, int xo, int rc) { return op<<26|flm<<17|frb<<11|xo<<1|rc; }
// inline int XS(int op, int rs, int ra, int sh, int xo, int sh2, int rc) { return op<<26|rs<<21|ra<<16|sh<<11|xo<<2|sh2<<1|rc; }
inline int XO(int op, int rt, int ra, int rb, int oe, int xo, int rc) {
  return (op << 26) | (rt << 21) | (ra << 16) | (rb << 11) | (oe << 10) | (xo << 1) | rc;
}
// inline int A(int op, int frt, int fra, int frb, int frc, int xo, int rc) { return op<<26|frt<<21|fra<<16|frb<<11|frc<<6|xo<<1|rc; }
inline int M(int op, int rs, int ra, int rb, int mb, int me, int rc) {
  return (op << 26) | (rs << 21) | (ra << 16) | (rb << 11) | (mb << 6) | (me << 1) | rc;
}
// inline int MD(int op, int rs, int ra, int sh, int mb, int xo, int sh2, int rc) { return op<<26|rs<<21|ra<<16|sh<<11|mb<<5|xo<<2|sh2<<1|rc; }
// inline int MDS(int op, int rs, int ra, int rb, int mb, int xo, int rc) { return op<<26|rs<<21|ra<<16|rb<<11|mb<<5|xo<<1|rc; }
// INSTRUCTIONS
// byte/halfword/word loads (immediate- and register-indexed forms)
inline int lbz(int rt, int ra, int i) { return D(34, rt, ra, i); }
inline int lbzx(int rt, int ra, int rb) { return X(31, rt, ra, rb, 87, 0); }
inline int lha(int rt, int ra, int i) { return D(42, rt, ra, i); }
inline int lhax(int rt, int ra, int rb) { return X(31, rt, ra, rb, 343, 0); }
// inline int lhz(int rt, int ra, int i) { return D(40, rt, ra, i); }
inline int lhzx(int rt, int ra, int rb) { return X(31, rt, ra, rb, 279, 0); }
inline int lwz(int rt, int ra, int i) { return D(32, rt, ra, i); }
inline int lwzx(int rt, int ra, int rb) { return X(31, rt, ra, rb, 23, 0); }
// byte/halfword/word stores
inline int stb(int rs, int ra, int i) { return D(38, rs, ra, i); }
inline int stbx(int rs, int ra, int rb) { return X(31, rs, ra, rb, 215, 0); }
inline int sth(int rs, int ra, int i) { return D(44, rs, ra, i); }
inline int sthx(int rs, int ra, int rb) { return X(31, rs, ra, rb, 407, 0); }
inline int stw(int rs, int ra, int i) { return D(36, rs, ra, i); }
inline int stwu(int rs, int ra, int i) { return D(37, rs, ra, i); }
inline int stwux(int rs, int ra, int rb) { return X(31, rs, ra, rb, 183, 0); }
inline int stwx(int rs, int ra, int rb) { return X(31, rs, ra, rb, 151, 0); }
// integer arithmetic (add/subtract, with carry/extend variants)
inline int add(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 266, 0); }
inline int addc(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 10, 0); }
inline int adde(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 138, 0); }
inline int addi(int rt, int ra, int i) { return D(14, rt, ra, i); }
inline int addic(int rt, int ra, int i) { return D(12, rt, ra, i); }
inline int addis(int rt, int ra, int i) { return D(15, rt, ra, i); }
inline int subf(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 40, 0); }
inline int subfc(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 8, 0); }
inline int subfe(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 136, 0); }
inline int subfic(int rt, int ra, int i) { return D(8, rt, ra, i); }
inline int subfze(int rt, int ra) { return XO(31, rt, ra, 0, 0, 200, 0); }
// multiply, divide, negate
inline int mullw(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 235, 0); }
// inline int mulhw(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 75, 0); }
inline int mulhwu(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 11, 0); }
// inline int mulli(int rt, int ra, int i) { return D(7, rt, ra, i); }
inline int divw(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 491, 0); }
// inline int divwu(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 459, 0); }
// inline int divd(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 489, 0); }
// inline int divdu(int rt, int ra, int rb) { return XO(31, rt, ra, rb, 0, 457, 0); }
inline int neg(int rt, int ra) { return XO(31, rt, ra, 0, 0, 104, 0); }
// logical operations; note the source register goes in the rt slot for
// these encodings, hence the swapped first arguments to X/D
inline int and_(int rt, int ra, int rb) { return X(31, ra, rt, rb, 28, 0); }
inline int andi(int rt, int ra, int i) { return D(28, ra, rt, i); }
inline int andis(int rt, int ra, int i) { return D(29, ra, rt, i); }
inline int or_(int rt, int ra, int rb) { return X(31, ra, rt, rb, 444, 0); }
inline int ori(int rt, int ra, int i) { return D(24, rt, ra, i); }
inline int xor_(int rt, int ra, int rb) { return X(31, ra, rt, rb, 316, 0); }
inline int oris(int rt, int ra, int i) { return D(25, rt, ra, i); }
inline int xori(int rt, int ra, int i) { return D(26, rt, ra, i); }
inline int xoris(int rt, int ra, int i) { return D(27, rt, ra, i); }
// rotates, shifts, and sign extension
inline int rlwinm(int rt, int ra, int i, int mb, int me) { return M(21, ra, rt, i, mb, me, 0); }
inline int rlwimi(int rt, int ra, int i, int mb, int me) { return M(20, ra, rt, i, mb, me, 0); }
inline int slw(int rt, int ra, int sh) { return X(31, ra, rt, sh, 24, 0); }
// inline int sld(int rt, int ra, int rb) { return X(31, ra, rt, rb, 27, 0); }
inline int srw(int rt, int ra, int sh) { return X(31, ra, rt, sh, 536, 0); }
inline int sraw(int rt, int ra, int sh) { return X(31, ra, rt, sh, 792, 0); }
inline int srawi(int rt, int ra, int sh) { return X(31, ra, rt, sh, 824, 0); }
inline int extsb(int rt, int rs) { return X(31, rs, rt, 0, 954, 0); }
inline int extsh(int rt, int rs) { return X(31, rs, rt, 0, 922, 0); }
// special-purpose register moves
inline int mfspr(int rt, int spr) { return XFX(31, rt, spr, 339); }
inline int mtspr(int spr, int rs) { return XFX(31, rs, spr, 467); }
// branches: unconditional (b/bl), to count register (bcctr), to link
// register (bclr), and conditional relative (bc)
inline int b(int i) { return I(18, i, 0, 0); }
inline int bl(int i) { return I(18, i, 0, 1); }
inline int bcctr(int bo, int bi, int lk) { return XL(19, bo, bi, 0, 528, lk); }
inline int bclr(int bo, int bi, int lk) { return XL(19, bo, bi, 0, 16, lk); }
inline int bc(int bo, int bi, int bd, int lk) { return B(16, bo, bi, bd, 0, lk); }
// compare: bf selects the condition-register field (shifted into the upper
// bits of the rt slot)
inline int cmp(int bf, int ra, int rb) { return X(31, bf << 2, ra, rb, 0, 0); }
inline int cmpl(int bf, int ra, int rb) { return X(31, bf << 2, ra, rb, 32, 0); }
inline int cmpi(int bf, int ra, int i) { return D(11, bf << 2, ra, i); }
inline int cmpli(int bf, int ra, int i) { return D(10, bf << 2, ra, i); }
inline int sync(int L) { return X(31, L, 0, 0, 598, 0); }
// PSEUDO-INSTRUCTIONS
inline int li(int rt, int i) { return addi(rt, 0, i); }
inline int lis(int rt, int i) { return addis(rt, 0, i); }
inline int slwi(int rt, int ra, int i) { return rlwinm(rt, ra, i, 0, 31-i); }
inline int srwi(int rt, int ra, int i) { return rlwinm(rt, ra, 32-i, i, 31); }
// inline int sub(int rt, int ra, int rb) { return subf(rt, rb, ra); }
// inline int subc(int rt, int ra, int rb) { return subfc(rt, rb, ra); }
// inline int subi(int rt, int ra, int i) { return addi(rt, ra, -i); }
// inline int subis(int rt, int ra, int i) { return addis(rt, ra, -i); }
inline int mr(int rt, int ra) { return or_(rt, ra, ra); }
inline int mflr(int rx) { return mfspr(rx, 8); }
inline int mtlr(int rx) { return mtspr(8, rx); }
inline int mtctr(int rd) { return mtspr(9, rd); }
inline int bctr() { return bcctr(20, 0, 0); }
inline int bctrl() { return bcctr(20, 0, 1); }
inline int blr() { return bclr(20, 0, 0); }
inline int blt(int i) { return bc(12, 0, i, 0); }
inline int bgt(int i) { return bc(12, 1, i, 0); }
inline int bge(int i) { return bc(4, 0, i, 0); }
inline int ble(int i) { return bc(4, 1, i, 0); }
inline int beq(int i) { return bc(12, 2, i, 0); }
inline int bne(int i) { return bc(4, 2, i, 0); }
inline int cmpw(int ra, int rb) { return cmp(0, ra, rb); }
inline int cmplw(int ra, int rb) { return cmpl(0, ra, rb); }
inline int cmpwi(int ra, int i) { return cmpi(0, ra, i); }
inline int cmplwi(int ra, int i) { return cmpli(0, ra, i); }
inline int trap() { return 0x7fe00008; } // todo: macro-ify
} // namespace isa
} // namespace powerpc
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_POWERPC_ENCODE_H

View File

@ -0,0 +1,243 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "context.h"
#include "block.h"
#include "fixup.h"
#include "encode.h"
namespace avian {
namespace codegen {
namespace powerpc {
using namespace isa;
using namespace util;
unsigned padding(MyBlock*, unsigned);
int ha16(int32_t i);
// Returns true when v fits the instruction field shape: the low 'right' bits
// are zero, and the value survives truncation to (32 - left) bits -- i.e. it
// round-trips through both shift pairs unchanged.
bool bounded(int right, int left, int32_t v) {
  const bool highBitsRedundant = ((v << left) >> left) == v;
  const bool lowBitsClear = ((v >> right) << right) == v;
  return highBitsRedundant && lowBitsClear;
}
// Promise for the final image offset of a position inside a code block;
// resolvable once the owning block has been placed.
OffsetPromise::OffsetPromise(Context* c, MyBlock* block, unsigned offset):
  c(c), block(block), offset(offset)
{ }

// Resolved once the block's final start address is known.
bool OffsetPromise::resolved() {
  return block->resolved;
}

int64_t OffsetPromise::value() {
  assert(c, resolved());

  // Rebase the buffer-relative offset onto the block's final start address,
  // accounting for any padding inserted before this position.
  unsigned o = offset - block->offset;
  return block->start + padding(block, o) + o;
}

// Creates a promise for the current end of the code buffer (the next
// instruction to be emitted), allocated in the assembler's zone.
Promise* offsetPromise(Context* c) {
  return new(c->zone) OffsetPromise(c, c->lastBlock, c->code.length());
}

// Patches the branch at 'instruction' to reach 'value'. Conditional branches
// carry a 16-bit displacement (mask 0xFFFC); if the target is out of range,
// an unconditional long branch is written into the island slot at
// 'jumpAddress' and the conditional branch is retargeted to that island.
// Unconditional branches use the 26-bit field (mask 0x3FFFFFC). Returns the
// address immediately following the patched instruction.
void* updateOffset(vm::System* s, uint8_t* instruction, bool conditional, int64_t value,
                   void* jumpAddress)
{
  int32_t v = reinterpret_cast<uint8_t*>(value) - instruction;

  int32_t mask;
  if (conditional) {
    if (not bounded(2, 16, v)) {
      // Target is beyond 16-bit displacement range: emit a jump island.
      // NOTE(review): this store is not passed through vm::targetV4, unlike
      // the patch at the bottom of this function -- confirm host and target
      // byte order always agree on this path.
      *static_cast<uint32_t*>(jumpAddress) = isa::b(0);
      updateOffset(s, static_cast<uint8_t*>(jumpAddress), false, value, 0);

      v = static_cast<uint8_t*>(jumpAddress) - instruction;
      expect(s, bounded(2, 16, v));
    }
    mask = 0xFFFC;
  } else {
    expect(s, bounded(2, 6, v));
    mask = 0x3FFFFFC;
  }

  // Merge the displacement into the existing instruction word, preserving
  // the opcode and flag bits outside the mask; the word in memory is in
  // target byte order.
  int32_t* p = reinterpret_cast<int32_t*>(instruction);
  *p = vm::targetV4((v & mask) | ((~mask) & vm::targetV4(*p)));

  return instruction + 4;
}
// Listener that patches a branch once its target promise resolves after code
// generation.
OffsetListener::OffsetListener(vm::System* s, uint8_t* instruction, bool conditional,
                               void* jumpAddress):
  s(s),
  instruction(instruction),
  jumpAddress(jumpAddress),
  conditional(conditional)
{ }

// Invoked when the promised target becomes known: patches the instruction
// and reports the address following it via 'location'. Returns false (no
// further resolution required).
bool OffsetListener::resolve(int64_t value, void** location) {
  void* p = updateOffset(s, instruction, conditional, value, jumpAddress);
  if (location) *location = p;
  return false;
}

// Deferred branch fixup: applied immediately in run() when the target is
// already resolved, otherwise registered as a listener on the promise.
OffsetTask::OffsetTask(Task* next, Promise* promise, Promise* instructionOffset,
                       bool conditional):
  Task(next),
  promise(promise),
  instructionOffset(instructionOffset),
  jumpAddress(0),
  conditional(conditional)
{ }

void OffsetTask::run(Context* c) {
  if (promise->resolved()) {
    updateOffset
      (c->s, c->result + instructionOffset->value(), conditional,
       promise->value(), jumpAddress);
  } else {
    // Target unknown until later: park an OffsetListener in the promise's
    // listener storage (placement new into promise-owned memory).
    new (promise->listen(sizeof(OffsetListener)))
      OffsetListener(c->s, c->result + instructionOffset->value(),
                     conditional, jumpAddress);
  }
}

// Record of one conditional (range-limited) branch within a block; used to
// decide where long-jump islands must be emitted.
JumpOffset::JumpOffset(MyBlock* block, OffsetTask* task, unsigned offset):
  block(block), task(task), next(0), offset(offset)
{ }

// A point in a block at which the jump islands for a run of JumpOffsets
// [head, tail] will be emitted.
JumpEvent::JumpEvent(JumpOffset* jumpOffsetHead, JumpOffset* jumpOffsetTail,
                     unsigned offset):
  jumpOffsetHead(jumpOffsetHead), jumpOffsetTail(jumpOffsetTail), next(0),
  offset(offset)
{ }

// Queues an OffsetTask for a branch; conditional branches are additionally
// appended to the current block's jump-offset list (O(1) tail append) so
// out-of-range displacements can later be rescued with islands.
void appendOffsetTask(Context* c, Promise* promise, Promise* instructionOffset,
                      bool conditional)
{
  OffsetTask* task = new(c->zone) OffsetTask(c->tasks, promise, instructionOffset, conditional);

  c->tasks = task;

  if (conditional) {
    JumpOffset* offset =
      new(c->zone) JumpOffset(c->lastBlock, task, c->code.length() - c->lastBlock->offset);

    if (c->lastBlock->jumpOffsetTail) {
      c->lastBlock->jumpOffsetTail->next = offset;
    } else {
      c->lastBlock->jumpOffsetHead = offset;
    }
    c->lastBlock->jumpOffsetTail = offset;
  }
}

// Appends a JumpEvent covering jump offsets [head, tail] at 'offset' to
// block b's event list (singly linked, tail pointer for O(1) append).
void appendJumpEvent(Context* c, MyBlock* b, unsigned offset, JumpOffset* head,
                     JumpOffset* tail)
{
  JumpEvent* e = new(c->zone) JumpEvent
    (head, tail, offset);

  if (b->jumpEventTail) {
    b->jumpEventTail->next = e;
  } else {
    b->jumpEventHead = e;
  }
  b->jumpEventTail = e;
}
// Allocates a promise yielding (base >> shift) & mask once base resolves.
ShiftMaskPromise* shiftMaskPromise(Context* c, Promise* base, unsigned shift, int64_t mask) {
  return new (c->zone) ShiftMaskPromise(base, shift, mask);
}

// Patches a two-instruction 32-bit immediate load at 'dst' with value 'src'.
// The destination register is recovered from bits 21-25 of the second word.
// When 'address' is true, the first word becomes lis(r, ha16(src)) and the
// low half of src is OR-ed into the second word's 16-bit field (ha16
// pre-compensates for that field's sign extension); otherwise a plain
// lis/ori pair is written. Only size 4 is supported; anything else aborts.
void
updateImmediate(vm::System* s, void* dst, int32_t src, unsigned size, bool address)
{
  switch (size) {
  case 4: {
    int32_t* p = static_cast<int32_t*>(dst);
    // Words in memory are target byte order; decode before extracting the
    // register field, encode when writing back.
    int r = (vm::targetV4(p[1]) >> 21) & 31;

    if (address) {
      p[0] = vm::targetV4(lis(r, ha16(src)));
      p[1] |= vm::targetV4(src & 0xFFFF);
    } else {
      p[0] = vm::targetV4(lis(r, src >> 16));
      p[1] = vm::targetV4(ori(r, r, src));
    }
  } break;

  default: abort(s);
  }
}

// Listener that rewrites a 32-bit immediate load once its value is known.
ImmediateListener::ImmediateListener(vm::System* s, void* dst, unsigned size, unsigned offset,
                                     bool address):
  s(s), dst(dst), size(size), offset(offset), address(address)
{ }

bool ImmediateListener::resolve(int64_t value, void** location) {
  updateImmediate(s, dst, value, size, address);
  if (location) *location = static_cast<uint8_t*>(dst) + offset;
  return false;
}

// Deferred immediate fixup, analogous to OffsetTask: patch now if resolved,
// otherwise register an ImmediateListener on the promise.
ImmediateTask::ImmediateTask(Task* next, Promise* promise, Promise* offset, unsigned size,
                             unsigned promiseOffset, bool address):
  Task(next),
  promise(promise),
  offset(offset),
  size(size),
  promiseOffset(promiseOffset),
  address(address)
{ }

void ImmediateTask::run(Context* c) {
  if (promise->resolved()) {
    updateImmediate
      (c->s, c->result + offset->value(), promise->value(), size, address);
  } else {
    new (promise->listen(sizeof(ImmediateListener))) ImmediateListener
      (c->s, c->result + offset->value(), size, promiseOffset, address);
  }
}

// Queues an ImmediateTask on the context's task list.
void
appendImmediateTask(Context* c, Promise* promise, Promise* offset,
                    unsigned size, unsigned promiseOffset, bool address)
{
  c->tasks = new(c->zone) ImmediateTask(c->tasks, promise, offset, size, promiseOffset, address);
}

// Entry in the per-context constant pool; constructing one pushes it onto
// the context's pool list and bumps the count.
ConstantPoolEntry::ConstantPoolEntry(Context* c, Promise* constant):
  c(c), constant(constant), next(c->constantPool), address(0)
{
  c->constantPool = this;
  ++ c->constantPoolCount;
}

// Final address of this entry's pool slot.
int64_t ConstantPoolEntry::value() {
  assert(c, resolved());

  return reinterpret_cast<intptr_t>(address);
}

// Resolved once the pool has been laid out and 'address' assigned.
bool ConstantPoolEntry::resolved() {
  return address != 0;
}

ConstantPoolEntry* appendConstantPoolEntry(Context* c, Promise* constant) {
  return new (c->zone) ConstantPoolEntry(c, constant);
}
} // namespace powerpc
} // namespace codegen
} // namespace avian

View File

@ -0,0 +1,160 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_POWERPC_FIXUP_H
#define AVIAN_CODEGEN_ASSEMBLER_POWERPC_FIXUP_H
namespace avian {
namespace codegen {
namespace powerpc {
// A unit of deferred work performed after code generation, once the final
// layout of the generated code is known. Tasks form a singly linked list
// threaded through the assembler context.
class Task {
 public:
  Task(Task* next): next(next) { }

  virtual void run(Context* c) = 0;

  Task* next;
};

// Promise for the final image offset of a position inside a code block;
// resolvable once the block has been placed.
class OffsetPromise: public Promise {
 public:
  OffsetPromise(Context* c, MyBlock* block, unsigned offset);

  virtual bool resolved();

  virtual int64_t value();

  Context* c;
  MyBlock* block;
  unsigned offset;  // position within the unplaced code buffer
};

// Creates an OffsetPromise for the current end of the code buffer.
Promise* offsetPromise(Context* c);

// Patches the branch at 'instruction' to reach 'value' (see fixup.cpp for
// the conditional/unconditional displacement handling); returns the address
// after the patched word.
void*
updateOffset(vm::System* s, uint8_t* instruction, bool conditional, int64_t value,
             void* jumpAddress);

// Promise listener performing the branch patch when the target resolves.
class OffsetListener: public Promise::Listener {
 public:
  OffsetListener(vm::System* s, uint8_t* instruction, bool conditional,
                 void* jumpAddress);

  virtual bool resolve(int64_t value, void** location);

  vm::System* s;
  uint8_t* instruction;
  void* jumpAddress;
  bool conditional;
};

// Deferred branch fixup: patches immediately when the target is already
// resolved, otherwise registers an OffsetListener.
class OffsetTask: public Task {
 public:
  OffsetTask(Task* next, Promise* promise, Promise* instructionOffset,
             bool conditional);

  virtual void run(Context* c);

  Promise* promise;            // branch target
  Promise* instructionOffset;  // location of the branch instruction
  void* jumpAddress;           // island slot for out-of-range conditionals
  bool conditional;
};

// Record of a conditional (range-limited) branch within a block.
class JumpOffset {
 public:
  JumpOffset(MyBlock* block, OffsetTask* task, unsigned offset);

  MyBlock* block;
  OffsetTask* task;
  JumpOffset* next;
  unsigned offset;
};

// A point in a block where long-jump islands for the JumpOffsets in
// [jumpOffsetHead, jumpOffsetTail] will be emitted.
class JumpEvent {
 public:
  JumpEvent(JumpOffset* jumpOffsetHead, JumpOffset* jumpOffsetTail,
            unsigned offset);

  JumpOffset* jumpOffsetHead;
  JumpOffset* jumpOffsetTail;
  JumpEvent* next;
  unsigned offset;
};

void appendOffsetTask(Context* c, Promise* promise, Promise* instructionOffset,
                      bool conditional);

void appendJumpEvent(Context* c, MyBlock* b, unsigned offset, JumpOffset* head,
                     JumpOffset* tail);

ShiftMaskPromise* shiftMaskPromise(Context* c, Promise* base, unsigned shift, int64_t mask);

// Patches a two-instruction 32-bit immediate load (see fixup.cpp).
void updateImmediate(vm::System* s, void* dst, int32_t src, unsigned size, bool address);

// Promise listener rewriting an immediate load once its value is known.
class ImmediateListener: public Promise::Listener {
 public:
  ImmediateListener(vm::System* s, void* dst, unsigned size, unsigned offset,
                    bool address);

  virtual bool resolve(int64_t value, void** location);

  vm::System* s;
  void* dst;
  unsigned size;
  unsigned offset;
  bool address;
};

// Deferred immediate fixup, analogous to OffsetTask.
class ImmediateTask: public Task {
 public:
  ImmediateTask(Task* next, Promise* promise, Promise* offset, unsigned size,
                unsigned promiseOffset, bool address);

  virtual void run(Context* c);

  Promise* promise;
  Promise* offset;
  unsigned size;
  unsigned promiseOffset;
  bool address;
};

void
appendImmediateTask(Context* c, Promise* promise, Promise* offset,
                    unsigned size, unsigned promiseOffset, bool address);

// Entry in the per-context constant pool; resolves to the slot's final
// address once the pool has been emitted.
class ConstantPoolEntry: public Promise {
 public:
  ConstantPoolEntry(Context* c, Promise* constant);

  virtual int64_t value();

  virtual bool resolved();

  Context* c;
  Promise* constant;
  ConstantPoolEntry* next;
  void* address;  // 0 until the pool is laid out
};

ConstantPoolEntry* appendConstantPoolEntry(Context* c, Promise* constant);
// "High-adjusted" upper 16 bits of i: the value to feed lis() so that a
// subsequent instruction adding the sign-extended low 16 bits reproduces i.
// When bit 15 is set, the low half sign-extends negative, so the high half
// is bumped by one to compensate.
inline int ha16(int32_t i) {
  int high = i >> 16;
  if (i & 0x8000) {
    ++high;
  }
  return high & 0xffff;
}
} // namespace powerpc
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_POWERPC_FIXUP_H

View File

@ -0,0 +1,139 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "context.h"
#include "block.h"
#include "avian/common.h"
#include "operations.h"
#include "multimethod.h"
#include "../multimethod.h"
namespace avian {
namespace codegen {
namespace powerpc {
using namespace util;
// Index into the binary-operation dispatch table: the operation is the
// fastest-varying dimension, then the two operand types.
unsigned index(ArchitectureContext*,
               lir::BinaryOperation operation,
               lir::OperandType operand1,
               lir::OperandType operand2)
{
  return operation
    + (lir::BinaryOperationCount * operand1)
    + (lir::BinaryOperationCount * lir::OperandTypeCount * operand2);
}

// Index into the ternary-operation dispatch table. Branch operations are
// excluded -- they are dispatched through branchIndex instead.
unsigned index(ArchitectureContext* c UNUSED,
               lir::TernaryOperation operation,
               lir::OperandType operand1)
{
  assert(c, not isBranch(operation));

  return operation + (lir::NonBranchTernaryOperationCount * operand1);
}

// Index into the branch dispatch table, keyed on the two operand types only.
unsigned branchIndex(ArchitectureContext* c UNUSED, lir::OperandType operand1,
                     lir::OperandType operand2)
{
  return operand1 + (lir::OperandTypeCount * operand2);
}
// Fills the PowerPC dispatch tables, mapping (operation, operand kinds) to
// the emitter functions declared in operations.h. The single-letter aliases
// name the operand kinds for brevity in the table entries below.
void populateTables(ArchitectureContext* c) {
  const lir::OperandType C = lir::ConstantOperand;
  const lir::OperandType A = lir::AddressOperand;
  const lir::OperandType R = lir::RegisterOperand;
  const lir::OperandType M = lir::MemoryOperand;

  OperationType* zo = c->operations;
  UnaryOperationType* uo = c->unaryOperations;
  BinaryOperationType* bo = c->binaryOperations;
  TernaryOperationType* to = c->ternaryOperations;
  BranchOperationType* bro = c->branchOperations;

  // zero-operand operations; all barrier flavors share one emitter
  zo[lir::Return] = return_;
  zo[lir::LoadBarrier] = memoryBarrier;
  zo[lir::StoreStoreBarrier] = memoryBarrier;
  zo[lir::StoreLoadBarrier] = memoryBarrier;
  zo[lir::Trap] = trap;

  // unary operations (calls and jumps); aligned variants of plain
  // jump/call reuse the unaligned emitters
  uo[Multimethod::index(lir::LongCall, C)] = CAST1(longCallC);

  uo[Multimethod::index(lir::AlignedLongCall, C)] = CAST1(alignedLongCallC);

  uo[Multimethod::index(lir::LongJump, C)] = CAST1(longJumpC);

  uo[Multimethod::index(lir::AlignedLongJump, C)] = CAST1(alignedLongJumpC);

  uo[Multimethod::index(lir::Jump, R)] = CAST1(jumpR);
  uo[Multimethod::index(lir::Jump, C)] = CAST1(jumpC);

  uo[Multimethod::index(lir::AlignedJump, R)] = CAST1(jumpR);
  uo[Multimethod::index(lir::AlignedJump, C)] = CAST1(jumpC);

  uo[Multimethod::index(lir::Call, C)] = CAST1(callC);
  uo[Multimethod::index(lir::Call, R)] = CAST1(callR);

  uo[Multimethod::index(lir::AlignedCall, C)] = CAST1(callC);
  uo[Multimethod::index(lir::AlignedCall, R)] = CAST1(callR);

  // binary operations (moves and negate), keyed on source/destination kinds
  bo[index(c, lir::Move, R, R)] = CAST2(moveRR);
  bo[index(c, lir::Move, C, R)] = CAST2(moveCR);
  bo[index(c, lir::Move, C, M)] = CAST2(moveCM);
  bo[index(c, lir::Move, M, R)] = CAST2(moveMR);
  bo[index(c, lir::Move, R, M)] = CAST2(moveRM);
  bo[index(c, lir::Move, A, R)] = CAST2(moveAR);

  bo[index(c, lir::MoveZ, R, R)] = CAST2(moveZRR);
  bo[index(c, lir::MoveZ, M, R)] = CAST2(moveZMR);
  bo[index(c, lir::MoveZ, C, R)] = CAST2(moveCR);

  bo[index(c, lir::Negate, R, R)] = CAST2(negateRR);

  // non-branch ternary operations, keyed on the first operand kind
  to[index(c, lir::Add, R)] = CAST3(addR);
  to[index(c, lir::Add, C)] = CAST3(addC);

  to[index(c, lir::Subtract, R)] = CAST3(subR);
  to[index(c, lir::Subtract, C)] = CAST3(subC);

  to[index(c, lir::Multiply, R)] = CAST3(multiplyR);

  to[index(c, lir::Divide, R)] = CAST3(divideR);

  to[index(c, lir::Remainder, R)] = CAST3(remainderR);

  to[index(c, lir::ShiftLeft, R)] = CAST3(shiftLeftR);
  to[index(c, lir::ShiftLeft, C)] = CAST3(shiftLeftC);

  to[index(c, lir::ShiftRight, R)] = CAST3(shiftRightR);
  to[index(c, lir::ShiftRight, C)] = CAST3(shiftRightC);

  to[index(c, lir::UnsignedShiftRight, R)] = CAST3(unsignedShiftRightR);
  to[index(c, lir::UnsignedShiftRight, C)] = CAST3(unsignedShiftRightC);

  to[index(c, lir::And, C)] = CAST3(andC);
  to[index(c, lir::And, R)] = CAST3(andR);

  to[index(c, lir::Or, C)] = CAST3(orC);
  to[index(c, lir::Or, R)] = CAST3(orR);

  to[index(c, lir::Xor, C)] = CAST3(xorC);
  to[index(c, lir::Xor, R)] = CAST3(xorR);

  // branch operations, keyed on both comparison operand kinds
  bro[branchIndex(c, R, R)] = CAST_BRANCH(branchRR);
  bro[branchIndex(c, C, R)] = CAST_BRANCH(branchCR);
  bro[branchIndex(c, C, M)] = CAST_BRANCH(branchCM);
  bro[branchIndex(c, R, M)] = CAST_BRANCH(branchRM);
}
} // namespace powerpc
} // namespace codegen
} // namespace avian

View File

@ -0,0 +1,41 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_POWERPC_MULTIMETHOD_H
#define AVIAN_CODEGEN_ASSEMBLER_POWERPC_MULTIMETHOD_H
#define CAST1(x) reinterpret_cast<UnaryOperationType>(x)
#define CAST2(x) reinterpret_cast<BinaryOperationType>(x)
#define CAST3(x) reinterpret_cast<TernaryOperationType>(x)
#define CAST_BRANCH(x) reinterpret_cast<BranchOperationType>(x)
namespace avian {
namespace codegen {
namespace powerpc {
unsigned index(ArchitectureContext*,
lir::BinaryOperation operation,
lir::OperandType operand1,
lir::OperandType operand2);
unsigned index(ArchitectureContext* c UNUSED,
lir::TernaryOperation operation,
lir::OperandType operand1);
unsigned branchIndex(ArchitectureContext* c UNUSED, lir::OperandType operand1,
lir::OperandType operand2);
void populateTables(ArchitectureContext* c);
} // namespace powerpc
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_POWERPC_MULTIMETHOD_H

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,197 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_POWERPC_OPERATIONS_H
#define AVIAN_CODEGEN_ASSEMBLER_POWERPC_OPERATIONS_H
#include "context.h"
namespace avian {
namespace codegen {
namespace powerpc {
// Appends one 32-bit instruction word to the code buffer in target byte
// order.
inline void emit(Context* con, int code) { con->code.append4(vm::targetV4(code)); }

// Acquires a scratch register from the register-allocator client.
inline int newTemp(Context* con) { return con->client->acquireTemporary(); }

// Releases a scratch register previously obtained with newTemp.
inline void freeTemp(Context* con, int r) { con->client->releaseTemporary(r); }

// Extracts the resolved 64-bit value of a constant operand's promise.
inline int64_t getValue(lir::Constant* c) { return c->value->value(); }
void andC(Context* c, unsigned size, lir::Constant* a,
lir::Register* b, lir::Register* dst);
void shiftLeftR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void moveRR(Context* c, unsigned srcSize, lir::Register* src,
unsigned dstSize, lir::Register* dst);
void shiftLeftC(Context* con, unsigned size, lir::Constant* a, lir::Register* b, lir::Register* t);
void shiftRightR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void shiftRightC(Context* con, unsigned size, lir::Constant* a, lir::Register* b, lir::Register* t);
void unsignedShiftRightR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void unsignedShiftRightC(Context* con, unsigned size, lir::Constant* a, lir::Register* b, lir::Register* t);
void jumpR(Context* c, unsigned size UNUSED, lir::Register* target);
void swapRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize, lir::Register* b);
void moveRR(Context* c, unsigned srcSize, lir::Register* src,
unsigned dstSize, lir::Register* dst);
void moveZRR(Context* c, unsigned srcSize, lir::Register* src,
unsigned, lir::Register* dst);
void moveCR2(Context* c, unsigned, lir::Constant* src,
unsigned dstSize, lir::Register* dst, unsigned promiseOffset);
void moveCR(Context* c, unsigned srcSize, lir::Constant* src,
unsigned dstSize, lir::Register* dst);
void addR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void addC(Context* con, unsigned size, lir::Constant* a, lir::Register* b, lir::Register* t);
void subR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void subC(Context* c, unsigned size, lir::Constant* a, lir::Register* b, lir::Register* t);
void multiplyR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
void divideR(Context* con, unsigned size UNUSED, lir::Register* a, lir::Register* b, lir::Register* t);
void remainderR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);
int
normalize(Context* c, int offset, int index, unsigned scale,
bool* preserveIndex, bool* release);
void store(Context* c, unsigned size, lir::Register* src,
int base, int offset, int index, unsigned scale, bool preserveIndex);
void moveRM(Context* c, unsigned srcSize, lir::Register* src,
unsigned dstSize UNUSED, lir::Memory* dst);
void moveAndUpdateRM(Context* c, unsigned srcSize UNUSED, lir::Register* src,
unsigned dstSize UNUSED, lir::Memory* dst);
void load(Context* c, unsigned srcSize, int base, int offset, int index,
unsigned scale, unsigned dstSize, lir::Register* dst,
bool preserveIndex, bool signExtend);
void moveMR(Context* c, unsigned srcSize, lir::Memory* src,
unsigned dstSize, lir::Register* dst);
void moveZMR(Context* c, unsigned srcSize, lir::Memory* src,
unsigned dstSize, lir::Register* dst);
void andR(Context* c, unsigned size, lir::Register* a,
lir::Register* b, lir::Register* dst);
void andC(Context* c, unsigned size, lir::Constant* a,
lir::Register* b, lir::Register* dst);
void orR(Context* c, unsigned size, lir::Register* a,
lir::Register* b, lir::Register* dst);
void orC(Context* c, unsigned size, lir::Constant* a,
lir::Register* b, lir::Register* dst);
void xorR(Context* c, unsigned size, lir::Register* a,
lir::Register* b, lir::Register* dst);
void xorC(Context* c, unsigned size, lir::Constant* a,
lir::Register* b, lir::Register* dst);
void moveAR2(Context* c, unsigned srcSize UNUSED, lir::Address* src,
unsigned dstSize, lir::Register* dst, unsigned promiseOffset);
void moveAR(Context* c, unsigned srcSize, lir::Address* src,
unsigned dstSize, lir::Register* dst);
void compareRR(Context* c, unsigned aSize UNUSED, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void compareCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b);
void compareCM(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Memory* b);
void compareRM(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize, lir::Memory* b);
void compareUnsignedRR(Context* c, unsigned aSize UNUSED, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void compareUnsignedCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b);
int32_t branch(Context* c, lir::TernaryOperation op);
void conditional(Context* c, int32_t branch, lir::Constant* target);
void branch(Context* c, lir::TernaryOperation op, lir::Constant* target);
void branchLong(Context* c, lir::TernaryOperation op, lir::Operand* al,
lir::Operand* ah, lir::Operand* bl,
lir::Operand* bh, lir::Constant* target,
BinaryOperationType compareSigned,
BinaryOperationType compareUnsigned);
void branchRR(Context* c, lir::TernaryOperation op, unsigned size,
lir::Register* a, lir::Register* b,
lir::Constant* target);
void branchCR(Context* c, lir::TernaryOperation op, unsigned size,
lir::Constant* a, lir::Register* b,
lir::Constant* target);
void branchRM(Context* c, lir::TernaryOperation op, unsigned size,
lir::Register* a, lir::Memory* b,
lir::Constant* target);
void branchCM(Context* c, lir::TernaryOperation op, unsigned size,
lir::Constant* a, lir::Memory* b,
lir::Constant* target);
void moveCM(Context* c, unsigned srcSize, lir::Constant* src,
unsigned dstSize, lir::Memory* dst);
void negateRR(Context* c, unsigned srcSize, lir::Register* src,
unsigned dstSize UNUSED, lir::Register* dst);
void callR(Context* c, unsigned size UNUSED, lir::Register* target);
void callC(Context* c, unsigned size UNUSED, lir::Constant* target);
void longCallC(Context* c, unsigned size UNUSED, lir::Constant* target);
void alignedLongCallC(Context* c, unsigned size UNUSED, lir::Constant* target);
void longJumpC(Context* c, unsigned size UNUSED, lir::Constant* target);
void alignedLongJumpC(Context* c, unsigned size UNUSED, lir::Constant* target);
void jumpC(Context* c, unsigned size UNUSED, lir::Constant* target);
void return_(Context* c);
void trap(Context* c);
void memoryBarrier(Context* c);
} // namespace powerpc
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_POWERPC_OPERATIONS_H

View File

@ -0,0 +1,23 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_POWERPC_REGISTERS_H
#define AVIAN_CODEGEN_ASSEMBLER_POWERPC_REGISTERS_H
namespace avian {
namespace codegen {
namespace powerpc {
} // namespace powerpc
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_POWERPC_REGISTERS_H

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,36 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "block.h"
#include <avian/vm/codegen/assembler.h>
namespace avian {
namespace codegen {
namespace x86 {
unsigned
padding(AlignmentPadding* p, unsigned index, unsigned offset, AlignmentPadding* limit);
// A contiguous region of generated x86 code. 'offset' is its position in
// the temporary code buffer; 'start' remains ~0 until the block is placed
// in the final image by resolve().
MyBlock::MyBlock(unsigned offset):
  next(0), firstPadding(0), lastPadding(0), offset(offset), start(~0),
  size(0)
{ }

// Places this block at 'start' in the final image and links the following
// block. Returns the next block's start position: this block's size plus
// whatever alignment padding was recorded between firstPadding and
// lastPadding.
unsigned MyBlock::resolve(unsigned start, Assembler::Block* next) {
  this->start = start;
  this->next = static_cast<MyBlock*>(next);

  return start + size + padding(firstPadding, start, offset, lastPadding);
}
} // namespace x86
} // namespace codegen
} // namespace avian

View File

@ -0,0 +1,40 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_X86_BLOCK_H
#define AVIAN_CODEGEN_ASSEMBLER_X86_BLOCK_H
#include <avian/vm/codegen/assembler.h>
namespace avian {
namespace codegen {
namespace x86 {
class AlignmentPadding;
// A contiguous region of x86 code within the assembler's output, tracking
// any alignment padding recorded inside it.
class MyBlock: public Assembler::Block {
 public:
  MyBlock(unsigned offset);

  virtual unsigned resolve(unsigned start, Assembler::Block* next);

  MyBlock* next;
  AlignmentPadding* firstPadding;
  AlignmentPadding* lastPadding;
  unsigned offset;  // position in the temporary code buffer
  unsigned start;   // final image position (~0 until resolved)
  unsigned size;
};
} // namespace x86
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_X86_BLOCK_H

View File

@ -0,0 +1,33 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "avian/allocator.h"
#include "avian/zone.h"
#include "context.h"
#include "block.h"
namespace avian {
namespace codegen {
namespace x86 {
// Architecture-wide state: the host system plus whether runtime CPU feature
// probing is permitted (consumed by useSSE in detect.cpp).
ArchitectureContext::ArchitectureContext(vm::System* s, bool useNativeFeatures):
  s(s), useNativeFeatures(useNativeFeatures)
{ }

// Per-assembly state: a code buffer with 1024-byte initial capacity, an
// empty task list, and a block chain seeded with one block at offset 0.
Context::Context(vm::System* s, vm::Allocator* a, vm::Zone* zone, ArchitectureContext* ac):
  s(s), zone(zone), client(0), code(s, a, 1024), tasks(0), result(0),
  firstBlock(new(zone) MyBlock(0)),
  lastBlock(firstBlock), ac(ac)
{ }
} // namespace x86
} // namespace codegen
} // namespace avian

View File

@ -0,0 +1,103 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_X86_CONTEXT_H
#define AVIAN_CODEGEN_ASSEMBLER_X86_CONTEXT_H
#define CAST1(x) reinterpret_cast<UnaryOperationType>(x)
#define CAST2(x) reinterpret_cast<BinaryOperationType>(x)
#define CAST_BRANCH(x) reinterpret_cast<BranchOperationType>(x)
#include <stdint.h>
#include "avian/alloc-vector.h"
#include <avian/vm/codegen/lir.h>
#include <avian/vm/codegen/assembler.h>
#include <avian/vm/system/system.h>
namespace vm {
class System;
class Allocator;
class Zone;
} // namespace vm
namespace avian {
namespace util {
class Aborter;
} // namespace util
namespace codegen {
namespace x86 {
class Context;
class MyBlock;
class Task;

// Emitter function-pointer types used by the dispatch tables below.
typedef void (*OperationType)(Context*);

typedef void (*UnaryOperationType)(Context*, unsigned, lir::Operand*);

typedef void (*BinaryOperationType)
(Context*, unsigned, lir::Operand*, unsigned, lir::Operand*);

typedef void (*BranchOperationType)
(Context*, lir::TernaryOperation, unsigned, lir::Operand*,
 lir::Operand*, lir::Operand*);

// Architecture-wide state shared across assemblies: dispatch tables mapping
// (operation, operand kinds) to emitter functions.
class ArchitectureContext {
 public:
  ArchitectureContext(vm::System* s, bool useNativeFeatures);

  vm::System* s;
  bool useNativeFeatures;  // allow runtime CPU feature probing
  OperationType operations[lir::OperationCount];
  UnaryOperationType unaryOperations[lir::UnaryOperationCount
                                     * lir::OperandTypeCount];
  // binary and non-branch ternary operations share one table
  BinaryOperationType binaryOperations
  [(lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
   * lir::OperandTypeCount
   * lir::OperandTypeCount];
  BranchOperationType branchOperations
  [lir::BranchOperationCount
   * lir::OperandTypeCount
   * lir::OperandTypeCount];
};

// Per-assembly state: the code buffer under construction, deferred fixup
// tasks, and the chain of code blocks.
class Context {
 public:
  Context(vm::System* s, vm::Allocator* a, vm::Zone* zone, ArchitectureContext* ac);

  vm::System* s;
  vm::Zone* zone;
  Assembler::Client* client;
  vm::Vector code;
  Task* tasks;
  uint8_t* result;  // finished code buffer (0 until assigned)
  MyBlock* firstBlock;
  MyBlock* lastBlock;
  ArchitectureContext* ac;
};

// Both context types expose the system object as the aborter used by
// assert/expect.
inline avian::util::Aborter* getAborter(Context* c) {
  return c->s;
}

inline avian::util::Aborter* getAborter(ArchitectureContext* c) {
  return c->s;
}
} // namespace x86
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_X86_CONTEXT_H

View File

@ -0,0 +1,41 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "avian/target.h"
#include "context.h"
namespace avian {
namespace codegen {
namespace x86 {
extern "C" bool
detectFeature(unsigned ecx, unsigned edx);
// Decide whether generated code may use SSE/SSE2 instructions.
bool useSSE(ArchitectureContext* c) {
  // amd64 guarantees SSE2 support, so no runtime probe is needed.
  if (vm::TargetBytesPerWord == 8) {
    return true;
  }
  if (not c->useNativeFeatures) {
    return false;
  }
  // Probe CPUID once and cache the answer; -1 means "not yet probed".
  // NOTE(review): the cache is not synchronized — presumably first use
  // happens before concurrent codegen starts; confirm.
  static int cached = -1;
  if (cached == -1) {
    cached = detectFeature(0, 0x2000000)     // SSE 1
        and detectFeature(0, 0x4000000);     // SSE 2
  }
  return cached;
}
} // namespace x86
} // namespace codegen
} // namespace avian

View File

@ -0,0 +1,28 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_X86_DETECT_H
#define AVIAN_CODEGEN_ASSEMBLER_X86_DETECT_H
#include <avian/vm/codegen/assembler.h>
namespace avian {
namespace codegen {
namespace x86 {
class ArchitectureContext;

// Returns true when the generated code may use SSE/SSE2 instructions
// (CPUID probe lives in detect.cpp).
bool useSSE(ArchitectureContext* c);
} // namespace x86
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_X86_DETECT_H

View File

@ -0,0 +1,354 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "avian/target.h"
#include "avian/alloc-vector.h"
#include <avian/util/abort.h>
#include <avian/util/math.h>
#include <avian/vm/codegen/assembler.h>
#include <avian/vm/codegen/promise.h>
#include "context.h"
#include "encode.h"
#include "registers.h"
#include "fixup.h"
using namespace avian::util;
namespace avian {
namespace codegen {
namespace x86 {
#define REX_W 0x48
#define REX_R 0x44
#define REX_X 0x42
#define REX_B 0x41
#define REX_NONE 0x40
// Emit a REX prefix when one is needed (64-bit targets only).
// `a`, `index` and `base` are register numbers (or lir::NoRegister);
// registers 8-15 (bit 3 set) contribute the REX.R/X/B extension bits,
// and an 8-byte operand size sets REX.W.  `always` forces emission of
// the prefix even when no bit is set (needed for byte ops to reach
// spl/bpl/sil/dil instead of ah/ch/dh/bh).
void maybeRex(Context* c, unsigned size, int a, int index, int base, bool always) {
  if (vm::TargetBytesPerWord == 8) {
    uint8_t byte;
    if (size == 8) {
      byte = REX_W;
    } else {
      byte = REX_NONE;
    }
    if (a != lir::NoRegister and (a & 8)) byte |= REX_R;
    if (index != lir::NoRegister and (index & 8)) byte |= REX_X;
    if (base != lir::NoRegister and (base & 8)) byte |= REX_B;
    if (always or byte != REX_NONE) c->code.append(byte);
  }
}

// reg/reg form: `a` supplies REX.R, `b` supplies REX.B.
void maybeRex(Context* c, unsigned size, lir::Register* a, lir::Register* b) {
  maybeRex(c, size, a->low, lir::NoRegister, b->low, false);
}

// Like the reg/reg form, but always emits the prefix byte.
void alwaysRex(Context* c, unsigned size, lir::Register* a, lir::Register* b) {
  maybeRex(c, size, a->low, lir::NoRegister, b->low, true);
}

// Single-register form (register lands in REX.B).
void maybeRex(Context* c, unsigned size, lir::Register* a) {
  maybeRex(c, size, lir::NoRegister, lir::NoRegister, a->low, false);
}

// reg/mem form; forces the prefix for byte ops on registers 4-7 so
// they encode as spl/bpl/sil/dil.
void maybeRex(Context* c, unsigned size, lir::Register* a, lir::Memory* b) {
  maybeRex(c, size, a->low, b->index, b->base, size == 1 and (a->low & 4));
}

// Memory-only form.
void maybeRex(Context* c, unsigned size, lir::Memory* a) {
  maybeRex(c, size, lir::NoRegister, a->index, a->base, false);
}
// Append a ModRM byte.  Note the operand order: `b` lands in the reg
// field, `a` in the r/m field.
void modrm(Context* c, uint8_t mod, int a, int b) {
  c->code.append(mod | (regCode(b) << 3) | regCode(a));
}

void modrm(Context* c, uint8_t mod, lir::Register* a, lir::Register* b) {
  modrm(c, mod, a->low, b->low);
}

// Append a SIB byte: log2(scale) in the top 2 bits, then index, base.
void sib(Context* c, unsigned scale, int index, int base) {
  c->code.append((util::log(scale) << 6) | (regCode(index) << 3) | regCode(base));
}

// ModRM (plus SIB when required) for a memory operand.  rsp as a base
// cannot be encoded directly in ModRM's r/m field, so it is routed
// through a SIB byte; any indexed address likewise uses r/m=rsp + SIB.
void modrmSib(Context* c, int width, int a, int scale, int index, int base) {
  if (index == lir::NoRegister) {
    modrm(c, width, base, a);
    if (regCode(base) == rsp) {
      sib(c, 0x00, rsp, rsp);
    }
  } else {
    modrm(c, width, rsp, a);
    sib(c, scale, index, base);
  }
}

// Memory operand with displacement: pick the shortest encoding
// (no disp / disp8 / disp32).  rbp as a base cannot use the
// no-displacement form, so offset 0 falls through to disp8.
void modrmSibImm(Context* c, int a, int scale, int index, int base, int offset) {
  if (offset == 0 and regCode(base) != rbp) {
    modrmSib(c, 0x00, a, scale, index, base);
  } else if (vm::fitsInInt8(offset)) {
    modrmSib(c, 0x40, a, scale, index, base);
    c->code.append(offset);
  } else {
    modrmSib(c, 0x80, a, scale, index, base);
    c->code.append4(offset);
  }
}

void modrmSibImm(Context* c, lir::Register* a, lir::Memory* b) {
  modrmSibImm(c, a->low, b->scale, b->index, b->base, b->offset);
}
// Append a one-byte opcode.
void opcode(Context* c, uint8_t op) {
  c->code.append(op);
}

// Append a two-byte opcode.
void opcode(Context* c, uint8_t op1, uint8_t op2) {
  c->code.append(op1);
  c->code.append(op2);
}

// Emit an unconditional jump/call with a rel32 placeholder; the
// 5-byte instruction is patched via an OffsetTask once the target
// promise resolves.
void unconditional(Context* c, unsigned jump, lir::Constant* a) {
  appendOffsetTask(c, a->value, offsetPromise(c), 5);

  opcode(c, jump);
  c->code.append4(0);
}

// Emit a conditional jump (0x0f <cc> rel32, 6 bytes), patched the
// same way.
void conditional(Context* c, unsigned condition, lir::Constant* a) {
  appendOffsetTask(c, a->value, offsetPromise(c), 6);

  opcode(c, 0x0f, condition);
  c->code.append4(0);
}
// Register-to-register SSE move.  xmm<->xmm uses movss/movsd
// (0xf3/0xf2 0f 10); xmm->gpr uses 66 0f 7e; gpr->xmm uses 66 0f 6e.
void sseMoveRR(Context* c, unsigned aSize, lir::Register* a,
               unsigned bSize UNUSED, lir::Register* b)
{
  assert(c, aSize >= 4);
  assert(c, aSize == bSize);

  if (isFloatReg(a) and isFloatReg(b)) {
    if (aSize == 4) {
      opcode(c, 0xf3);
      maybeRex(c, 4, a, b);
      opcode(c, 0x0f, 0x10);
      modrm(c, 0xc0, a, b);
    } else {
      opcode(c, 0xf2);
      maybeRex(c, 4, b, a);
      opcode(c, 0x0f, 0x10);
      modrm(c, 0xc0, a, b);
    }
  } else if (isFloatReg(a)) {
    // xmm -> general-purpose register
    opcode(c, 0x66);
    maybeRex(c, aSize, a, b);
    opcode(c, 0x0f, 0x7e);
    modrm(c, 0xc0, b, a);
  } else {
    // general-purpose register -> xmm
    opcode(c, 0x66);
    maybeRex(c, aSize, b, a);
    opcode(c, 0x0f, 0x6e);
    modrm(c, 0xc0, a, b);
  }
}

// Load a constant into an xmm register by staging it through a
// temporary general-purpose register.
void sseMoveCR(Context* c, unsigned aSize, lir::Constant* a,
               unsigned bSize, lir::Register* b)
{
  assert(c, aSize <= vm::TargetBytesPerWord);
  lir::Register tmp(c->client->acquireTemporary(GeneralRegisterMask));
  moveCR2(c, aSize, a, aSize, &tmp, 0);
  sseMoveRR(c, aSize, &tmp, bSize, b);
  c->client->releaseTemporary(tmp.low);
}

// Memory -> xmm load.  An 8-byte load on a 32-bit target uses movq
// (f3 0f 7e); otherwise 66 0f 6e.
void sseMoveMR(Context* c, unsigned aSize, lir::Memory* a,
               unsigned bSize UNUSED, lir::Register* b)
{
  assert(c, aSize >= 4);

  if (vm::TargetBytesPerWord == 4 and aSize == 8) {
    opcode(c, 0xf3);
    opcode(c, 0x0f, 0x7e);
    modrmSibImm(c, b, a);
  } else {
    opcode(c, 0x66);
    maybeRex(c, aSize, b, a);
    opcode(c, 0x0f, 0x6e);
    modrmSibImm(c, b, a);
  }
}

// xmm -> memory store.  An 8-byte store on a 32-bit target uses movq
// (66 0f d6); otherwise 66 0f 7e.
void sseMoveRM(Context* c, unsigned aSize, lir::Register* a,
               UNUSED unsigned bSize, lir::Memory* b)
{
  assert(c, aSize >= 4);
  assert(c, aSize == bSize);

  if (vm::TargetBytesPerWord == 4 and aSize == 8) {
    opcode(c, 0x66);
    opcode(c, 0x0f, 0xd6);
    modrmSibImm(c, a, b);
  } else {
    opcode(c, 0x66);
    maybeRex(c, aSize, a, b);
    opcode(c, 0x0f, 0x7e);
    modrmSibImm(c, a, b);
  }
}
// Emit the conditional jump matching an integer comparison whose
// flags have just been set (signed condition codes).
void branch(Context* c, lir::TernaryOperation op, lir::Constant* target) {
  switch (op) {
  case lir::JumpIfEqual:
    conditional(c, 0x84, target);   // je
    break;

  case lir::JumpIfNotEqual:
    conditional(c, 0x85, target);   // jne
    break;

  case lir::JumpIfLess:
    conditional(c, 0x8c, target);   // jl
    break;

  case lir::JumpIfGreater:
    conditional(c, 0x8f, target);   // jg
    break;

  case lir::JumpIfLessOrEqual:
    conditional(c, 0x8e, target);   // jle
    break;

  case lir::JumpIfGreaterOrEqual:
    conditional(c, 0x8d, target);   // jge
    break;

  default:
    abort(c);
  }
}

// Emit the conditional jump(s) matching a floating-point comparison.
// Unsigned condition codes (jb/ja/...) are used because SSE compares
// set CF/ZF; an unordered result additionally sets PF, so the
// *OrUnordered variants emit an extra jp (0x8a) to the same target.
void branchFloat(Context* c, lir::TernaryOperation op, lir::Constant* target) {
  switch (op) {
  case lir::JumpIfFloatEqual:
    conditional(c, 0x84, target);   // je
    break;

  case lir::JumpIfFloatNotEqual:
    conditional(c, 0x85, target);   // jne
    break;

  case lir::JumpIfFloatLess:
    conditional(c, 0x82, target);   // jb
    break;

  case lir::JumpIfFloatGreater:
    conditional(c, 0x87, target);   // ja
    break;

  case lir::JumpIfFloatLessOrEqual:
    conditional(c, 0x86, target);   // jbe
    break;

  case lir::JumpIfFloatGreaterOrEqual:
    conditional(c, 0x83, target);   // jae
    break;

  case lir::JumpIfFloatLessOrUnordered:
    conditional(c, 0x82, target);   // jb
    conditional(c, 0x8a, target);   // jp
    break;

  case lir::JumpIfFloatGreaterOrUnordered:
    conditional(c, 0x87, target);   // ja
    conditional(c, 0x8a, target);   // jp
    break;

  case lir::JumpIfFloatLessOrEqualOrUnordered:
    conditional(c, 0x86, target);   // jbe
    conditional(c, 0x8a, target);   // jp
    break;

  case lir::JumpIfFloatGreaterOrEqualOrUnordered:
    conditional(c, 0x83, target);   // jae
    conditional(c, 0x8a, target);   // jp
    break;

  default:
    abort(c);
  }
}
// Emit a scalar SSE arithmetic op on two registers: the 0xf3 prefix
// selects the single-precision form, 0xf2 the double-precision form.
void floatRegOp(Context* c, unsigned aSize, lir::Register* a, unsigned bSize,
                lir::Register* b, uint8_t op, uint8_t mod)
{
  if (aSize == 4) {
    opcode(c, 0xf3);
  } else {
    opcode(c, 0xf2);
  }
  maybeRex(c, bSize, b, a);
  opcode(c, 0x0f, op);
  modrm(c, mod, a, b);
}

// Same, with a memory source operand.
void floatMemOp(Context* c, unsigned aSize, lir::Memory* a, unsigned bSize,
                lir::Register* b, uint8_t op)
{
  if (aSize == 4) {
    opcode(c, 0xf3);
  } else {
    opcode(c, 0xf2);
  }
  maybeRex(c, bSize, b, a);
  opcode(c, 0x0f, op);
  modrmSibImm(c, b, a);
}
void moveCR(Context* c, unsigned aSize, lir::Constant* a,
            unsigned bSize, lir::Register* b);

// Load constant `a` into register `b`.  On a 32-bit target an 8-byte
// constant is split into two 4-byte moves (low word -> b, high word
// -> b->high).  Otherwise emits mov reg, imm (0xb8+reg); if the
// constant's promise is unresolved, a zero placeholder is emitted and
// an ImmediateTask patches in the real value (at promiseOffset) once
// it is known.
void moveCR2(Context* c, UNUSED unsigned aSize, lir::Constant* a,
             UNUSED unsigned bSize, lir::Register* b, unsigned promiseOffset)
{
  if (vm::TargetBytesPerWord == 4 and bSize == 8) {
    int64_t v = a->value->value();

    ResolvedPromise high((v >> 32) & 0xFFFFFFFF);
    lir::Constant ah(&high);

    ResolvedPromise low(v & 0xFFFFFFFF);
    lir::Constant al(&low);

    lir::Register bh(b->high);

    moveCR(c, 4, &al, 4, b);
    moveCR(c, 4, &ah, 4, &bh);
  } else {
    maybeRex(c, vm::TargetBytesPerWord, b);
    opcode(c, 0xb8 + regCode(b));  // mov reg, imm
    if (a->value->resolved()) {
      c->code.appendTargetAddress(a->value->value());
    } else {
      // Placeholder, patched later when the promise resolves.
      appendImmediateTask
        (c, a->value, offsetPromise(c), vm::TargetBytesPerWord, promiseOffset);
      c->code.appendTargetAddress(static_cast<vm::target_uintptr_t>(0));
    }
  }
}
} // namespace x86
} // namespace codegen
} // namespace avian

View File

@ -0,0 +1,101 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_X86_ENCODE_H
#define AVIAN_CODEGEN_ASSEMBLER_X86_ENCODE_H
#include <stdint.h>
#include "avian/common.h"
#include <avian/vm/codegen/lir.h>
#include "registers.h"
namespace avian {
namespace codegen {
namespace x86 {
class Context;
void maybeRex(Context* c, unsigned size, int a, int index, int base, bool always);
void maybeRex(Context* c, unsigned size, lir::Register* a, lir::Register* b);
void alwaysRex(Context* c, unsigned size, lir::Register* a, lir::Register* b);
void maybeRex(Context* c, unsigned size, lir::Register* a);
void maybeRex(Context* c, unsigned size, lir::Register* a, lir::Memory* b);
void maybeRex(Context* c, unsigned size, lir::Memory* a);
inline int regCode(int a) {
return a & 7;
}
inline int regCode(lir::Register* a) {
return regCode(a->low);
}
inline bool isFloatReg(lir::Register* a) {
return a->low >= xmm0;
}
void modrm(Context* c, uint8_t mod, int a, int b);
void modrm(Context* c, uint8_t mod, lir::Register* a, lir::Register* b);
void sib(Context* c, unsigned scale, int index, int base);
void modrmSib(Context* c, int width, int a, int scale, int index, int base);
void modrmSibImm(Context* c, int a, int scale, int index, int base, int offset);
void modrmSibImm(Context* c, lir::Register* a, lir::Memory* b);
void opcode(Context* c, uint8_t op);
void opcode(Context* c, uint8_t op1, uint8_t op2);
void unconditional(Context* c, unsigned jump, lir::Constant* a);
void conditional(Context* c, unsigned condition, lir::Constant* a);
void sseMoveRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void sseMoveCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b);
void sseMoveMR(Context* c, unsigned aSize, lir::Memory* a,
unsigned bSize UNUSED, lir::Register* b);
void sseMoveRM(Context* c, unsigned aSize, lir::Register* a,
UNUSED unsigned bSize, lir::Memory* b);
void branch(Context* c, lir::TernaryOperation op, lir::Constant* target);
void branchFloat(Context* c, lir::TernaryOperation op, lir::Constant* target);
void floatRegOp(Context* c, unsigned aSize, lir::Register* a, unsigned bSize,
lir::Register* b, uint8_t op, uint8_t mod = 0xc0);
void floatMemOp(Context* c, unsigned aSize, lir::Memory* a, unsigned bSize,
lir::Register* b, uint8_t op);
void moveCR2(Context* c, UNUSED unsigned aSize, lir::Constant* a,
UNUSED unsigned bSize, lir::Register* b, unsigned promiseOffset);
} // namespace x86
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_X86_ENCODE_H

View File

@ -0,0 +1,173 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include <string.h>
#include "avian/allocator.h"
#include "avian/alloc-vector.h"
#include "avian/common.h"
#include "avian/zone.h"
#include <avian/util/abort.h>
#include <avian/vm/system/system.h>
#include "context.h"
#include "fixup.h"
#include "padding.h"
#include "block.h"
namespace avian {
namespace codegen {
namespace x86 {
using namespace util;
// Allocate a promise with a known value in the context's zone.
ResolvedPromise* resolvedPromise(Context* c, int64_t value) {
  return new(c->zone) ResolvedPromise(value);
}

// Promise for the final offset of a position inside a code block;
// resolvable once the block has been placed.
OffsetPromise::OffsetPromise(Context* c, MyBlock* block, unsigned offset, AlignmentPadding* limit):
  c(c), block(block), offset(offset), limit(limit), value_(-1)
{ }

bool OffsetPromise::resolved() {
  // block->start stays ~0 until the block is assigned its final
  // position.
  return block->start != static_cast<unsigned>(~0);
}

int64_t OffsetPromise::value() {
  assert(c, resolved());

  if (value_ == -1) {  // compute once, then cache
    value_ = block->start + (offset - block->offset)
      + padding(block->firstPadding, block->start, block->offset, limit);
  }
  return value_;
}

// Promise for the current end of the code buffer (i.e. the position
// of the next instruction to be emitted).
Promise* offsetPromise(Context* c) {
  return new(c->zone) OffsetPromise(c, c->lastBlock, c->code.length(), c->lastBlock->lastPadding);
}
// Patch the rel32 field (the last 4 bytes) of the instruction at
// `instruction` so it targets absolute address `value`; returns the
// address of the following instruction.
void*
resolveOffset(vm::System* s, uint8_t* instruction, unsigned instructionSize,
              int64_t value)
{
  // rel32 is relative to the end of the instruction.
  intptr_t v = reinterpret_cast<uint8_t*>(value)
    - instruction - instructionSize;

  expect(s, vm::fitsInInt32(v));

  int32_t v4 = v;
  memcpy(instruction + instructionSize - 4, &v4, 4);
  return instruction + instructionSize;
}

// Listener registered on an unresolved promise: patches the
// instruction once the promise's value becomes known.
OffsetListener::OffsetListener(vm::System* s, uint8_t* instruction,
                               unsigned instructionSize):
  s(s),
  instruction(instruction),
  instructionSize(instructionSize)
{ }

bool OffsetListener::resolve(int64_t value, void** location) {
  void* p = resolveOffset(s, instruction, instructionSize, value);
  if (location) *location = p;
  return false;
}
// Deferred patch of a PC-relative instruction, run after the code has
// been copied to its final buffer (c->result).
OffsetTask::OffsetTask(Task* next, Promise* promise, Promise* instructionOffset,
                       unsigned instructionSize):
  Task(next),
  promise(promise),
  instructionOffset(instructionOffset),
  instructionSize(instructionSize)
{ }

void OffsetTask::run(Context* c) {
  if (promise->resolved()) {
    // Target already known: patch immediately.
    resolveOffset
      (c->s, c->result + instructionOffset->value(), instructionSize,
       promise->value());
  } else {
    // Target not yet known: patch lazily when the promise resolves.
    new (promise->listen(sizeof(OffsetListener)))
      OffsetListener(c->s, c->result + instructionOffset->value(),
                     instructionSize);
  }
}

// Push an OffsetTask onto the context's (LIFO) task list.
void
appendOffsetTask(Context* c, Promise* promise, Promise* instructionOffset,
                 unsigned instructionSize)
{
  OffsetTask* task =
    new(c->zone) OffsetTask(c->tasks, promise, instructionOffset, instructionSize);

  c->tasks = task;
}
// Listener that writes an immediate value into already-emitted code
// once its promise resolves.
ImmediateListener::ImmediateListener(vm::System* s, void* dst, unsigned size, unsigned offset):
  s(s), dst(dst), size(size), offset(offset)
{ }

// Write a 4- or 8-byte immediate into dst (any other size aborts).
void copy(vm::System* s, void* dst, int64_t src, unsigned size) {
  switch (size) {
  case 4: {
    int32_t v = src;
    memcpy(dst, &v, 4);
  } break;

  case 8: {
    int64_t v = src;
    memcpy(dst, &v, 8);
  } break;

  default: abort(s);
  }
}

bool ImmediateListener::resolve(int64_t value, void** location) {
  copy(s, dst, value, size);
  if (location) *location = static_cast<uint8_t*>(dst) + offset;
  // NOTE(review): the return value appears to indicate whether
  // `location` marks the start of the patched word (offset == 0) —
  // confirm against Promise::Listener's contract.
  return offset == 0;
}
// Deferred patch of an immediate operand embedded in an instruction.
ImmediateTask::ImmediateTask(Task* next, Promise* promise, Promise* offset, unsigned size,
                             unsigned promiseOffset):
  Task(next),
  promise(promise),
  offset(offset),
  size(size),
  promiseOffset(promiseOffset)
{ }

void ImmediateTask::run(Context* c) {
  if (promise->resolved()) {
    // Value already known: write it now.
    copy(c->s, c->result + offset->value(), promise->value(), size);
  } else {
    // Otherwise write it when the promise resolves.
    new (promise->listen(sizeof(ImmediateListener))) ImmediateListener
      (c->s, c->result + offset->value(), size, promiseOffset);
  }
}

// Push an ImmediateTask onto the context's (LIFO) task list.
void
appendImmediateTask(Context* c, Promise* promise, Promise* offset,
                    unsigned size, unsigned promiseOffset)
{
  c->tasks = new(c->zone) ImmediateTask
    (c->tasks, promise, offset, size, promiseOffset);
}

ShiftMaskPromise* shiftMaskPromise(Context* c, Promise* base, unsigned shift, int64_t mask) {
  return new(c->zone) ShiftMaskPromise(base, shift, mask);
}
} // namespace x86
} // namespace codegen
} // namespace avian

View File

@ -0,0 +1,119 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_X86_FIXUP_H
#define AVIAN_CODEGEN_ASSEMBLER_X86_FIXUP_H
#include <stdint.h>
#include <avian/vm/codegen/promise.h>
namespace vm {
class System;
}
namespace avian {
namespace codegen {
namespace x86 {
class Context;
class MyBlock;
class AlignmentPadding;
ResolvedPromise* resolvedPromise(Context* c, int64_t value);
// A unit of patch-up work to run once emitted code has been copied to
// its final buffer; tasks form a singly-linked list hanging off the
// Context (see OffsetTask/ImmediateTask).
class Task {
 public:
  Task(Task* next): next(next) { }

  virtual void run(Context* c) = 0;

  Task* next;
};
// Promise for the final offset of a position within a code block;
// becomes resolvable once the block has been placed (see fixup.cpp).
class OffsetPromise: public Promise {
 public:
  OffsetPromise(Context* c, MyBlock* block, unsigned offset, AlignmentPadding* limit);

  virtual bool resolved();
  virtual int64_t value();

  Context* c;
  MyBlock* block;
  // Offset of the promised position within the pre-placement buffer.
  unsigned offset;
  AlignmentPadding* limit;
  // Cached result of value(); -1 means "not yet computed".  Declared
  // int64_t (was int) so the cache cannot truncate the 64-bit value
  // that value() computes and returns.
  int64_t value_;
};
// Promise for the current end of the code buffer.
Promise* offsetPromise(Context* c);

// Patch the rel32 field of the instruction at `instruction` to reach
// absolute address `value`; returns the address just past it.
void* resolveOffset(vm::System* s, uint8_t* instruction, unsigned instructionSize, int64_t value);

// Patches a PC-relative instruction when its target promise resolves.
class OffsetListener: public Promise::Listener {
 public:
  OffsetListener(vm::System* s, uint8_t* instruction, unsigned instructionSize);

  virtual bool resolve(int64_t value, void** location);

  vm::System* s;
  uint8_t* instruction;
  unsigned instructionSize;
};

// Task that patches (or arranges lazy patching of) a PC-relative
// instruction after code emission.
class OffsetTask: public Task {
 public:
  OffsetTask(Task* next, Promise* promise, Promise* instructionOffset, unsigned instructionSize);

  virtual void run(Context* c);

  Promise* promise;
  Promise* instructionOffset;
  unsigned instructionSize;
};
// Push an OffsetTask onto the context's task list.
void appendOffsetTask(Context* c, Promise* promise, Promise* instructionOffset, unsigned instructionSize);

// Patches an embedded immediate when its value promise resolves.
class ImmediateListener: public Promise::Listener {
 public:
  ImmediateListener(vm::System* s, void* dst, unsigned size, unsigned offset);

  virtual bool resolve(int64_t value, void** location);

  vm::System* s;
  void* dst;
  unsigned size;
  unsigned offset;
};

// Task that patches (or arranges lazy patching of) an embedded
// immediate after code emission.
class ImmediateTask: public Task {
 public:
  ImmediateTask(Task* next, Promise* promise, Promise* offset, unsigned size,
                unsigned promiseOffset);

  virtual void run(Context* c);

  Promise* promise;
  Promise* offset;
  unsigned size;
  unsigned promiseOffset;
};

// Push an ImmediateTask onto the context's task list.
void
appendImmediateTask(Context* c, Promise* promise, Promise* offset,
                    unsigned size, unsigned promiseOffset = 0);

// Allocate a ShiftMaskPromise in the context's zone.
ShiftMaskPromise* shiftMaskPromise(Context* c, Promise* base, unsigned shift, int64_t mask);
} // namespace x86
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_X86_FIXUP_H

View File

@ -0,0 +1,175 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "avian/common.h"
#include <avian/util/abort.h>
#include <avian/vm/codegen/lir.h>
#include "context.h"
#include "operations.h"
#include "multimethod.h"
#include "../multimethod.h"
namespace avian {
namespace codegen {
namespace x86 {
using namespace util;
// Flattened index into ArchitectureContext::binaryOperations.  Binary
// and non-branch ternary operations share one table; each
// (operand1, operand2) pair owns a stripe of `stride` slots.
unsigned index(ArchitectureContext*, lir::BinaryOperation operation,
               lir::OperandType operand1,
               lir::OperandType operand2)
{
  const unsigned stride
    = lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount;
  return operation + stride * (operand1 + (lir::OperandTypeCount * operand2));
}

// Same table: non-branch ternary operations live after the binary
// operations within each stripe.
unsigned index(ArchitectureContext* c UNUSED, lir::TernaryOperation operation,
               lir::OperandType operand1, lir::OperandType operand2)
{
  assert(c, not isBranch(operation));

  const unsigned stride
    = lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount;
  return lir::BinaryOperationCount + operation
    + stride * (operand1 + (lir::OperandTypeCount * operand2));
}

// Branches are dispatched purely on the operand-type pair.
unsigned branchIndex(ArchitectureContext* c UNUSED, lir::OperandType operand1,
                     lir::OperandType operand2)
{
  return (lir::OperandTypeCount * operand2) + operand1;
}
// Fill in the x86 dispatch tables mapping LIR operations (and operand
// types) to the emitter functions declared in operations.h.
void populateTables(ArchitectureContext* c) {
  const lir::OperandType C = lir::ConstantOperand;
  const lir::OperandType A = lir::AddressOperand;
  const lir::OperandType R = lir::RegisterOperand;
  const lir::OperandType M = lir::MemoryOperand;

  OperationType* zo = c->operations;
  UnaryOperationType* uo = c->unaryOperations;
  BinaryOperationType* bo = c->binaryOperations;
  BranchOperationType* bro = c->branchOperations;

  // nullary operations
  zo[lir::Return] = return_;
  zo[lir::LoadBarrier] = ignore;
  zo[lir::StoreStoreBarrier] = ignore;
  zo[lir::StoreLoadBarrier] = storeLoadBarrier;
  zo[lir::Trap] = trap;

  // unary operations: calls and jumps
  uo[Multimethod::index(lir::Call, C)] = CAST1(callC);
  uo[Multimethod::index(lir::Call, R)] = CAST1(callR);
  uo[Multimethod::index(lir::Call, M)] = CAST1(callM);

  uo[Multimethod::index(lir::AlignedCall, C)] = CAST1(alignedCallC);

  uo[Multimethod::index(lir::LongCall, C)] = CAST1(longCallC);

  uo[Multimethod::index(lir::AlignedLongCall, C)] = CAST1(alignedLongCallC);

  uo[Multimethod::index(lir::Jump, R)] = CAST1(jumpR);
  uo[Multimethod::index(lir::Jump, C)] = CAST1(jumpC);
  uo[Multimethod::index(lir::Jump, M)] = CAST1(jumpM);

  uo[Multimethod::index(lir::AlignedJump, C)] = CAST1(alignedJumpC);

  uo[Multimethod::index(lir::LongJump, C)] = CAST1(longJumpC);

  uo[Multimethod::index(lir::AlignedLongJump, C)] = CAST1(alignedLongJumpC);

  // binary operations: moves
  bo[index(c, lir::Negate, R, R)] = CAST2(negateRR);

  bo[index(c, lir::FloatNegate, R, R)] = CAST2(floatNegateRR);

  bo[index(c, lir::Move, R, R)] = CAST2(moveRR);
  bo[index(c, lir::Move, C, R)] = CAST2(moveCR);
  bo[index(c, lir::Move, M, R)] = CAST2(moveMR);
  bo[index(c, lir::Move, R, M)] = CAST2(moveRM);
  bo[index(c, lir::Move, C, M)] = CAST2(moveCM);
  bo[index(c, lir::Move, A, R)] = CAST2(moveAR);

  bo[index(c, lir::FloatSquareRoot, R, R)] = CAST2(floatSqrtRR);
  bo[index(c, lir::FloatSquareRoot, M, R)] = CAST2(floatSqrtMR);

  // note: zero-extending move from a constant is an ordinary moveCR
  bo[index(c, lir::MoveZ, R, R)] = CAST2(moveZRR);
  bo[index(c, lir::MoveZ, M, R)] = CAST2(moveZMR);
  bo[index(c, lir::MoveZ, C, R)] = CAST2(moveCR);

  // binary operations: integer arithmetic
  bo[index(c, lir::Add, R, R)] = CAST2(addRR);
  bo[index(c, lir::Add, C, R)] = CAST2(addCR);

  bo[index(c, lir::Subtract, C, R)] = CAST2(subtractCR);
  bo[index(c, lir::Subtract, R, R)] = CAST2(subtractRR);

  // binary operations: floating-point arithmetic
  bo[index(c, lir::FloatAdd, R, R)] = CAST2(floatAddRR);
  bo[index(c, lir::FloatAdd, M, R)] = CAST2(floatAddMR);

  bo[index(c, lir::FloatSubtract, R, R)] = CAST2(floatSubtractRR);
  bo[index(c, lir::FloatSubtract, M, R)] = CAST2(floatSubtractMR);

  // binary operations: bitwise
  bo[index(c, lir::And, R, R)] = CAST2(andRR);
  bo[index(c, lir::And, C, R)] = CAST2(andCR);

  bo[index(c, lir::Or, R, R)] = CAST2(orRR);
  bo[index(c, lir::Or, C, R)] = CAST2(orCR);

  bo[index(c, lir::Xor, R, R)] = CAST2(xorRR);
  bo[index(c, lir::Xor, C, R)] = CAST2(xorCR);

  bo[index(c, lir::Multiply, R, R)] = CAST2(multiplyRR);
  bo[index(c, lir::Multiply, C, R)] = CAST2(multiplyCR);

  bo[index(c, lir::Divide, R, R)] = CAST2(divideRR);

  bo[index(c, lir::FloatMultiply, R, R)] = CAST2(floatMultiplyRR);
  bo[index(c, lir::FloatMultiply, M, R)] = CAST2(floatMultiplyMR);

  bo[index(c, lir::FloatDivide, R, R)] = CAST2(floatDivideRR);
  bo[index(c, lir::FloatDivide, M, R)] = CAST2(floatDivideMR);

  bo[index(c, lir::Remainder, R, R)] = CAST2(remainderRR);

  // binary operations: shifts
  bo[index(c, lir::ShiftLeft, R, R)] = CAST2(shiftLeftRR);
  bo[index(c, lir::ShiftLeft, C, R)] = CAST2(shiftLeftCR);

  bo[index(c, lir::ShiftRight, R, R)] = CAST2(shiftRightRR);
  bo[index(c, lir::ShiftRight, C, R)] = CAST2(shiftRightCR);

  bo[index(c, lir::UnsignedShiftRight, R, R)] = CAST2(unsignedShiftRightRR);
  bo[index(c, lir::UnsignedShiftRight, C, R)] = CAST2(unsignedShiftRightCR);

  // binary operations: conversions and misc
  bo[index(c, lir::Float2Float, R, R)] = CAST2(float2FloatRR);
  bo[index(c, lir::Float2Float, M, R)] = CAST2(float2FloatMR);

  bo[index(c, lir::Float2Int, R, R)] = CAST2(float2IntRR);
  bo[index(c, lir::Float2Int, M, R)] = CAST2(float2IntMR);

  bo[index(c, lir::Int2Float, R, R)] = CAST2(int2FloatRR);
  bo[index(c, lir::Int2Float, M, R)] = CAST2(int2FloatMR);

  bo[index(c, lir::Absolute, R, R)] = CAST2(absoluteRR);
  bo[index(c, lir::FloatAbsolute, R, R)] = CAST2(floatAbsoluteRR);

  // branch operations, dispatched on the operand-type pair
  bro[branchIndex(c, R, R)] = CAST_BRANCH(branchRR);
  bro[branchIndex(c, C, R)] = CAST_BRANCH(branchCR);
  bro[branchIndex(c, C, M)] = CAST_BRANCH(branchCM);
  bro[branchIndex(c, R, M)] = CAST_BRANCH(branchRM);
}
} // namespace x86
} // namespace codegen
} // namespace avian

View File

@ -0,0 +1,40 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_X86_MULTIMETHOD_H
#define AVIAN_CODEGEN_ASSEMBLER_X86_MULTIMETHOD_H
#include "avian/common.h"
#include <avian/vm/codegen/lir.h>
namespace avian {
namespace codegen {
namespace x86 {
class ArchitectureContext;

// Flattened index into ArchitectureContext::binaryOperations for a
// binary operation and its operand-type pair.
unsigned index(ArchitectureContext*, lir::BinaryOperation operation,
               lir::OperandType operand1,
               lir::OperandType operand2);

// Same table, for non-branch ternary operations.
unsigned index(ArchitectureContext* c UNUSED, lir::TernaryOperation operation,
               lir::OperandType operand1, lir::OperandType operand2);

// Index into ArchitectureContext::branchOperations by operand-type
// pair.
unsigned branchIndex(ArchitectureContext* c UNUSED, lir::OperandType operand1,
                     lir::OperandType operand2);

// Fill in the x86 dispatch tables.
void populateTables(ArchitectureContext* c);
} // namespace x86
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_X86_MULTIMETHOD_H

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,267 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_X86_OPERATIONS_H
#define AVIAN_CODEGEN_ASSEMBLER_X86_OPERATIONS_H
#include "avian/common.h"
#include <avian/vm/codegen/lir.h>
#include "context.h"
namespace avian {
namespace codegen {
namespace x86 {
// Emitter functions for the x86 backend, one per (operation, operand
// type[s]) combination; installed into the dispatch tables by
// populateTables() (multimethod.cpp).  Naming: trailing letters give
// the operand kinds — C = constant, R = register, M = memory,
// A = address.

// nullary operations
void return_(Context* c);

void trap(Context* c);

void ignore(Context*);

void storeLoadBarrier(Context* c);

// calls and jumps
void callC(Context* c, unsigned size UNUSED, lir::Constant* a);

void longCallC(Context* c, unsigned size, lir::Constant* a);

void jumpR(Context* c, unsigned size UNUSED, lir::Register* a);

void jumpC(Context* c, unsigned size UNUSED, lir::Constant* a);

void jumpM(Context* c, unsigned size UNUSED, lir::Memory* a);

void longJumpC(Context* c, unsigned size, lir::Constant* a);

void callR(Context* c, unsigned size UNUSED, lir::Register* a);

void callM(Context* c, unsigned size UNUSED, lir::Memory* a);

void alignedCallC(Context* c, unsigned size, lir::Constant* a);

void alignedLongCallC(Context* c, unsigned size, lir::Constant* a);

void alignedJumpC(Context* c, unsigned size, lir::Constant* a);

void alignedLongJumpC(Context* c, unsigned size, lir::Constant* a);

// stack operations
void pushR(Context* c, unsigned size, lir::Register* a);

void popR(Context* c, unsigned size, lir::Register* a);

// negation
void negateR(Context* c, unsigned size, lir::Register* a);

void negateRR(Context* c, unsigned aSize, lir::Register* a,
              unsigned bSize UNUSED, lir::Register* b UNUSED);

// moves
void moveCR(Context* c, unsigned aSize, lir::Constant* a,
            unsigned bSize, lir::Register* b);

void swapRR(Context* c, unsigned aSize UNUSED, lir::Register* a,
            unsigned bSize UNUSED, lir::Register* b);

void moveRR(Context* c, unsigned aSize, lir::Register* a,
            UNUSED unsigned bSize, lir::Register* b);

void moveMR(Context* c, unsigned aSize, lir::Memory* a,
            unsigned bSize, lir::Register* b);

void moveRM(Context* c, unsigned aSize, lir::Register* a,
            unsigned bSize UNUSED, lir::Memory* b);

void moveAR(Context* c, unsigned aSize, lir::Address* a,
            unsigned bSize, lir::Register* b);

void moveCM(Context* c, unsigned aSize UNUSED, lir::Constant* a,
            unsigned bSize, lir::Memory* b);

// zero-extending moves
void moveZRR(Context* c, unsigned aSize, lir::Register* a,
             unsigned bSize UNUSED, lir::Register* b);

void moveZMR(Context* c, unsigned aSize UNUSED, lir::Memory* a,
             unsigned bSize UNUSED, lir::Register* b);

// integer arithmetic (the *Carry/*Borrow variants are the upper-word
// halves of multi-word arithmetic)
void addCarryRR(Context* c, unsigned size, lir::Register* a,
                lir::Register* b);

void addRR(Context* c, unsigned aSize, lir::Register* a,
           unsigned bSize UNUSED, lir::Register* b);

void addCarryCR(Context* c, unsigned size, lir::Constant* a,
                lir::Register* b);

void addCR(Context* c, unsigned aSize, lir::Constant* a,
           unsigned bSize, lir::Register* b);

void subtractBorrowCR(Context* c, unsigned size UNUSED, lir::Constant* a,
                      lir::Register* b);

void subtractCR(Context* c, unsigned aSize, lir::Constant* a,
                unsigned bSize, lir::Register* b);

void subtractBorrowRR(Context* c, unsigned size, lir::Register* a,
                      lir::Register* b);

void subtractRR(Context* c, unsigned aSize, lir::Register* a,
                unsigned bSize UNUSED, lir::Register* b);

// bitwise operations
void andRR(Context* c, unsigned aSize, lir::Register* a,
           unsigned bSize UNUSED, lir::Register* b);

void andCR(Context* c, unsigned aSize, lir::Constant* a,
           unsigned bSize, lir::Register* b);

void orRR(Context* c, unsigned aSize, lir::Register* a,
          unsigned bSize UNUSED, lir::Register* b);

void orCR(Context* c, unsigned aSize, lir::Constant* a,
          unsigned bSize, lir::Register* b);

void xorRR(Context* c, unsigned aSize, lir::Register* a,
           unsigned bSize UNUSED, lir::Register* b);

void xorCR(Context* c, unsigned aSize, lir::Constant* a,
           unsigned bSize, lir::Register* b);

void multiplyRR(Context* c, unsigned aSize, lir::Register* a,
                unsigned bSize UNUSED, lir::Register* b);

// comparisons
void compareRR(Context* c, unsigned aSize, lir::Register* a,
               unsigned bSize UNUSED, lir::Register* b);

void compareCR(Context* c, unsigned aSize, lir::Constant* a,
               unsigned bSize, lir::Register* b);

void compareRM(Context* c, unsigned aSize, lir::Register* a,
               unsigned bSize UNUSED, lir::Memory* b);

void compareCM(Context* c, unsigned aSize, lir::Constant* a,
               unsigned bSize, lir::Memory* b);

void compareFloatRR(Context* c, unsigned aSize, lir::Register* a,
                    unsigned bSize UNUSED, lir::Register* b);

// branches (compare + conditional jump)
void branchLong(Context* c, lir::TernaryOperation op, lir::Operand* al,
                lir::Operand* ah, lir::Operand* bl,
                lir::Operand* bh, lir::Constant* target,
                BinaryOperationType compare);

void branchRR(Context* c, lir::TernaryOperation op, unsigned size,
              lir::Register* a, lir::Register* b,
              lir::Constant* target);

void branchCR(Context* c, lir::TernaryOperation op, unsigned size,
              lir::Constant* a, lir::Register* b,
              lir::Constant* target);

void branchRM(Context* c, lir::TernaryOperation op, unsigned size,
              lir::Register* a, lir::Memory* b,
              lir::Constant* target);

void branchCM(Context* c, lir::TernaryOperation op, unsigned size,
              lir::Constant* a, lir::Memory* b,
              lir::Constant* target);

void multiplyCR(Context* c, unsigned aSize, lir::Constant* a,
                unsigned bSize, lir::Register* b);

void divideRR(Context* c, unsigned aSize, lir::Register* a,
              unsigned bSize UNUSED, lir::Register* b UNUSED);

void remainderRR(Context* c, unsigned aSize, lir::Register* a,
                 unsigned bSize UNUSED, lir::Register* b);

// shifts
void doShift(Context* c, UNUSED void (*shift)
             (Context*, unsigned, lir::Register*, unsigned,
              lir::Register*),
             int type, UNUSED unsigned aSize, lir::Constant* a,
             unsigned bSize, lir::Register* b);

void shiftLeftRR(Context* c, UNUSED unsigned aSize, lir::Register* a,
                 unsigned bSize, lir::Register* b);

void shiftLeftCR(Context* c, unsigned aSize, lir::Constant* a,
                 unsigned bSize, lir::Register* b);

void shiftRightRR(Context* c, UNUSED unsigned aSize, lir::Register* a,
                  unsigned bSize, lir::Register* b);

void shiftRightCR(Context* c, unsigned aSize, lir::Constant* a,
                  unsigned bSize, lir::Register* b);

void unsignedShiftRightRR(Context* c, UNUSED unsigned aSize, lir::Register* a,
                          unsigned bSize, lir::Register* b);

void unsignedShiftRightCR(Context* c, unsigned aSize UNUSED, lir::Constant* a,
                          unsigned bSize, lir::Register* b);

// floating-point arithmetic
void floatSqrtRR(Context* c, unsigned aSize, lir::Register* a,
                 unsigned bSize UNUSED, lir::Register* b);

void floatSqrtMR(Context* c, unsigned aSize, lir::Memory* a,
                 unsigned bSize UNUSED, lir::Register* b);

void floatAddRR(Context* c, unsigned aSize, lir::Register* a,
                unsigned bSize UNUSED, lir::Register* b);

void floatAddMR(Context* c, unsigned aSize, lir::Memory* a,
                unsigned bSize UNUSED, lir::Register* b);

void floatSubtractRR(Context* c, unsigned aSize, lir::Register* a,
                     unsigned bSize UNUSED, lir::Register* b);

void floatSubtractMR(Context* c, unsigned aSize, lir::Memory* a,
                     unsigned bSize UNUSED, lir::Register* b);

void floatMultiplyRR(Context* c, unsigned aSize, lir::Register* a,
                     unsigned bSize UNUSED, lir::Register* b);

void floatMultiplyMR(Context* c, unsigned aSize, lir::Memory* a,
                     unsigned bSize UNUSED, lir::Register* b);

void floatDivideRR(Context* c, unsigned aSize, lir::Register* a,
                   unsigned bSize UNUSED, lir::Register* b);

void floatDivideMR(Context* c, unsigned aSize, lir::Memory* a,
                   unsigned bSize UNUSED, lir::Register* b);

// conversions
void float2FloatRR(Context* c, unsigned aSize, lir::Register* a,
                   unsigned bSize UNUSED, lir::Register* b);

void float2FloatMR(Context* c, unsigned aSize, lir::Memory* a,
                   unsigned bSize UNUSED, lir::Register* b);
void float2IntRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize, lir::Register* b);
void float2IntMR(Context* c, unsigned aSize, lir::Memory* a,
unsigned bSize, lir::Register* b);
void int2FloatRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize, lir::Register* b);
void int2FloatMR(Context* c, unsigned aSize, lir::Memory* a,
unsigned bSize, lir::Register* b);
void floatNegateRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void floatAbsoluteRR(Context* c, unsigned aSize UNUSED, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void absoluteRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b UNUSED);
} // namespace x86
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_X86_OPERATIONS_H

View File

@ -0,0 +1,68 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "avian/alloc-vector.h"
#include "context.h"
#include "padding.h"
#include "block.h"
namespace avian {
namespace codegen {
namespace x86 {
// Records an alignment requirement at the current code-buffer position and
// appends this record to the tail of the current block's padding list.
// 'padding' starts at -1, meaning "not yet computed" (see padding() below).
AlignmentPadding::AlignmentPadding(Context* c, unsigned instructionOffset, unsigned alignment):
  offset(c->code.length()),
  instructionOffset(instructionOffset),
  alignment(alignment),
  next(0),
  padding(-1)
{
  // Append to the block's singly-linked padding list, maintaining both the
  // head (firstPadding) and tail (lastPadding) pointers.
  if (c->lastBlock->firstPadding) {
    c->lastBlock->lastPadding->next = this;
  } else {
    c->lastBlock->firstPadding = this;
  }
  c->lastBlock->lastPadding = this;
}
// Computes the cumulative number of padding bytes required along the chain
// starting at 'p' up to (and including) 'limit', so that each padding
// point's instruction lands on its requested alignment boundary.  'start'
// is the address where the code will be placed and 'offset' is the code
// buffer offset corresponding to 'p''s block.  Results are memoized in each
// record's 'padding' field (-1 means "not yet computed"), so repeated calls
// with the same limit return the cached total.
unsigned
padding(AlignmentPadding* p, unsigned start, unsigned offset,
AlignmentPadding* limit)
{
  unsigned padding = 0;
  if (limit) {
    if (limit->padding == -1) {
      for (; p; p = p->next) {
        if (p->padding == -1) {
          // This record's position relative to the block's buffer start.
          unsigned index = p->offset - offset;
          // Grow the running total until the instruction (which begins
          // instructionOffset bytes past the padding point) is aligned.
          while ((start + index + padding + p->instructionOffset)
                 % p->alignment)
          {
            ++ padding;
          }
          p->padding = padding;
          if (p == limit) break;
        } else {
          // Already resolved on an earlier pass; continue from its total.
          padding = p->padding;
        }
      }
    } else {
      // The whole chain up to 'limit' was already resolved.
      padding = limit->padding;
    }
  }
  return padding;
}
} // namespace x86
} // namespace codegen
} // namespace avian

View File

@ -0,0 +1,39 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_X86_PADDING_H
#define AVIAN_CODEGEN_ASSEMBLER_X86_PADDING_H
namespace avian {
namespace codegen {
namespace x86 {
class Context;
// Marks a point in the code stream where padding must be inserted so that a
// following instruction lands on an alignment boundary.  Instances link
// themselves into the current block's padding list on construction.
class AlignmentPadding {
 public:
  AlignmentPadding(Context* c, unsigned instructionOffset, unsigned alignment);

  unsigned offset;            // code-buffer offset of this padding point
  unsigned instructionOffset; // bytes from the padding point to the instruction
  unsigned alignment;         // required alignment, in bytes
  AlignmentPadding* next;     // next padding record in the same block
  int padding;                // computed pad size; -1 until resolved
};

// Computes (and memoizes) the cumulative padding for the chain 'p' up to
// 'limit', given the block's placement address 'start' and buffer 'offset'.
unsigned
padding(AlignmentPadding* p, unsigned start, unsigned offset,
AlignmentPadding* limit);
} // namespace x86
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_X86_PADDING_H

View File

@ -0,0 +1,67 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_X86_REGISTERS_H
#define AVIAN_CODEGEN_ASSEMBLER_X86_REGISTERS_H
namespace avian {
namespace codegen {
namespace x86 {
// General-purpose register indices.  The values match the hardware
// register numbers used in x86/x86_64 instruction encoding.
enum {
  rax = 0,
  rcx = 1,
  rdx = 2,
  rbx = 3,
  rsp = 4,
  rbp = 5,
  rsi = 6,
  rdi = 7,
  r8 = 8,
  r9 = 9,
  r10 = 10,
  r11 = 11,
  r12 = 12,
  r13 = 13,
  r14 = 14,
  r15 = 15,
};

// SSE registers, numbered immediately after the general-purpose file so
// that one flat index space (0..31) covers both register files.
enum {
  xmm0 = r15 + 1,
  xmm1,
  xmm2,
  xmm3,
  xmm4,
  xmm5,
  xmm6,
  xmm7,
  xmm8,
  xmm9,
  xmm10,
  xmm11,
  xmm12,
  xmm13,
  xmm14,
  xmm15,
};

// Register reserved for long jumps -- NOTE(review): the actual use is in
// the assembler implementation, not visible in this header.
const int LongJumpRegister = r10;

// Bit masks over the flat register index space above: 32-bit targets have
// only eight GPRs (bits 0-7) and eight xmm registers (bits 16-23); 64-bit
// targets expose all sixteen of each.
const unsigned GeneralRegisterMask = vm::TargetBytesPerWord == 4 ? 0x000000ff : 0x0000ffff;
const unsigned FloatRegisterMask = vm::TargetBytesPerWord == 4 ? 0x00ff0000 : 0xffff0000;
} // namespace x86
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_X86_REGISTERS_H

View File

@ -8,6 +8,8 @@
There is NO WARRANTY for this software. See license.txt for
details. */
#include "avian/common.h"
#include <avian/vm/codegen/targets.h>
#include "avian/environment.h"
@ -15,7 +17,7 @@
namespace avian {
namespace codegen {
Assembler::Architecture* makeArchitectureNative(vm::System* system, bool useNativeFeatures UNUSED) {
Architecture* makeArchitectureNative(vm::System* system, bool useNativeFeatures UNUSED) {
#ifndef AVIAN_TARGET_ARCH
#error "Must specify native target!"
#endif

File diff suppressed because it is too large Load Diff

View File

@ -16,6 +16,7 @@
#include "avian/arch.h"
#include <avian/vm/codegen/assembler.h>
#include <avian/vm/codegen/architecture.h>
#include <avian/vm/codegen/compiler.h>
#include <avian/vm/codegen/targets.h>
@ -294,7 +295,7 @@ class MyThread: public Thread {
void** thunkTable;
CallTrace* trace;
Reference* reference;
avian::codegen::Assembler::Architecture* arch;
avian::codegen::Architecture* arch;
Context* transition;
TraceContext* traceContext;
uintptr_t stackLimit;

View File

@ -0,0 +1,116 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include <avian/vm/system/system.h>
#include <avian/util/arg-parser.h>
#include <avian/vm/codegen/lir.h>
#include <avian/vm/codegen/assembler.h>
#include <avian/vm/codegen/targets.h>
#include <avian/vm/codegen/registers.h>
#include <avian/vm/heap/heap.h>
// since we aren't linking against libstdc++, we must implement this
// ourselves:
extern "C" void __cxa_pure_virtual(void) { abort(); }
using namespace vm;
using namespace avian::codegen;
using namespace avian::util;
// Bundles the system, heap, and native target architecture needed to drive
// an assembler.  Acquires the architecture for the object's lifetime and
// releases/disposes everything in the destructor (RAII).
class BasicEnv {
public:
  System* s;          // host system services
  Heap* heap;         // 32 KB heap backing the assembler's zone
  Architecture* arch; // native codegen target

  BasicEnv():
    s(makeSystem(0)),
    heap(makeHeap(s, 32 * 1024)),
    arch(makeArchitectureNative(s, true))
  {
    arch->acquire();
  }

  ~BasicEnv() {
    arch->release();
    s->dispose();
  }
};
// Owns a zone-backed Assembler for the given environment; disposes the
// assembler on destruction.
class Asm {
public:
  Zone zone;    // 8 KB allocation zone the assembler allocates from
  Assembler* a; // the architecture's assembler, valid for this object's lifetime

  Asm(BasicEnv& env):
    zone(env.s, env.heap, 8192),
    a(env.arch->makeAssembler(env.heap, &zone))
  { }

  ~Asm() {
    a->dispose();
  }
};
// Emits a 32-bit "add reg, reg" for every general-purpose register of the
// native architecture, resolves the block, and hex-dumps the machine code
// to stdout.  Purely diagnostic; frees the code buffer before returning.
void generateCode(BasicEnv& env) {
  Asm a(env);

  // One three-address add per general register (dest == both sources).
  for(RegisterIterator it(env.arch->registerFile()->generalRegisters); it.hasNext(); ) {
    int r = it.next();
    lir::Register reg(r);
    a.a->apply(lir::Add,
      OperandInfo(4, lir::RegisterOperand, &reg),
      OperandInfo(4, lir::RegisterOperand, &reg),
      OperandInfo(4, lir::RegisterOperand, &reg));
  }

  unsigned length = a.a->endBlock(false)->resolve(0, 0);
  // %u, not %d: 'length' is unsigned.
  printf("length: %u\n", length);

  uint8_t* data = static_cast<uint8_t*>(env.s->tryAllocate(length));
  if (data == 0) {
    // tryAllocate may fail; don't write through a null destination.
    fprintf(stderr, "unable to allocate %u bytes\n", length);
    return;
  }
  a.a->setDestination(data);
  a.a->write();

  for(unsigned i = 0; i < length; i++) {
    printf("%02x ", data[i]);
  }
  printf("\n");
  env.s->free(data);
}
// Command-line arguments for the code-audit tool: both -output and -format
// are required.  Exits the process (status 1) on a malformed command line,
// after printing the expected usage.
class Arguments {
public:
  const char* output;       // value of -output: the object file to write
  const char* outputFormat; // value of -format: the object file's format

  Arguments(int argc, char** argv) {
    ArgParser parser;
    Arg out(parser, true, "output", "<output object file>");
    Arg format(parser, true, "format", "<format of output object file>");

    if(!parser.parse(argc, argv)) {
      // Tell the user what was expected instead of failing silently.
      parser.printUsage(argv[0]);
      exit(1);
    }
    output = out.value;
    outputFormat = format.value;

    // TODO: sanitize format values
  }
};
// Entry point: parses the required -output/-format options, then emits and
// dumps a sample code sequence for the native architecture.
// NOTE(review): args.output/args.outputFormat are parsed but not consumed
// here -- presumably object-file emission is still TODO; confirm intent.
int main(int argc, char** argv) {
  Arguments args(argc, argv);
  BasicEnv env;
  generateCode(env);
  return 0;
}

View File

@ -21,12 +21,16 @@
#include <avian/util/runtime-array.h>
#include "avian/lzma.h"
#include <avian/util/arg-parser.h>
#include <avian/util/abort.h>
// since we aren't linking against libstdc++, we must implement this
// ourselves:
extern "C" void __cxa_pure_virtual(void) { abort(); }
using namespace vm;
using namespace avian::tools;
using namespace avian::util;
using namespace avian::codegen;
namespace {
@ -1719,105 +1723,6 @@ writeBootImage(Thread* t, uintptr_t* arguments)
return 1;
}
class Arg;
class ArgParser {
public:
Arg* first;
Arg** last;
ArgParser():
first(0),
last(&first) {}
bool parse(int ac, const char** av);
void printUsage(const char* exe);
};
class Arg {
public:
Arg* next;
bool required;
const char* name;
const char* desc;
const char* value;
Arg(ArgParser& parser, bool required, const char* name, const char* desc):
next(0),
required(required),
name(name),
desc(desc),
value(0)
{
*parser.last = this;
parser.last = &next;
}
};
bool ArgParser::parse(int ac, const char** av) {
Arg* state = 0;
for(int i = 1; i < ac; i++) {
if(state) {
if(state->value) {
fprintf(stderr, "duplicate parameter %s: '%s' and '%s'\n", state->name, state->value, av[i]);
return false;
}
state->value = av[i];
state = 0;
} else {
if(av[i][0] != '-') {
fprintf(stderr, "expected -parameter\n");
return false;
}
bool found = false;
for(Arg* arg = first; arg; arg = arg->next) {
if(::strcmp(arg->name, &av[i][1]) == 0) {
found = true;
if (arg->desc == 0) {
arg->value = "true";
} else {
state = arg;
}
}
}
if (not found) {
fprintf(stderr, "unrecognized parameter %s\n", av[i]);
return false;
}
}
}
if(state) {
fprintf(stderr, "expected argument after -%s\n", state->name);
return false;
}
for(Arg* arg = first; arg; arg = arg->next) {
if(arg->required && !arg->value) {
fprintf(stderr, "expected value for %s\n", arg->name);
return false;
}
}
return true;
}
void ArgParser::printUsage(const char* exe) {
fprintf(stderr, "usage:\n%s \\\n", exe);
for(Arg* arg = first; arg; arg = arg->next) {
const char* lineEnd = arg->next ? " \\" : "";
if(arg->required) {
fprintf(stderr, " -%s\t%s%s\n", arg->name, arg->desc, lineEnd);
} else if (arg->desc) {
fprintf(stderr, " [-%s\t%s]%s\n", arg->name, arg->desc, lineEnd);
} else {
fprintf(stderr, " [-%s]%s\n", arg->name, lineEnd);
}
}
}
char*
myStrndup(const char* src, unsigned length)
{

98
src/util/arg-parser.cpp Normal file
View File

@ -0,0 +1,98 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include <stdio.h>
#include <string.h>
#include <avian/util/arg-parser.h>
namespace avian {
namespace util {
// Registers this argument with 'parser' by appending it to the parser's
// singly-linked argument list ('parser.last' always points at the tail's
// 'next' slot).  A null 'desc' makes parse() treat the argument as a
// boolean flag that takes no value.
Arg::Arg(ArgParser& parser, bool required, const char* name, const char* desc):
  next(0),
  required(required),
  name(name),
  desc(desc),
  value(0)
{
  *parser.last = this;
  parser.last = &next;
}
// Starts with an empty argument list; 'last' points at the slot where the
// next registered Arg will be linked.
ArgParser::ArgParser():
  first(0),
  last(&first) {}
bool ArgParser::parse(int ac, const char* const* av) {
Arg* state = 0;
for(int i = 1; i < ac; i++) {
if(state) {
if(state->value) {
fprintf(stderr, "duplicate parameter %s: '%s' and '%s'\n", state->name, state->value, av[i]);
return false;
}
state->value = av[i];
state = 0;
} else {
if(av[i][0] != '-') {
fprintf(stderr, "expected -parameter\n");
return false;
}
bool found = false;
for(Arg* arg = first; arg; arg = arg->next) {
if(strcmp(arg->name, &av[i][1]) == 0) {
found = true;
if (arg->desc == 0) {
arg->value = "true";
} else {
state = arg;
}
}
}
if (not found) {
fprintf(stderr, "unrecognized parameter %s\n", av[i]);
return false;
}
}
}
if(state) {
fprintf(stderr, "expected argument after -%s\n", state->name);
return false;
}
for(Arg* arg = first; arg; arg = arg->next) {
if(arg->required && !arg->value) {
fprintf(stderr, "expected value for %s\n", arg->name);
return false;
}
}
return true;
}
// Writes a usage summary of every registered argument to stderr.
// Required arguments are shown bare; optional ones are bracketed.
void ArgParser::printUsage(const char* exe) {
  fprintf(stderr, "usage:\n%s \\\n", exe);
  for(Arg* arg = first; arg; arg = arg->next) {
    // Every line except the last ends with a shell continuation backslash.
    const char* lineEnd = arg->next ? " \\" : "";
    if(arg->required) {
      fprintf(stderr, " -%s\t%s%s\n", arg->name, arg->desc, lineEnd);
      continue;
    }
    if (arg->desc) {
      fprintf(stderr, " [-%s\t%s]%s\n", arg->name, arg->desc, lineEnd);
    } else {
      // Boolean flag: no value placeholder to show.
      fprintf(stderr, " [-%s]%s\n", arg->name, lineEnd);
    }
  }
}
} // namespace util
} // namespace avian

View File

@ -20,6 +20,7 @@ printf "%12s------- Unit tests -------\n" ""
${unit_tester} 2>>${log}
if [ "${?}" != "0" ]; then
trouble=1
echo "unit tests failed!"
fi
echo

View File

@ -16,6 +16,7 @@
#include "avian/target.h"
#include <avian/vm/codegen/assembler.h>
#include <avian/vm/codegen/architecture.h>
#include <avian/vm/codegen/targets.h>
#include <avian/vm/codegen/lir.h>
@ -29,7 +30,7 @@ class BasicEnv {
public:
System* s;
Heap* heap;
Assembler::Architecture* arch;
Architecture* arch;
BasicEnv():
s(makeSystem(0)),
@ -84,12 +85,11 @@ public:
for(int op = (int)lir::Call; op < (int)lir::AlignedJump; op++) {
bool thunk;
uint8_t typeMask;
uint64_t registerMask;
env.arch->plan((lir::UnaryOperation)op, vm::TargetBytesPerWord, &typeMask, &registerMask, &thunk);
OperandMask mask;
env.arch->plan((lir::UnaryOperation)op, vm::TargetBytesPerWord, mask, &thunk);
assertFalse(thunk);
assertNotEqual(static_cast<uint8_t>(0), typeMask);
assertNotEqual(static_cast<uint64_t>(0), registerMask);
assertNotEqual(static_cast<uint8_t>(0), mask.typeMask);
assertNotEqual(static_cast<uint64_t>(0), mask.registerMask);
}
}

View File

@ -0,0 +1,44 @@
/* Copyright (c) 2008-2011, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include <stdio.h>
#include <avian/vm/codegen/registers.h>
#include "test-harness.h"
using namespace avian::codegen;
using namespace vm;
// Unit test: RegisterIterator should visit exactly the set bits of a
// RegisterMask, in ascending order.
class RegisterIteratorTest : public Test {
public:
  RegisterIteratorTest():
    Test("RegisterIterator")
  {}

  virtual void run() {
    // 0x55 = 0b01010101: bits 0, 2, 4, 6 are set.
    RegisterMask regs(0x55);
    // start/limit should bound the set bits: lowest is 0, highest is 6,
    // so limit (one past the highest) is 7.
    assertEqual<unsigned>(0, regs.start);
    assertEqual<unsigned>(7, regs.limit);

    // The iterator must yield each set bit exactly once, low to high.
    RegisterIterator it(regs);
    assertTrue(it.hasNext());
    assertEqual<unsigned>(0, it.next());
    assertTrue(it.hasNext());
    assertEqual<unsigned>(2, it.next());
    assertTrue(it.hasNext());
    assertEqual<unsigned>(4, it.next());
    assertTrue(it.hasNext());
    assertEqual<unsigned>(6, it.next());
    assertFalse(it.hasNext());
  }
} registerIteratorTest;

View File

@ -10,7 +10,6 @@
#include <stdio.h>
#include "avian/common.h"
#include "test-harness.h"
// since we aren't linking against libstdc++, we must implement this

View File

@ -11,6 +11,9 @@
#ifndef TEST_HARNESS_H
#define TEST_HARNESS_H
#include "avian/common.h"
#include <stdio.h>
class Test {
private:
Test* next;
@ -53,6 +56,14 @@ protected:
}
runs++;
}
void assertEqual(const char* expected, const char* actual) {
if((expected == 0 && actual != 0) || (expected != 0 && actual == 0) || strcmp(expected, actual) != 0) {
fprintf(stderr, "assertion failure, expected: \"%s\", actual: \"%s\"\n", expected, actual);
failures++;
}
runs++;
}
template<class T>
void assertNotEqual(T expected, T actual) {

View File

@ -0,0 +1,69 @@
/* Copyright (c) 2008-2011, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include <stdio.h>
#include "avian/common.h"
#include <avian/util/arg-parser.h>
#include "test-harness.h"
using namespace avian::util;
// Unit test for ArgParser: covers a fully-specified command line, a
// required parameter missing its value, and a required parameter missing
// entirely.  Each argv array is null-terminated; the terminator is
// excluded from the count passed to parse().
class ArgParserTest : public Test {
public:
  ArgParserTest():
    Test("ArgParser")
  {}

  virtual void run() {
    {
      // Both the optional and the required argument get values: parse
      // succeeds and each Arg captures its value.
      ArgParser parser;
      Arg arg1(parser, false, "arg1", "<value>");
      Arg required2(parser, true, "required2", "<value>");
      const char* args[] = {
        "myExecutable",
        "-arg1", "myValue1",
        "-required2", "myRequired2",
        0
      };
      assertTrue(parser.parse(sizeof(args) / sizeof(char*) - 1, args));
      assertEqual("myValue1", arg1.value);
      assertEqual("myRequired2", required2.value);
    }
    {
      // "-required2" appears but the command line ends before its value:
      // parse must fail.
      ArgParser parser;
      Arg arg1(parser, false, "arg1", "<value>");
      Arg required2(parser, true, "required2", "<value>");
      const char* args[] = {
        "myExecutable",
        "-arg1", "myValue1",
        "-required2",
        0
      };
      assertFalse(parser.parse(sizeof(args) / sizeof(char*) - 1, args));
    }
    {
      // The required argument is absent altogether: parse must fail.
      ArgParser parser;
      Arg arg1(parser, false, "arg1", "<value>");
      Arg required2(parser, true, "required2", "<value>");
      const char* args[] = {
        "myExecutable",
        "-arg1", "myValue1",
        0
      };
      assertFalse(parser.parse(sizeof(args) / sizeof(char*) - 1, args));
    }
  }
} argParserTest;