Merge branch 'master' of https://github.com/ReadyTalk/avian into avian-pack

This commit is contained in:
Ilya Mizus 2015-01-17 18:34:21 +03:00
commit 89ab73c7e2
80 changed files with 5029 additions and 2006 deletions

View File

@ -1,12 +1,23 @@
language: cpp language: cpp
cache: apt
os:
- linux
- osx
env: env:
matrix:
- BUILD_STEP=""
- BUILD_STEP="PUBLISH"
global: global:
- TERM=dumb - TERM=dumb
- secure: Pe7OLUvdk3DeTAShyaYsdAXBcnjdNOrtfTPt1mCP5+JKBUw3z0n7BpTgATVRBmkn866pDqxL0N3+jBgjKUpEpqOj23bTB2AeDdIzlYkFGvTd3GQQV1lMFn16I9LOgWJhkfICTJ1rIYiNf8xSkJoeKGB7eGUM0KGsTvelFfsaGVc= - secure: rh1utD4shKmYtokItuRYEF9WsfTnvZO5XqnTU4DHTS7quHHgLihtOO2/3+B+2W2hEd5Obr2or8zx+zmzWcNUyLokZ0j/FRLWSScNkLzTtm12pupLrncY+/g1NIdfbhn+OLRIzBz6zB6m6a2qWFEJ+bScUNGD/7wZVtzkujqlDEE=
# BINTRAY_USER - secure: j9DOzZMCYk/BzhKK9u4XMKpCzyGOsvP2cLTp6cXE7/tkWDAPVv6BFmeqNbiLTEqk0aGX+HYbY/2YVtpRZmDzfeWtnBFF5mL1Y1tgzx1Kf155C+P6rZgt5PiQTUdXlp2umuRifY1BbXAPc3DZ2UOPUjWKnLHVbZLQRgO1zimmMx8=
- secure: CauPpj2QrbgEaePEPQx+FDeZqc46HmWXHfdAyn+DA9g6np3RMyc+N/ESrJ9efc4E772rnxory2QRyyNffzi29ceNTpIzsIRDuk5WvvC+zePEsQm8kX1afsDK5g16QEvJN24jGSW9ci9uxuknqjeKVOukcFQdxssIyDe11tYWJeI=
# BINTRAY_API_KEY matrix:
- secure: Gao6KTkCbqrdCDI2hQswh1v+SpHBeF4hR1WXmCe9gjmPyJb731K2eWigQOrhPOw7Ns3nubo1FwCb5YevYmwPMLryh0f4sKJyqLL1MLbffNl5GttNF2oO3p73cJpgBfHdzabAMwAYGYbneqUZ0Qn4K8RkzXUaoBDv465KmZhqbA0= fast_finish: true
script: "./test/ci.sh" exclude:
after_success: - os: osx
- "./gradlew artifactoryPublish" env: BUILD_STEP=""
script: ./test/ci.sh ${BUILD_STEP}

View File

@ -59,10 +59,10 @@ Supported Platforms
Avian can currently target the following platforms: Avian can currently target the following platforms:
* Linux (i386, x86_64, and ARM) * Linux (i386, x86_64, ARM, and ARM64)
* Windows (i386 and x86_64) * Windows (i386 and x86_64)
* Mac OS X (i386 and x86_64) * Mac OS X (i386 and x86_64)
* Apple iOS (i386 and ARM) * Apple iOS (i386, ARM, and ARM64)
* FreeBSD (i386, x86_64) * FreeBSD (i386, x86_64)
@ -86,7 +86,7 @@ certain flags described below, all of which are optional.
$ make \ $ make \
platform={linux,windows,macosx,ios,freebsd} \ platform={linux,windows,macosx,ios,freebsd} \
arch={i386,x86_64,arm} \ arch={i386,x86_64,arm,arm64} \
process={compile,interpret} \ process={compile,interpret} \
mode={debug,debug-fast,fast,small} \ mode={debug,debug-fast,fast,small} \
lzma=<lzma source directory> \ lzma=<lzma source directory> \

View File

@ -107,12 +107,6 @@ model {
operatingSystem SupportedOS.valueOf(platform.toUpperCase()) operatingSystem SupportedOS.valueOf(platform.toUpperCase())
architecture "${arch}" architecture "${arch}"
} }
if(platformArch != currentPlatformArch) {
create(currentPlatformArch) {
operatingSystem SupportedOS.CURRENT
architecture "${currentArch}"
}
}
} }
tasks { tasks {
@ -235,7 +229,7 @@ publishing {
artifact("${nativeBuildDir}/avian${binSuffix}") { artifact("${nativeBuildDir}/avian${binSuffix}") {
name "avian" name "avian"
type publishBinSuffix type publishBinSuffix
extension binSuffix extension publishBinSuffix
} }
artifact("${nativeBuildDir}/libavian.a") { artifact("${nativeBuildDir}/libavian.a") {
@ -249,6 +243,11 @@ publishing {
} }
artifactoryPublish { artifactoryPublish {
onlyIf {
// TRAVIS_BRANCH reports master if it is a master build or a PR going to master
// TRAVIS_PULL_REQUEST reports false if not a pull request and the PR number if it is
System.env.'TRAVIS_BRANCH' == "master" && System.env.'TRAVIS_PULL_REQUEST' == "false"
}
dependsOn assemble dependsOn assemble
} }

View File

@ -295,9 +295,9 @@ public class Classes {
} }
Class c = loader.loadClass(name); Class c = loader.loadClass(name);
VMClass vmc = SystemClassLoader.vmClass(c); VMClass vmc = SystemClassLoader.vmClass(c);
Classes.link(vmc, loader); link(vmc, loader);
if (initialize) { if (initialize) {
Classes.initialize(vmc); initialize(vmc);
} }
return c; return c;
} }
@ -315,7 +315,7 @@ public class Classes {
} else { } else {
if (name.length() == 1) { if (name.length() == 1) {
return SystemClassLoader.getClass return SystemClassLoader.getClass
(Classes.primitiveClass(name.charAt(0))); (primitiveClass(name.charAt(0)));
} else { } else {
throw new ClassNotFoundException(name); throw new ClassNotFoundException(name);
} }
@ -378,7 +378,7 @@ public class Classes {
public static int findField(VMClass vmClass, String name) { public static int findField(VMClass vmClass, String name) {
if (vmClass.fieldTable != null) { if (vmClass.fieldTable != null) {
Classes.link(vmClass); link(vmClass);
for (int i = 0; i < vmClass.fieldTable.length; ++i) { for (int i = 0; i < vmClass.fieldTable.length; ++i) {
if (toString(vmClass.fieldTable[i].name).equals(name)) { if (toString(vmClass.fieldTable[i].name).equals(name)) {
@ -426,7 +426,7 @@ public class Classes {
{ {
VMMethod[] methodTable = vmClass.methodTable; VMMethod[] methodTable = vmClass.methodTable;
if (methodTable != null) { if (methodTable != null) {
Classes.link(vmClass); link(vmClass);
if (parameterTypes == null) { if (parameterTypes == null) {
parameterTypes = new Class[0]; parameterTypes = new Class[0];
@ -464,7 +464,7 @@ public class Classes {
Method[] array = new Method[countMethods(vmClass, publicOnly)]; Method[] array = new Method[countMethods(vmClass, publicOnly)];
VMMethod[] methodTable = vmClass.methodTable; VMMethod[] methodTable = vmClass.methodTable;
if (methodTable != null) { if (methodTable != null) {
Classes.link(vmClass); link(vmClass);
int ai = 0; int ai = 0;
for (int i = 0, j = declaredMethodCount(vmClass); i < j; ++i) { for (int i = 0, j = declaredMethodCount(vmClass); i < j; ++i) {
@ -498,7 +498,7 @@ public class Classes {
public static Field[] getFields(VMClass vmClass, boolean publicOnly) { public static Field[] getFields(VMClass vmClass, boolean publicOnly) {
Field[] array = new Field[countFields(vmClass, publicOnly)]; Field[] array = new Field[countFields(vmClass, publicOnly)];
if (vmClass.fieldTable != null) { if (vmClass.fieldTable != null) {
Classes.link(vmClass); link(vmClass);
int ai = 0; int ai = 0;
for (int i = 0; i < vmClass.fieldTable.length; ++i) { for (int i = 0; i < vmClass.fieldTable.length; ++i) {

View File

@ -18,7 +18,7 @@ package java.security;
*/ */
public class AccessController { public class AccessController {
public static Object doPrivileged (PrivilegedAction action) { public static <T> T doPrivileged (PrivilegedAction<T> action) {
return action.run(); return action.run();
} }

View File

@ -340,7 +340,101 @@ public class Collections {
} }
public static <V> Set<V> synchronizedSet(Set<V> set) { public static <V> Set<V> synchronizedSet(Set<V> set) {
return new SynchronizedSet<V> (new Object(), set); return new SynchronizedSet<V> (set, set);
}
static class SynchronizedList<T>
extends SynchronizedCollection<T>
implements List<T>
{
private final List<T> list;
public SynchronizedList(List<T> list) {
super(list, list);
this.list = list;
}
@Override
public T get(int index) {
synchronized (lock) {
return list.get(index);
}
}
@Override
public T set(int index, T value) {
synchronized (lock) {
return list.set(index, value);
}
}
@Override
public T remove(int index) {
synchronized (lock) {
return list.remove(index);
}
}
@Override
public void add(int index, T element) {
synchronized (lock) {
list.add(index, element);
}
}
@Override
public boolean addAll(int startIndex, Collection<? extends T> c) {
synchronized (lock) {
return list.addAll(startIndex, c);
}
}
@Override
public int indexOf(Object value) {
synchronized (lock) {
return list.indexOf(value);
}
}
@Override
public int lastIndexOf(Object value) {
synchronized (lock) {
return list.lastIndexOf(value);
}
}
@Override
public ListIterator<T> listIterator(int index) {
// as described in the javadocs, user should be synchronized on list before calling
return list.listIterator(index);
}
@Override
public ListIterator<T> listIterator() {
// as described in the javadocs, user should be synchronized on list before calling
return list.listIterator();
}
}
static class RandomAccessSynchronizedList<T>
extends SynchronizedList<T>
implements RandomAccess
{
public RandomAccessSynchronizedList(List<T> list) {
super(list);
}
}
public static <T> List<T> synchronizedList(List<T> list) {
List<T> result;
if (list instanceof RandomAccess) {
result = new RandomAccessSynchronizedList<T>(list);
} else {
result = new SynchronizedList<T>(list);
}
return result;
} }
static class SynchronizedIterator<T> implements Iterator<T> { static class SynchronizedIterator<T> implements Iterator<T> {

View File

@ -49,6 +49,14 @@ public class ConcurrentHashMap<K,V>
this(); this();
} }
public ConcurrentHashMap(int initialCapacity, float loadFactor) {
this();
}
public ConcurrentHashMap(int initialCapacity, float loadFactor, int concurrencyLevel) {
this();
}
public boolean isEmpty() { public boolean isEmpty() {
return content.size == 0; return content.size == 0;
} }

View File

@ -10,6 +10,6 @@
package java.util.concurrent; package java.util.concurrent;
public interface Delayed { public interface Delayed extends Comparable<Delayed> {
public long getDelay(TimeUnit unit); public long getDelay(TimeUnit unit);
} }

View File

@ -15,6 +15,8 @@ import java.util.Collection;
public interface ExecutorService extends Executor { public interface ExecutorService extends Executor {
public void shutdown(); public void shutdown();
public List<Runnable> shutdownNow();
public boolean isShutdown(); public boolean isShutdown();
public boolean isTerminated(); public boolean isTerminated();

View File

@ -0,0 +1,24 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
package java.util.concurrent;
public class Executors {
public static <T> Callable<T> callable(final Runnable task, final T result) {
return new Callable<T>() {
@Override
public T call() throws Exception {
task.run();
return result;
}
};
}
}

71
docker/arm64/Dockerfile Normal file
View File

@ -0,0 +1,71 @@
FROM joshuawarner32/avian-build
MAINTAINER Joshua Warner, joshuawarner32@gmail.com
RUN dpkg --add-architecture arm64 && \
apt-get update && \
mkdir -p /opt/arm64 && \
apt-get download libc6-dev:arm64 \
linux-headers-3.16.0-4-all-arm64:arm64 \
linux-libc-dev:arm64 \
libc6:arm64 \
zlib1g-dev:arm64 \
zlib1g:arm64 && \
for x in *.deb; do \
dpkg -x $x /opt/arm64; \
done && \
rm *.deb && \
apt-get install -y \
wget \
libgmp-dev \
libmpfr-dev \
libmpc-dev \
libisl-dev && \
apt-get clean all && \
for x in $(find /opt/arm64 -type l); do \
r=$(readlink "$x" | sed 's,^/,/opt/arm64/,g'); \
rm "$x"; \
ln -s "$r" "$x"; \
done
RUN mkdir -p /var/src
# Build & install binutils
RUN wget ftp://sourceware.org/pub/binutils/snapshots/binutils-2.23.91.tar.bz2 -O /var/src/binutils.tar.bz2 && \
cd /var/src/ && tar -xjf binutils.tar.bz2 && rm binutils.tar.bz2 && \
cd /var/src/binutils* && \
mkdir build && \
cd build && \
../configure \
--target=aarch64-linux-gnu \
--prefix=/opt/arm64 \
--disable-multilib \
--program-prefix=aarch64-linux-gnu- \
--with-sysroot=/opt/arm64 \
--with-headers=/opt/arm64/usr/include \
--disable-werror && \
make && \
make install && \
cd /var/src && \
rm -rf *
# build & install gcc
RUN wget http://www.netgull.com/gcc/releases/gcc-4.8.2/gcc-4.8.2.tar.bz2 -O /var/src/gcc.tar.bz2 && \
cd /var/src/ && tar -xjf gcc.tar.bz2 && rm gcc.tar.bz2 && \
cd /var/src/gcc* && \
mkdir build && \
cd build && \
../configure \
--target=aarch64-linux-gnu \
--enable-languages=c,c++ \
--prefix=/opt/arm64 \
--disable-multilib \
--program-prefix=aarch64-linux-gnu- \
--with-sysroot=/opt/arm64 \
--with-headers=/opt/arm64/usr/include \
--disable-werror && \
make && \
make install && \
cd /var/src && \
rm -rf *
ENV PATH $PATH:/opt/arm64/bin

View File

@ -8,6 +8,8 @@ if test $# -eq 0; then
exit 1 exit 1
fi fi
THE_USER="-u $(id -u "${USER}")"
while test $# -gt 1 ; do while test $# -gt 1 ; do
key="$1" key="$1"
case $key in case $key in
@ -16,6 +18,10 @@ while test $# -gt 1 ; do
CONTAINER="$1" CONTAINER="$1"
shift shift
;; ;;
-r|--root)
shift
THE_USER=
;;
--) --)
shift shift
break break
@ -32,4 +38,4 @@ fi
DIR=$(cd $(dirname "$0") && cd .. && pwd) DIR=$(cd $(dirname "$0") && cd .. && pwd)
docker run --rm -i -t -v "${DIR}":/var/avian -u $(id -u "${USER}") "${CONTAINER}" "${@}" docker run --rm -i -t -v "${DIR}":/var/avian ${THE_USER} "${CONTAINER}" "${@}"

View File

@ -1,4 +1,4 @@
#Thu Aug 28 14:47:06 MDT 2014 #Fri Jan 02 11:31:32 MST 2015
distributionBase=GRADLE_USER_HOME distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME zipStoreBase=GRADLE_USER_HOME

View File

@ -12,6 +12,7 @@
#define AVIAN_CODEGEN_ARCHITECTURE_H #define AVIAN_CODEGEN_ARCHITECTURE_H
#include "ir.h" #include "ir.h"
#include "registers.h"
namespace vm { namespace vm {
class Zone; class Zone;
@ -27,21 +28,29 @@ namespace codegen {
class Assembler; class Assembler;
class RegisterFile;
class OperandMask { class OperandMask {
public: public:
uint8_t typeMask; uint8_t typeMask;
uint64_t registerMask; RegisterMask lowRegisterMask;
RegisterMask highRegisterMask;
OperandMask(uint8_t typeMask, uint64_t registerMask) OperandMask(uint8_t typeMask,
: typeMask(typeMask), registerMask(registerMask) RegisterMask lowRegisterMask,
RegisterMask highRegisterMask)
: typeMask(typeMask),
lowRegisterMask(lowRegisterMask),
highRegisterMask(highRegisterMask)
{ {
} }
OperandMask() : typeMask(~0), registerMask(~static_cast<uint64_t>(0)) OperandMask() : typeMask(~0), lowRegisterMask(AnyRegisterMask), highRegisterMask(AnyRegisterMask)
{ {
} }
void setLowHighRegisterMasks(RegisterMask lowRegisterMask, RegisterMask highRegisterMask) {
this->lowRegisterMask = lowRegisterMask;
this->highRegisterMask = highRegisterMask;
}
}; };
class Architecture { class Architecture {
@ -50,13 +59,13 @@ class Architecture {
virtual const RegisterFile* registerFile() = 0; virtual const RegisterFile* registerFile() = 0;
virtual int scratch() = 0; virtual Register scratch() = 0;
virtual int stack() = 0; virtual Register stack() = 0;
virtual int thread() = 0; virtual Register thread() = 0;
virtual int returnLow() = 0; virtual Register returnLow() = 0;
virtual int returnHigh() = 0; virtual Register returnHigh() = 0;
virtual int virtualCallTarget() = 0; virtual Register virtualCallTarget() = 0;
virtual int virtualCallIndex() = 0; virtual Register virtualCallIndex() = 0;
virtual ir::TargetInfo targetInfo() = 0; virtual ir::TargetInfo targetInfo() = 0;
@ -67,14 +76,14 @@ class Architecture {
virtual bool alwaysCondensed(lir::BinaryOperation op) = 0; virtual bool alwaysCondensed(lir::BinaryOperation op) = 0;
virtual bool alwaysCondensed(lir::TernaryOperation op) = 0; virtual bool alwaysCondensed(lir::TernaryOperation op) = 0;
virtual bool reserved(int register_) = 0; virtual bool reserved(Register register_) = 0;
virtual unsigned frameFootprint(unsigned footprint) = 0; virtual unsigned frameFootprint(unsigned footprint) = 0;
virtual unsigned argumentFootprint(unsigned footprint) = 0; virtual unsigned argumentFootprint(unsigned footprint) = 0;
virtual bool argumentAlignment() = 0; virtual bool argumentAlignment() = 0;
virtual bool argumentRegisterAlignment() = 0; virtual bool argumentRegisterAlignment() = 0;
virtual unsigned argumentRegisterCount() = 0; virtual unsigned argumentRegisterCount() = 0;
virtual int argumentRegister(unsigned index) = 0; virtual Register argumentRegister(unsigned index) = 0;
virtual bool hasLinkRegister() = 0; virtual bool hasLinkRegister() = 0;

View File

@ -15,6 +15,7 @@
#include "avian/zone.h" #include "avian/zone.h"
#include <avian/codegen/lir.h> #include <avian/codegen/lir.h>
#include <avian/codegen/registers.h>
#include <avian/codegen/promise.h> #include <avian/codegen/promise.h>
namespace avian { namespace avian {
@ -25,11 +26,11 @@ class Architecture;
class OperandInfo { class OperandInfo {
public: public:
const unsigned size; const unsigned size;
const lir::OperandType type; const lir::Operand::Type type;
lir::Operand* const operand; lir::Operand* const operand;
inline OperandInfo(unsigned size, inline OperandInfo(unsigned size,
lir::OperandType type, lir::Operand::Type type,
lir::Operand* operand) lir::Operand* operand)
: size(size), type(type), operand(operand) : size(size), type(type), operand(operand)
{ {
@ -52,10 +53,10 @@ class Assembler {
public: public:
class Client { class Client {
public: public:
virtual int acquireTemporary(uint32_t mask = ~static_cast<uint32_t>(0)) = 0; virtual Register acquireTemporary(RegisterMask mask = AnyRegisterMask) = 0;
virtual void releaseTemporary(int r) = 0; virtual void releaseTemporary(Register r) = 0;
virtual void save(int r) = 0; virtual void save(Register r) = 0;
}; };
class Block { class Block {
@ -76,8 +77,8 @@ class Assembler {
virtual void popFrame(unsigned footprint) = 0; virtual void popFrame(unsigned footprint) = 0;
virtual void popFrameForTailCall(unsigned footprint, virtual void popFrameForTailCall(unsigned footprint,
int offset, int offset,
int returnAddressSurrogate, Register returnAddressSurrogate,
int framePointerSurrogate) = 0; Register framePointerSurrogate) = 0;
virtual void popFrameAndPopArgumentsAndReturn(unsigned frameFootprint, virtual void popFrameAndPopArgumentsAndReturn(unsigned frameFootprint,
unsigned argumentFootprint) = 0; unsigned argumentFootprint) = 0;
virtual void popFrameAndUpdateStackAndReturn(unsigned frameFootprint, virtual void popFrameAndUpdateStackAndReturn(unsigned frameFootprint,

View File

@ -11,6 +11,8 @@
#ifndef AVIAN_CODEGEN_LIR_H #ifndef AVIAN_CODEGEN_LIR_H
#define AVIAN_CODEGEN_LIR_H #define AVIAN_CODEGEN_LIR_H
#include <avian/codegen/registers.h>
namespace avian { namespace avian {
namespace codegen { namespace codegen {
class Promise; class Promise;
@ -79,19 +81,8 @@ const unsigned NonBranchTernaryOperationCount = FloatMin + 1;
const unsigned BranchOperationCount = JumpIfFloatGreaterOrEqualOrUnordered const unsigned BranchOperationCount = JumpIfFloatGreaterOrEqualOrUnordered
- FloatMin; - FloatMin;
enum OperandType {
ConstantOperand,
AddressOperand,
RegisterOperand,
MemoryOperand
};
enum ValueType { ValueGeneral, ValueFloat }; enum ValueType { ValueGeneral, ValueFloat };
const unsigned OperandTypeCount = MemoryOperand + 1;
const int NoRegister = -1;
inline bool isBranch(lir::TernaryOperation op) inline bool isBranch(lir::TernaryOperation op)
{ {
return op > FloatMin; return op > FloatMin;
@ -128,6 +119,21 @@ inline bool isFloatUnaryOp(lir::BinaryOperation op)
} }
class Operand { class Operand {
public:
enum class Type {
Constant,
Address,
RegisterPair,
Memory
};
const static unsigned TypeCount = (unsigned)Type::Memory + 1;
const static unsigned ConstantMask = 1 << (unsigned)Type::Constant;
const static unsigned AddressMask = 1 << (unsigned)Type::Address;
const static unsigned RegisterPairMask = 1 << (unsigned)Type::RegisterPair;
const static unsigned MemoryMask = 1 << (unsigned)Type::Memory;
}; };
class Constant : public Operand { class Constant : public Operand {
@ -148,26 +154,26 @@ class Address : public Operand {
Promise* address; Promise* address;
}; };
class Register : public Operand { class RegisterPair : public Operand {
public: public:
Register(int low, int high = NoRegister) : low(low), high(high) RegisterPair(Register low, Register high = NoRegister) : low(low), high(high)
{ {
} }
int low; Register low;
int high; Register high;
}; };
class Memory : public Operand { class Memory : public Operand {
public: public:
Memory(int base, int offset, int index = NoRegister, unsigned scale = 1) Memory(Register base, int offset, Register index = NoRegister, unsigned scale = 1)
: base(base), offset(offset), index(index), scale(scale) : base(base), offset(offset), index(index), scale(scale)
{ {
} }
int base; Register base;
int offset; int offset;
int index; Register index;
unsigned scale; unsigned scale;
}; };

View File

@ -16,28 +16,189 @@
namespace avian { namespace avian {
namespace codegen { namespace codegen {
class RegisterMask { class RegisterMask;
class Register {
private:
int8_t _index;
public: public:
uint32_t mask; explicit constexpr Register(int8_t _index) : _index(_index) {}
uint8_t start; constexpr Register() : _index(-1) {}
uint8_t limit;
static unsigned maskStart(uint32_t mask); constexpr bool operator == (Register o) const {
static unsigned maskLimit(uint32_t mask); return _index == o._index;
}
inline RegisterMask(uint32_t mask) constexpr bool operator != (Register o) const {
: mask(mask), start(maskStart(mask)), limit(maskLimit(mask)) return !(*this == o);
{ }
constexpr RegisterMask operator | (Register o) const;
constexpr bool operator < (Register o) const {
return _index < o._index;
}
constexpr bool operator > (Register o) const {
return _index > o._index;
}
constexpr bool operator <= (Register o) const {
return _index <= o._index;
}
constexpr bool operator >= (Register o) const {
return _index >= o._index;
}
constexpr int index() const {
return _index;
} }
}; };
constexpr Register NoRegister;
class RegisterMask {
private:
uint64_t mask;
static constexpr unsigned maskStart(uint64_t mask, unsigned offset = 64) {
return mask == 0 ? (offset & 63) : maskStart(mask << 1, offset - 1);
}
static constexpr unsigned maskLimit(uint64_t mask, unsigned offset = 0) {
return mask == 0 ? offset : maskLimit(mask >> 1, offset + 1);
}
public:
constexpr RegisterMask(uint64_t mask) : mask(mask) {}
constexpr RegisterMask() : mask(0) {}
constexpr RegisterMask(Register reg) : mask(static_cast<uint64_t>(1) << reg.index()) {}
constexpr unsigned begin() const {
return maskStart(mask);
}
constexpr unsigned end() const {
return maskLimit(mask);
}
constexpr RegisterMask operator &(RegisterMask o) const {
return RegisterMask(mask & o.mask);
}
RegisterMask operator &=(RegisterMask o) {
mask &= o.mask;
return *this;
}
constexpr RegisterMask operator |(RegisterMask o) const {
return RegisterMask(mask | o.mask);
}
constexpr bool contains(Register reg) const {
return (mask & (static_cast<uint64_t>(1) << reg.index())) != 0;
}
constexpr bool containsExactly(Register reg) const {
return mask == (mask & (static_cast<uint64_t>(1) << reg.index()));
}
constexpr RegisterMask excluding(Register reg) const {
return RegisterMask(mask & ~(static_cast<uint64_t>(1) << reg.index()));
}
constexpr RegisterMask including(Register reg) const {
return RegisterMask(mask | (static_cast<uint64_t>(1) << reg.index()));
}
constexpr explicit operator uint64_t() const {
return mask;
}
constexpr explicit operator bool() const {
return mask != 0;
}
};
constexpr RegisterMask AnyRegisterMask(~static_cast<uint64_t>(0));
constexpr RegisterMask NoneRegisterMask(0);
constexpr RegisterMask Register::operator | (Register o) const {
return RegisterMask(*this) | o;
}
class RegisterIterator;
class BoundedRegisterMask : public RegisterMask {
public:
uint8_t start;
uint8_t limit;
BoundedRegisterMask(RegisterMask mask)
: RegisterMask(mask), start(mask.begin()), limit(mask.end())
{
}
RegisterIterator begin() const;
RegisterIterator end() const;
};
class RegisterIterator {
public:
int index;
int direction;
int limit;
const RegisterMask mask;
RegisterIterator(int index, int direction, int limit, RegisterMask mask)
: index(index), direction(direction), limit(limit), mask(mask)
{
}
bool operator !=(const RegisterIterator& o) const {
return index != o.index;
}
Register operator *() {
return Register(index);
}
void operator ++ () {
if(index != limit) {
index += direction;
}
while(index != limit && !mask.contains(Register(index))) {
index += direction;
}
}
};
inline RegisterIterator BoundedRegisterMask::begin() const {
// We use reverse iteration... for some reason.
return RegisterIterator(limit - 1, -1, start - 1, *this);
}
inline RegisterIterator BoundedRegisterMask::end() const {
// We use reverse iteration... for some reason.
return RegisterIterator(start - 1, -1, start - 1, *this);
}
inline RegisterIterator begin(BoundedRegisterMask mask) {
return mask.begin();
}
inline RegisterIterator end(BoundedRegisterMask mask) {
return mask.end();
}
class RegisterFile { class RegisterFile {
public: public:
RegisterMask allRegisters; BoundedRegisterMask allRegisters;
RegisterMask generalRegisters; BoundedRegisterMask generalRegisters;
RegisterMask floatRegisters; BoundedRegisterMask floatRegisters;
inline RegisterFile(uint32_t generalRegisterMask, uint32_t floatRegisterMask) RegisterFile(RegisterMask generalRegisterMask, RegisterMask floatRegisterMask)
: allRegisters(generalRegisterMask | floatRegisterMask), : allRegisters(generalRegisterMask | floatRegisterMask),
generalRegisters(generalRegisterMask), generalRegisters(generalRegisterMask),
floatRegisters(floatRegisterMask) floatRegisters(floatRegisterMask)
@ -45,31 +206,6 @@ class RegisterFile {
} }
}; };
class RegisterIterator {
public:
int index;
const RegisterMask& mask;
inline RegisterIterator(const RegisterMask& mask)
: index(mask.start), mask(mask)
{
}
inline bool hasNext()
{
return index < mask.limit;
}
inline int next()
{
int r = index;
do {
index++;
} while (index < mask.limit && !(mask.mask & (1 << index)));
return r;
}
};
} // namespace codegen } // namespace codegen
} // namespace avian } // namespace avian

View File

@ -118,6 +118,7 @@ class PlatformInfo {
x86 = AVIAN_ARCH_X86, x86 = AVIAN_ARCH_X86,
x86_64 = AVIAN_ARCH_X86_64, x86_64 = AVIAN_ARCH_X86_64,
Arm = AVIAN_ARCH_ARM, Arm = AVIAN_ARCH_ARM,
Arm64 = AVIAN_ARCH_ARM64,
UnknownArch = AVIAN_ARCH_UNKNOWN UnknownArch = AVIAN_ARCH_UNKNOWN
}; };

View File

@ -7,7 +7,8 @@ build-arch := $(shell uname -m \
| sed 's/^i.86$$/i386/' \ | sed 's/^i.86$$/i386/' \
| sed 's/^x86pc$$/i386/' \ | sed 's/^x86pc$$/i386/' \
| sed 's/amd64/x86_64/' \ | sed 's/amd64/x86_64/' \
| sed 's/^arm.*$$/arm/') | sed 's/^arm.*$$/arm/' \
| sed 's/aarch64/arm64/')
build-platform := \ build-platform := \
$(shell uname -s | tr [:upper:] [:lower:] \ $(shell uname -s | tr [:upper:] [:lower:] \
@ -62,8 +63,8 @@ ifeq ($(filter compile interpret,$(process)),)
x := $(error "'$(process)' is not a valid process (choose one of: compile interpret)") x := $(error "'$(process)' is not a valid process (choose one of: compile interpret)")
endif endif
ifeq ($(filter x86_64 i386 arm,$(arch)),) ifeq ($(filter x86_64 i386 arm arm64,$(arch)),)
x := $(error "'$(arch)' is not a supported architecture (choose one of: x86_64 i386 arm)") x := $(error "'$(arch)' is not a supported architecture (choose one of: x86_64 i386 arm arm64)")
endif endif
ifeq ($(platform),darwin) ifeq ($(platform),darwin)
@ -79,14 +80,14 @@ ifeq ($(filter linux windows macosx ios freebsd,$(platform)),)
endif endif
ifeq ($(platform),macosx) ifeq ($(platform),macosx)
ifeq ($(arch),arm) ifneq ($(filter arm arm64,$(arch)),)
x := $(error "please use 'arch=arm' 'platform=ios' to build for ios-arm") x := $(error "please use ('arch=arm' or 'arch=arm64') 'platform=ios' to build for ios-arm")
endif endif
endif endif
ifeq ($(platform),ios) ifeq ($(platform),ios)
ifeq ($(filter arm i386,$(arch)),) ifeq ($(filter i386 arm arm64,$(arch)),)
x := $(error "please specify 'arch=i386' or 'arch=arm' with 'platform=ios'") x := $(error "please specify 'arch=i386', 'arch=arm', or 'arch=arm64' with 'platform=ios'")
endif endif
endif endif
@ -542,15 +543,15 @@ codeimage-symbols = _binary_codeimage_bin_start:_binary_codeimage_bin_end
developer-dir := $(shell if test -d /Developer/Platforms/$(target).platform/Developer/SDKs; then echo /Developer; \ developer-dir := $(shell if test -d /Developer/Platforms/$(target).platform/Developer/SDKs; then echo /Developer; \
else echo /Applications/Xcode.app/Contents/Developer; fi) else echo /Applications/Xcode.app/Contents/Developer; fi)
ifeq ($(arch),i386) ifneq (,$(filter i386 arm,$(arch)))
pointer-size = 4 pointer-size = 4
endif endif
ifeq ($(arch),arm) ifneq (,$(filter arm arm64,$(arch)))
asm = arm asm = arm
pointer-size = 4
ifneq ($(platform),ios) ifneq ($(platform),ios)
ifneq ($(arch),arm64)
no-psabi = -Wno-psabi no-psabi = -Wno-psabi
cflags += -marm $(no-psabi) cflags += -marm $(no-psabi)
@ -558,9 +559,17 @@ ifeq ($(arch),arm)
# non-iOS platforms. Ideally, we'd detect this at runtime. # non-iOS platforms. Ideally, we'd detect this at runtime.
armv6=true armv6=true
endif endif
endif
ifneq ($(arch),$(build-arch)) ifneq ($(arch),$(build-arch))
ifneq ($(kernel),darwin) ifneq ($(kernel),darwin)
ifeq ($(arch),arm64)
cxx = aarch64-linux-gnu-g++
cc = aarch64-linux-gnu-gcc
ar = aarch64-linux-gnu-ar
ranlib = aarch64-linux-gnu-ranlib
strip = aarch64-linux-gnu-strip
else
cxx = arm-linux-gnueabi-g++ cxx = arm-linux-gnueabi-g++
cc = arm-linux-gnueabi-gcc cc = arm-linux-gnueabi-gcc
ar = arm-linux-gnueabi-ar ar = arm-linux-gnueabi-ar
@ -569,6 +578,7 @@ ifeq ($(arch),arm)
endif endif
endif endif
endif endif
endif
ifeq ($(armv6),true) ifeq ($(armv6),true)
cflags += -DAVIAN_ASSUME_ARMV6 cflags += -DAVIAN_ASSUME_ARMV6
@ -743,7 +753,11 @@ ifeq ($(kernel),darwin)
else else
target = iPhoneOS target = iPhoneOS
sdk = iphoneos$(ios-version) sdk = iphoneos$(ios-version)
ifeq ($(arch),arm)
arch-flag = -arch armv7 arch-flag = -arch armv7
else
arch-flag = -arch arm64
endif
release = Release-iphoneos release = Release-iphoneos
endif endif
@ -751,7 +765,8 @@ ifeq ($(kernel),darwin)
sdk-dir = $(platform-dir)/Developer/SDKs sdk-dir = $(platform-dir)/Developer/SDKs
ios-version := $(shell \ ios-version := $(shell \
if test -d $(sdk-dir)/$(target)8.0.sdk; then echo 8.0; \ if test -d $(sdk-dir)/$(target)8.1.sdk; then echo 8.1; \
elif test -d $(sdk-dir)/$(target)8.0.sdk; then echo 8.0; \
elif test -d $(sdk-dir)/$(target)7.1.sdk; then echo 7.1; \ elif test -d $(sdk-dir)/$(target)7.1.sdk; then echo 7.1; \
elif test -d $(sdk-dir)/$(target)7.0.sdk; then echo 7.0; \ elif test -d $(sdk-dir)/$(target)7.0.sdk; then echo 7.0; \
elif test -d $(sdk-dir)/$(target)6.1.sdk; then echo 6.1; \ elif test -d $(sdk-dir)/$(target)6.1.sdk; then echo 6.1; \
@ -1213,7 +1228,7 @@ vm-sources = \
$(src)/jnienv.cpp \ $(src)/jnienv.cpp \
$(src)/process.cpp $(src)/process.cpp
vm-asm-sources = $(src)/$(asm).$(asm-format) vm-asm-sources = $(src)/$(arch).$(asm-format)
target-asm = $(asm) target-asm = $(asm)
@ -1230,7 +1245,6 @@ compiler-sources = \
$(src)/codegen/compiler.cpp \ $(src)/codegen/compiler.cpp \
$(wildcard $(src)/codegen/compiler/*.cpp) \ $(wildcard $(src)/codegen/compiler/*.cpp) \
$(src)/debug-util.cpp \ $(src)/debug-util.cpp \
$(src)/codegen/registers.cpp \
$(src)/codegen/runtime.cpp \ $(src)/codegen/runtime.cpp \
$(src)/codegen/targets.cpp \ $(src)/codegen/targets.cpp \
$(src)/util/fixed-allocator.cpp $(src)/util/fixed-allocator.cpp
@ -1256,7 +1270,7 @@ ifeq ($(process),compile)
vm-sources += $(native-assembler-sources) vm-sources += $(native-assembler-sources)
endif endif
ifeq ($(codegen-targets),all) ifeq ($(codegen-targets),all)
ifeq ($(arch),arm) ifneq (,$(filter arm arm64,$(arch)))
# The x86 jit has a dependency on the x86 assembly code, # The x86 jit has a dependency on the x86 assembly code,
# and thus can't be successfully built on non-x86 platforms. # and thus can't be successfully built on non-x86 platforms.
vm-sources += $(native-assembler-sources) vm-sources += $(native-assembler-sources)
@ -1265,7 +1279,7 @@ ifeq ($(process),compile)
endif endif
endif endif
vm-asm-sources += $(src)/compile-$(asm).$(asm-format) vm-asm-sources += $(src)/compile-$(arch).$(asm-format)
endif endif
cflags += -DAVIAN_PROCESS_$(process) cflags += -DAVIAN_PROCESS_$(process)
ifeq ($(aot-only),true) ifeq ($(aot-only),true)
@ -1533,6 +1547,10 @@ ifeq ($(target-arch),arm)
cflags += -DAVIAN_TARGET_ARCH=AVIAN_ARCH_ARM cflags += -DAVIAN_TARGET_ARCH=AVIAN_ARCH_ARM
endif endif
ifeq ($(target-arch),arm64)
cflags += -DAVIAN_TARGET_ARCH=AVIAN_ARCH_ARM64
endif
ifeq ($(target-format),elf) ifeq ($(target-format),elf)
cflags += -DAVIAN_TARGET_FORMAT=AVIAN_FORMAT_ELF cflags += -DAVIAN_TARGET_FORMAT=AVIAN_FORMAT_ELF
endif endif
@ -1659,7 +1677,8 @@ $(classpath-dep): $(classpath-sources) $(classpath-jar-dep)
@echo "compiling classpath classes" @echo "compiling classpath classes"
@mkdir -p $(classpath-build) @mkdir -p $(classpath-build)
classes="$(shell $(MAKE) -s --no-print-directory build=$(build) \ classes="$(shell $(MAKE) -s --no-print-directory build=$(build) \
$(classpath-classes))"; if [ -n "$${classes}" ]; then \ $(classpath-classes) arch=$(build-arch) platform=$(build-platform))"; \
if [ -n "$${classes}" ]; then \
$(javac) -source 1.6 -target 1.6 \ $(javac) -source 1.6 -target 1.6 \
-d $(classpath-build) -bootclasspath $(boot-classpath) \ -d $(classpath-build) -bootclasspath $(boot-classpath) \
$${classes}; fi $${classes}; fi

148
src/arm64.S Normal file
View File

@ -0,0 +1,148 @@
/* arm.S: JNI gluecode for ARM
Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "avian/types.h"
.text
#define LOCAL(x) .L##x
#ifdef __APPLE__
# define GLOBAL(x) _##x
#else
# define GLOBAL(x) x
#endif
.globl GLOBAL(vmNativeCall)
.align 2
GLOBAL(vmNativeCall):
// arguments:
// x0 -> x19 : function
// w1 -> w20 : stackTotal
// x2 : memoryTable
// w3 : memoryCount
// x4 -> x21 : gprTable
// x5 -> x22 : vfpTable
// w6 -> w23 : returnType
// allocate frame
stp x29, x30, [sp,#-64]!
mov x29, sp
// save callee-saved register values so we can clobber them
stp x19, x20, [sp,#16]
stp x21, x22, [sp,#32]
str x23, [sp,#48]
// move arguments into callee-saved registers
mov x19, x0
mov w20, w1
mov x21, x4
mov x22, x5
mov w23, w6
// setup stack arguments if necessary
sub sp, sp, w20, uxtw // allocate stack
mov x9, sp
LOCAL(loop):
cmp w3, wzr
b.eq LOCAL(populateGPRs)
ldr x0, [x2], #8
str x0, [x9], #8
sub w3, w3, #8
b LOCAL(loop)
LOCAL(populateGPRs):
cmp x21, xzr
b.eq LOCAL(populateVFPs)
ldp x0, x1, [x21]
ldp x2, x3, [x21,#16]
ldp x4, x5, [x21,#32]
ldp x6, x7, [x21,#48]
LOCAL(populateVFPs):
cmp x22, xzr
b.eq LOCAL(doCall)
ldp d0, d1, [x22]
ldp d2, d3, [x22,#16]
ldp d4, d5, [x22,#32]
ldp d6, d7, [x22,#48]
LOCAL(doCall):
blr x19 // call function
add sp, sp, w20, uxtw // deallocate stack
cmp w23,#FLOAT_TYPE
b.ne LOCAL(double)
fmov w0,s0
b LOCAL(exit)
LOCAL(double):
cmp w23,#DOUBLE_TYPE
b.ne LOCAL(exit)
fmov x0,d0
LOCAL(exit):
ldp x19, x20, [sp,#16]
ldp x21, x22, [sp,#32]
ldr x23, [sp,#48]
ldp x29, x30, [sp],#64
ret
.globl GLOBAL(vmJump)
.align 2
GLOBAL(vmJump):
mov x30, x0
mov x0, x4
mov x1, x5
mov sp, x2
mov x19, x3
br x30
#define CHECKPOINT_THREAD 8
#define CHECKPOINT_STACK 48
.globl GLOBAL(vmRun)
.align 2
GLOBAL(vmRun):
// x0: function
// x1: arguments
// x2: checkpoint
// allocate frame
stp x29, x30, [sp,#-96]!
mov x29, sp
// save callee-saved register values
stp x19, x20, [sp,#16]
stp x21, x22, [sp,#32]
stp x23, x24, [sp,#48]
stp x25, x26, [sp,#64]
stp x27, x28, [sp,#80]
mov x19, sp
str x19, [x2, #CHECKPOINT_STACK]
mov x19, x0
ldr x0, [x2, #CHECKPOINT_THREAD]
blr x19
.globl GLOBAL(vmRun_returnAddress)
.align 2
GLOBAL(vmRun_returnAddress):
ldp x19, x20, [sp,#16]
ldp x21, x22, [sp,#32]
ldp x23, x24, [sp,#48]
ldp x25, x26, [sp,#64]
ldp x27, x28, [sp,#80]
ldp x29, x30, [sp],#96
br x30

View File

@ -43,7 +43,7 @@ inline void compileTimeMemoryBarrier()
#if (defined ARCH_x86_32) || (defined ARCH_x86_64) #if (defined ARCH_x86_32) || (defined ARCH_x86_64)
#include "x86.h" #include "x86.h"
#elif defined ARCH_arm #elif (defined ARCH_arm) || (defined ARCH_arm64)
#include "arm.h" #include "arm.h"
#else #else
#error unsupported architecture #error unsupported architecture

View File

@ -34,7 +34,11 @@
#define THREAD_STATE_IP(state) ((state).FIELD(pc)) #define THREAD_STATE_IP(state) ((state).FIELD(pc))
#define THREAD_STATE_STACK(state) ((state).FIELD(sp)) #define THREAD_STATE_STACK(state) ((state).FIELD(sp))
#if (defined __APPLE__) && (defined ARCH_arm64)
#define THREAD_STATE_THREAD(state) ((state).FIELD(x[19]))
#else
#define THREAD_STATE_THREAD(state) ((state).FIELD(r[8])) #define THREAD_STATE_THREAD(state) ((state).FIELD(r[8]))
#endif
#define THREAD_STATE_LINK(state) ((state).FIELD(lr)) #define THREAD_STATE_LINK(state) ((state).FIELD(lr))
#define IP_REGISTER(context) THREAD_STATE_IP(context->uc_mcontext->FIELD(ss)) #define IP_REGISTER(context) THREAD_STATE_IP(context->uc_mcontext->FIELD(ss))
@ -53,10 +57,17 @@
#define THREAD_REGISTER(context) (context->uc_mcontext.cpu.gpr[ARM_REG_IP]) #define THREAD_REGISTER(context) (context->uc_mcontext.cpu.gpr[ARM_REG_IP])
#define LINK_REGISTER(context) (context->uc_mcontext.cpu.gpr[ARM_REG_LR]) #define LINK_REGISTER(context) (context->uc_mcontext.cpu.gpr[ARM_REG_LR])
#else #else
#ifdef ARCH_arm
#define IP_REGISTER(context) (context->uc_mcontext.arm_pc) #define IP_REGISTER(context) (context->uc_mcontext.arm_pc)
#define STACK_REGISTER(context) (context->uc_mcontext.arm_sp) #define STACK_REGISTER(context) (context->uc_mcontext.arm_sp)
#define THREAD_REGISTER(context) (context->uc_mcontext.arm_ip) #define THREAD_REGISTER(context) (context->uc_mcontext.arm_ip)
#define LINK_REGISTER(context) (context->uc_mcontext.arm_lr) #define LINK_REGISTER(context) (context->uc_mcontext.arm_lr)
#else
#define IP_REGISTER(context) (context->uc_mcontext.pc)
#define STACK_REGISTER(context) (context->uc_mcontext.sp)
#define THREAD_REGISTER(context) (context->uc_mcontext.regs[19])
#define LINK_REGISTER(context) (context->uc_mcontext.regs[30])
#endif
#endif #endif
#define VA_LIST(x) (&(x)) #define VA_LIST(x) (&(x))
@ -76,7 +87,7 @@ inline void trap()
#ifdef _MSC_VER #ifdef _MSC_VER
__debugbreak(); __debugbreak();
#else #else
asm("bkpt"); asm("brk 0");
#endif #endif
} }
@ -162,6 +173,8 @@ inline bool atomicCompareAndSwap32(uint32_t* p, uint32_t old, uint32_t new_)
old, new_, reinterpret_cast<int32_t*>(p)); old, new_, reinterpret_cast<int32_t*>(p));
#elif(defined __QNX__) #elif(defined __QNX__)
return old == _smp_cmpxchg(p, old, new_); return old == _smp_cmpxchg(p, old, new_);
#elif (defined ARCH_arm64)
return __sync_bool_compare_and_swap(p, old, new_);
#else #else
int r = __kernel_cmpxchg( int r = __kernel_cmpxchg(
static_cast<int>(old), static_cast<int>(new_), reinterpret_cast<int*>(p)); static_cast<int>(old), static_cast<int>(new_), reinterpret_cast<int*>(p));
@ -169,10 +182,22 @@ inline bool atomicCompareAndSwap32(uint32_t* p, uint32_t old, uint32_t new_)
#endif #endif
} }
#ifdef ARCH_arm64
inline bool atomicCompareAndSwap64(uint64_t* p, uint64_t old, uint64_t new_)
{
return __sync_bool_compare_and_swap(p, old, new_);
}
inline bool atomicCompareAndSwap(uintptr_t* p, uintptr_t old, uintptr_t new_)
{
return atomicCompareAndSwap64(reinterpret_cast<uint64_t*>(p), old, new_);
}
#else
inline bool atomicCompareAndSwap(uintptr_t* p, uintptr_t old, uintptr_t new_) inline bool atomicCompareAndSwap(uintptr_t* p, uintptr_t old, uintptr_t new_)
{ {
return atomicCompareAndSwap32(reinterpret_cast<uint32_t*>(p), old, new_); return atomicCompareAndSwap32(reinterpret_cast<uint32_t*>(p), old, new_);
} }
#endif
inline uint64_t dynamicCall(void* function, inline uint64_t dynamicCall(void* function,
uintptr_t* arguments, uintptr_t* arguments,
@ -181,17 +206,17 @@ inline uint64_t dynamicCall(void* function,
unsigned argumentsSize UNUSED, unsigned argumentsSize UNUSED,
unsigned returnType) unsigned returnType)
{ {
#ifdef __APPLE__ #if (defined __APPLE__) || (defined ARCH_arm64)
const unsigned Alignment = 1; const unsigned Alignment = 1;
#else #else
const unsigned Alignment = 2; const unsigned Alignment = 2;
#endif #endif
const unsigned GprCount = 4; const unsigned GprCount = BytesPerWord;
uintptr_t gprTable[GprCount]; uintptr_t gprTable[GprCount];
unsigned gprIndex = 0; unsigned gprIndex = 0;
const unsigned VfpCount = 16; const unsigned VfpCount = BytesPerWord == 8 ? 8 : 16;
uintptr_t vfpTable[VfpCount]; uintptr_t vfpTable[VfpCount];
unsigned vfpIndex = 0; unsigned vfpIndex = 0;
unsigned vfpBackfillIndex UNUSED = 0; unsigned vfpBackfillIndex UNUSED = 0;
@ -206,7 +231,7 @@ inline uint64_t dynamicCall(void* function,
for (unsigned ati = 0; ati < argumentCount; ++ati) { for (unsigned ati = 0; ati < argumentCount; ++ati) {
switch (argumentTypes[ati]) { switch (argumentTypes[ati]) {
case DOUBLE_TYPE: case DOUBLE_TYPE:
#if defined(__ARM_PCS_VFP) #if (defined __ARM_PCS_VFP) || (defined ARCH_arm64)
{ {
if (vfpIndex + Alignment <= VfpCount) { if (vfpIndex + Alignment <= VfpCount) {
if (vfpIndex % Alignment) { if (vfpIndex % Alignment) {

View File

@ -611,11 +611,11 @@ void intercept(Thread* t,
if (m) { if (m) {
PROTECT(t, m); PROTECT(t, m);
m->flags() |= ACC_NATIVE;
if (updateRuntimeData) { if (updateRuntimeData) {
GcMethod* clone = methodClone(t, m); GcMethod* clone = methodClone(t, m);
m->flags() |= ACC_NATIVE;
// make clone private to prevent vtable updates at compilation // make clone private to prevent vtable updates at compilation
// time. Otherwise, our interception might be bypassed by calls // time. Otherwise, our interception might be bypassed by calls
// through the vtable. // through the vtable.
@ -628,6 +628,8 @@ void intercept(Thread* t,
GcMethodRuntimeData* runtimeData = getMethodRuntimeData(t, m); GcMethodRuntimeData* runtimeData = getMethodRuntimeData(t, m);
runtimeData->setNative(t, native->as<GcNative>(t)); runtimeData->setNative(t, native->as<GcNative>(t));
} else {
m->flags() |= ACC_NATIVE;
} }
} else { } else {
// If we can't find the method, just ignore it, since ProGuard may // If we can't find the method, just ignore it, since ProGuard may

View File

@ -116,6 +116,8 @@ typedef intptr_t intptr_alias_t;
#define ARCH_x86_64 #define ARCH_x86_64
#elif defined __arm__ #elif defined __arm__
#define ARCH_arm #define ARCH_arm
#elif defined __aarch64__
#define ARCH_arm64
#else #else
#error "unsupported architecture" #error "unsupported architecture"
#endif #endif

View File

@ -28,5 +28,6 @@
#define AVIAN_ARCH_X86 (1 << 8) #define AVIAN_ARCH_X86 (1 << 8)
#define AVIAN_ARCH_X86_64 (2 << 8) #define AVIAN_ARCH_X86_64 (2 << 8)
#define AVIAN_ARCH_ARM (3 << 8) #define AVIAN_ARCH_ARM (3 << 8)
#define AVIAN_ARCH_ARM64 (4 << 8)
#endif #endif

View File

@ -1576,6 +1576,10 @@ int64_t JNICALL
ZipFile::Entry* find(ZipFile* file, const char* path, unsigned pathLength) ZipFile::Entry* find(ZipFile* file, const char* path, unsigned pathLength)
{ {
if (pathLength > 0 && path[0] == '/') {
++path;
--pathLength;
}
unsigned i = hash(path) & (file->indexSize - 1); unsigned i = hash(path) & (file->indexSize - 1);
for (ZipFile::Entry* e = file->index[i]; e; e = e->next) { for (ZipFile::Entry* e = file->index[i]; e; e = e->next) {
const uint8_t* p = e->start; const uint8_t* p = e->start;
@ -1601,13 +1605,17 @@ int64_t JNICALL
memcpy(RUNTIME_ARRAY_BODY(p), path->body().begin(), path->length()); memcpy(RUNTIME_ARRAY_BODY(p), path->body().begin(), path->length());
RUNTIME_ARRAY_BODY(p)[path->length()] = 0; RUNTIME_ARRAY_BODY(p)[path->length()] = 0;
replace('\\', '/', RUNTIME_ARRAY_BODY(p)); replace('\\', '/', RUNTIME_ARRAY_BODY(p));
if (addSlash) {
ZipFile::Entry *e = find(file, RUNTIME_ARRAY_BODY(p), path->length());
if (e == 0 and addSlash and RUNTIME_ARRAY_BODY(p)[path->length()] != '/') {
RUNTIME_ARRAY_BODY(p)[path->length()] = '/'; RUNTIME_ARRAY_BODY(p)[path->length()] = '/';
RUNTIME_ARRAY_BODY(p)[path->length() + 1] = 0; RUNTIME_ARRAY_BODY(p)[path->length() + 1] = 0;
e = find(file, RUNTIME_ARRAY_BODY(p), path->length());
} }
return reinterpret_cast<int64_t>( return reinterpret_cast<int64_t>(e);
find(file, RUNTIME_ARRAY_BODY(p), path->length()));
} else { } else {
int64_t entry int64_t entry
= cast<GcLong>(t, = cast<GcLong>(t,
@ -1792,10 +1800,10 @@ void JNICALL freeZipFileEntry(Thread* t, GcMethod* method, uintptr_t* arguments)
0, 0,
file->file, file->file,
entry->entry); entry->entry);
}
t->m->heap->free(entry, sizeof(ZipFile::Entry)); t->m->heap->free(entry, sizeof(ZipFile::Entry));
} }
}
int64_t JNICALL int64_t JNICALL
readZipFileEntry(Thread* t, GcMethod* method, uintptr_t* arguments) readZipFileEntry(Thread* t, GcMethod* method, uintptr_t* arguments)

View File

@ -1,6 +1,5 @@
add_library (avian_codegen add_library (avian_codegen
compiler.cpp compiler.cpp
registers.cpp
runtime.cpp runtime.cpp
targets.cpp targets.cpp

View File

@ -256,10 +256,10 @@ Site* pickTargetSite(Context* c,
expect(c, target.cost < Target::Impossible); expect(c, target.cost < Target::Impossible);
if (target.type == lir::MemoryOperand) { if (target.type == lir::Operand::Type::Memory) {
return frameSite(c, target.index); return frameSite(c, target.index);
} else { } else {
return registerSite(c, target.index); return registerSite(c, Register(target.index));
} }
} }
@ -342,7 +342,7 @@ Site* maybeMove(Context* c,
OperandMask src; OperandMask src;
OperandMask tmp; OperandMask tmp;
c->arch->planMove( c->arch->planMove(
size, src, tmp, OperandMask(dstMask.typeMask, dstMask.registerMask)); size, src, tmp, OperandMask(dstMask.typeMask, dstMask.registerMask, 0));
SiteMask srcMask = SiteMask::lowPart(src); SiteMask srcMask = SiteMask::lowPart(src);
for (SiteIterator it(c, value, true, includeNextWord); it.hasMore();) { for (SiteIterator it(c, value, true, includeNextWord); it.hasMore();) {
@ -369,7 +369,7 @@ Site* maybeMove(Context* c,
size, size,
src, src,
tmp, tmp,
OperandMask(1 << dstSite->type(c), dstSite->registerMask(c))); OperandMask(1 << (unsigned)dstSite->type(c), dstSite->registerMask(c), 0));
SiteMask srcMask = SiteMask::lowPart(src); SiteMask srcMask = SiteMask::lowPart(src);
unsigned cost = 0xFFFFFFFF; unsigned cost = 0xFFFFFFFF;
@ -514,15 +514,15 @@ void steal(Context* c, Resource* r, Value* thief)
SiteMask generalRegisterMask(Context* c) SiteMask generalRegisterMask(Context* c)
{ {
return SiteMask(1 << lir::RegisterOperand, return SiteMask(lir::Operand::RegisterPairMask,
c->regFile->generalRegisters.mask, c->regFile->generalRegisters,
NoFrameIndex); NoFrameIndex);
} }
SiteMask generalRegisterOrConstantMask(Context* c) SiteMask generalRegisterOrConstantMask(Context* c)
{ {
return SiteMask((1 << lir::RegisterOperand) | (1 << lir::ConstantOperand), return SiteMask(lir::Operand::RegisterPairMask | lir::Operand::ConstantMask,
c->regFile->generalRegisters.mask, c->regFile->generalRegisters,
NoFrameIndex); NoFrameIndex);
} }
@ -616,11 +616,11 @@ bool isHome(Value* v, int frameIndex)
bool acceptForResolve(Context* c, Site* s, Read* read, const SiteMask& mask) bool acceptForResolve(Context* c, Site* s, Read* read, const SiteMask& mask)
{ {
if (acceptMatch(c, s, read, mask) and (not s->frozen(c))) { if (acceptMatch(c, s, read, mask) and (not s->frozen(c))) {
if (s->type(c) == lir::RegisterOperand) { if (s->type(c) == lir::Operand::Type::RegisterPair) {
return c->availableGeneralRegisterCount > ResolveRegisterReserveCount; return c->availableGeneralRegisterCount > ResolveRegisterReserveCount;
} else { } else {
assertT(c, assertT(c,
s->match(c, SiteMask(1 << lir::MemoryOperand, 0, AnyFrameIndex))); s->match(c, SiteMask(lir::Operand::MemoryMask, 0, AnyFrameIndex)));
return isHome(read->value, return isHome(read->value,
offsetToFrameIndex(c, static_cast<MemorySite*>(s)->offset)); offsetToFrameIndex(c, static_cast<MemorySite*>(s)->offset));
@ -698,7 +698,7 @@ void apply(Context* c,
{ {
assertT(c, s1Low->type(c) == s1High->type(c)); assertT(c, s1Low->type(c) == s1High->type(c));
lir::OperandType s1Type = s1Low->type(c); lir::Operand::Type s1Type = s1Low->type(c);
OperandUnion s1Union; OperandUnion s1Union;
asAssemblerOperand(c, s1Low, s1High, &s1Union); asAssemblerOperand(c, s1Low, s1High, &s1Union);
@ -717,11 +717,11 @@ void apply(Context* c,
assertT(c, s1Low->type(c) == s1High->type(c)); assertT(c, s1Low->type(c) == s1High->type(c));
assertT(c, s2Low->type(c) == s2High->type(c)); assertT(c, s2Low->type(c) == s2High->type(c));
lir::OperandType s1Type = s1Low->type(c); lir::Operand::Type s1Type = s1Low->type(c);
OperandUnion s1Union; OperandUnion s1Union;
asAssemblerOperand(c, s1Low, s1High, &s1Union); asAssemblerOperand(c, s1Low, s1High, &s1Union);
lir::OperandType s2Type = s2Low->type(c); lir::Operand::Type s2Type = s2Low->type(c);
OperandUnion s2Union; OperandUnion s2Union;
asAssemblerOperand(c, s2Low, s2High, &s2Union); asAssemblerOperand(c, s2Low, s2High, &s2Union);
@ -746,15 +746,15 @@ void apply(Context* c,
assertT(c, s2Low->type(c) == s2High->type(c)); assertT(c, s2Low->type(c) == s2High->type(c));
assertT(c, s3Low->type(c) == s3High->type(c)); assertT(c, s3Low->type(c) == s3High->type(c));
lir::OperandType s1Type = s1Low->type(c); lir::Operand::Type s1Type = s1Low->type(c);
OperandUnion s1Union; OperandUnion s1Union;
asAssemblerOperand(c, s1Low, s1High, &s1Union); asAssemblerOperand(c, s1Low, s1High, &s1Union);
lir::OperandType s2Type = s2Low->type(c); lir::Operand::Type s2Type = s2Low->type(c);
OperandUnion s2Union; OperandUnion s2Union;
asAssemblerOperand(c, s2Low, s2High, &s2Union); asAssemblerOperand(c, s2Low, s2High, &s2Union);
lir::OperandType s3Type = s3Low->type(c); lir::Operand::Type s3Type = s3Low->type(c);
OperandUnion s3Union; OperandUnion s3Union;
asAssemblerOperand(c, s3Low, s3High, &s3Union); asAssemblerOperand(c, s3Low, s3High, &s3Union);
@ -782,7 +782,7 @@ void saveLocals(Context* c, Event* e)
e->addRead( e->addRead(
c, c,
local->value, local->value,
SiteMask(1 << lir::MemoryOperand, 0, compiler::frameIndex(c, li))); SiteMask(lir::Operand::MemoryMask, 0, compiler::frameIndex(c, li)));
} }
} }
} }
@ -815,10 +815,10 @@ void maybeMove(Context* c,
if (cost) { if (cost) {
// todo: let c->arch->planMove decide this: // todo: let c->arch->planMove decide this:
bool useTemporary = ((target->type(c) == lir::MemoryOperand bool useTemporary = ((target->type(c) == lir::Operand::Type::Memory
and srcValue->source->type(c) == lir::MemoryOperand) and srcValue->source->type(c) == lir::Operand::Type::Memory)
or (srcSelectSize < dstSize or (srcSelectSize < dstSize
and target->type(c) != lir::RegisterOperand)); and target->type(c) != lir::Operand::Type::RegisterPair));
srcValue->source->freeze(c, srcValue); srcValue->source->freeze(c, srcValue);
@ -827,7 +827,7 @@ void maybeMove(Context* c,
srcValue->source->thaw(c, srcValue); srcValue->source->thaw(c, srcValue);
bool addOffset = srcSize != srcSelectSize and c->arch->bigEndian() bool addOffset = srcSize != srcSelectSize and c->arch->bigEndian()
and srcValue->source->type(c) == lir::MemoryOperand; and srcValue->source->type(c) == lir::Operand::Type::Memory;
if (addOffset) { if (addOffset) {
static_cast<MemorySite*>(srcValue->source)->offset static_cast<MemorySite*>(srcValue->source)->offset
@ -874,14 +874,14 @@ void maybeMove(Context* c,
c->arch->planSource(op, dstSize, src, dstSize, &thunk); c->arch->planSource(op, dstSize, src, dstSize, &thunk);
if (isGeneralValue(srcValue)) { if (isGeneralValue(srcValue)) {
src.registerMask &= c->regFile->generalRegisters.mask; src.lowRegisterMask &= c->regFile->generalRegisters;
} }
assertT(c, thunk == 0); assertT(c, thunk == 0);
assertT(c, dstMask.typeMask & src.typeMask & (1 << lir::RegisterOperand)); assertT(c, dstMask.typeMask & src.typeMask & lir::Operand::RegisterPairMask);
Site* tmpTarget Site* tmpTarget
= freeRegisterSite(c, dstMask.registerMask & src.registerMask); = freeRegisterSite(c, dstMask.registerMask & src.lowRegisterMask);
srcValue->source->freeze(c, srcValue); srcValue->source->freeze(c, srcValue);
@ -1635,8 +1635,8 @@ bool resolveSourceSites(Context* c,
Read* r = live(c, v); Read* r = live(c, v);
if (r and sites[el.localIndex] == 0) { if (r and sites[el.localIndex] == 0) {
SiteMask mask((1 << lir::RegisterOperand) | (1 << lir::MemoryOperand), SiteMask mask(lir::Operand::RegisterPairMask | lir::Operand::MemoryMask,
c->regFile->generalRegisters.mask, c->regFile->generalRegisters,
AnyFrameIndex); AnyFrameIndex);
Site* s = pickSourceSite( Site* s = pickSourceSite(
@ -1677,8 +1677,8 @@ void resolveTargetSites(Context* c,
Read* r = live(c, v); Read* r = live(c, v);
if (r and sites[el.localIndex] == 0) { if (r and sites[el.localIndex] == 0) {
SiteMask mask((1 << lir::RegisterOperand) | (1 << lir::MemoryOperand), SiteMask mask(lir::Operand::RegisterPairMask | lir::Operand::MemoryMask,
c->regFile->generalRegisters.mask, c->regFile->generalRegisters,
AnyFrameIndex); AnyFrameIndex);
Site* s = pickSourceSite( Site* s = pickSourceSite(
@ -2210,24 +2210,24 @@ class Client : public Assembler::Client {
{ {
} }
virtual int acquireTemporary(uint32_t mask) virtual Register acquireTemporary(RegisterMask mask)
{ {
unsigned cost; unsigned cost;
int r = pickRegisterTarget(c, 0, mask, &cost); Register r = pickRegisterTarget(c, 0, mask, &cost);
expect(c, cost < Target::Impossible); expect(c, cost < Target::Impossible);
save(r); save(r);
c->registerResources[r].increment(c); c->registerResources[r.index()].increment(c);
return r; return r;
} }
virtual void releaseTemporary(int r) virtual void releaseTemporary(Register r)
{ {
c->registerResources[r].decrement(c); c->registerResources[r.index()].decrement(c);
} }
virtual void save(int r) virtual void save(Register r)
{ {
RegisterResource* reg = c->registerResources + r; RegisterResource* reg = c->registerResources + r.index();
assertT(c, reg->referenceCount == 0); assertT(c, reg->referenceCount == 0);
assertT(c, reg->freezeCount == 0); assertT(c, reg->freezeCount == 0);

View File

@ -53,19 +53,15 @@ Context::Context(vm::System* system,
- regFile->generalRegisters.start), - regFile->generalRegisters.start),
targetInfo(arch->targetInfo()) targetInfo(arch->targetInfo())
{ {
for (unsigned i = regFile->generalRegisters.start; for (Register i : regFile->generalRegisters) {
i < regFile->generalRegisters.limit; new (registerResources + i.index()) RegisterResource(arch->reserved(i));
++i) {
new (registerResources + i) RegisterResource(arch->reserved(i));
if (registerResources[i].reserved) { if (registerResources[i.index()].reserved) {
--availableGeneralRegisterCount; --availableGeneralRegisterCount;
} }
} }
for (unsigned i = regFile->floatRegisters.start; for (Register i : regFile->floatRegisters) {
i < regFile->floatRegisters.limit; new (registerResources + i.index()) RegisterResource(arch->reserved(i));
++i) {
new (registerResources + i) RegisterResource(arch->reserved(i));
} }
} }

View File

@ -372,7 +372,7 @@ class CallEvent : public Event {
? arguments.count ? arguments.count
: 0) : 0)
{ {
uint32_t registerMask = c->regFile->generalRegisters.mask; RegisterMask registerMask = c->regFile->generalRegisters;
if (callingConvention == ir::CallingConvention::Native) { if (callingConvention == ir::CallingConvention::Native) {
assertT(c, (flags & Compiler::TailJump) == 0); assertT(c, (flags & Compiler::TailJump) == 0);
@ -396,14 +396,14 @@ class CallEvent : public Event {
SiteMask targetMask; SiteMask targetMask;
if (index + (c->arch->argumentRegisterAlignment() ? footprint : 1) if (index + (c->arch->argumentRegisterAlignment() ? footprint : 1)
<= c->arch->argumentRegisterCount()) { <= c->arch->argumentRegisterCount()) {
int number = c->arch->argumentRegister(index); Register number = c->arch->argumentRegister(index);
if (DebugReads) { if (DebugReads) {
fprintf(stderr, "reg %d arg read %p\n", number, v); fprintf(stderr, "reg %d arg read %p\n", number.index(), v);
} }
targetMask = SiteMask::fixedRegisterMask(number); targetMask = SiteMask::fixedRegisterMask(number);
registerMask &= ~(1 << number); registerMask = registerMask.excluding(number);
} else { } else {
if (index < c->arch->argumentRegisterCount()) { if (index < c->arch->argumentRegisterCount()) {
index = c->arch->argumentRegisterCount(); index = c->arch->argumentRegisterCount();
@ -415,7 +415,7 @@ class CallEvent : public Event {
fprintf(stderr, "stack %d arg read %p\n", frameIndex, v); fprintf(stderr, "stack %d arg read %p\n", frameIndex, v);
} }
targetMask = SiteMask(1 << lir::MemoryOperand, 0, frameIndex); targetMask = SiteMask(lir::Operand::MemoryMask, 0, frameIndex);
} }
this->addRead(c, v, targetMask); this->addRead(c, v, targetMask);
@ -445,7 +445,7 @@ class CallEvent : public Event {
this->addRead( this->addRead(
c, c,
address, address,
SiteMask(op.typeMask, registerMask & op.registerMask, AnyFrameIndex)); SiteMask(op.typeMask, registerMask & op.lowRegisterMask, AnyFrameIndex));
} }
Stack* stack = stackBefore; Stack* stack = stackBefore;
@ -512,7 +512,7 @@ class CallEvent : public Event {
this->addRead(c, v, generalRegisterMask(c)); this->addRead(c, v, generalRegisterMask(c));
} else { } else {
this->addRead( this->addRead(
c, v, SiteMask(1 << lir::MemoryOperand, 0, frameIndex)); c, v, SiteMask(lir::Operand::MemoryMask, 0, frameIndex));
} }
} }
} }
@ -544,7 +544,7 @@ class CallEvent : public Event {
this->addRead(c, this->addRead(c,
stack->value, stack->value,
SiteMask(1 << lir::MemoryOperand, 0, logicalIndex)); SiteMask(lir::Operand::MemoryMask, 0, logicalIndex));
} }
stack = stack->next; stack = stack->next;
@ -581,29 +581,29 @@ class CallEvent : public Event {
assertT( assertT(
c, c,
returnAddressSurrogate == 0 returnAddressSurrogate == 0
or returnAddressSurrogate->source->type(c) == lir::RegisterOperand); or returnAddressSurrogate->source->type(c) == lir::Operand::Type::RegisterPair);
assertT( assertT(
c, c,
framePointerSurrogate == 0 framePointerSurrogate == 0
or framePointerSurrogate->source->type(c) == lir::RegisterOperand); or framePointerSurrogate->source->type(c) == lir::Operand::Type::RegisterPair);
int ras; Register ras;
if (returnAddressSurrogate) { if (returnAddressSurrogate) {
returnAddressSurrogate->source->freeze(c, returnAddressSurrogate); returnAddressSurrogate->source->freeze(c, returnAddressSurrogate);
ras = static_cast<RegisterSite*>(returnAddressSurrogate->source) ras = static_cast<RegisterSite*>(returnAddressSurrogate->source)
->number; ->number;
} else { } else {
ras = lir::NoRegister; ras = NoRegister;
} }
int fps; Register fps;
if (framePointerSurrogate) { if (framePointerSurrogate) {
framePointerSurrogate->source->freeze(c, framePointerSurrogate); framePointerSurrogate->source->freeze(c, framePointerSurrogate);
fps = static_cast<RegisterSite*>(framePointerSurrogate->source)->number; fps = static_cast<RegisterSite*>(framePointerSurrogate->source)->number;
} else { } else {
fps = lir::NoRegister; fps = NoRegister;
} }
int offset = static_cast<int>(footprint) int offset = static_cast<int>(footprint)
@ -783,9 +783,9 @@ class MoveEvent : public Event {
op, op,
srcSelectSize, srcSelectSize,
OperandMask( OperandMask(
1 << srcValue->source->type(c), 1 << (unsigned)srcValue->source->type(c),
(static_cast<uint64_t>(srcValue->nextWord->source->registerMask(c)) srcValue->source->registerMask(c),
<< 32) | static_cast<uint64_t>(srcValue->source->registerMask(c))), srcValue->nextWord->source->registerMask(c)),
dstSize, dstSize,
dst); dst);
@ -866,7 +866,7 @@ class MoveEvent : public Event {
assertT(c, srcSelectSize == c->targetInfo.pointerSize); assertT(c, srcSelectSize == c->targetInfo.pointerSize);
if (dstValue->nextWord->target or live(c, dstValue->nextWord)) { if (dstValue->nextWord->target or live(c, dstValue->nextWord)) {
assertT(c, dstLowMask.typeMask & (1 << lir::RegisterOperand)); assertT(c, dstLowMask.typeMask & lir::Operand::RegisterPairMask);
Site* low = freeRegisterSite(c, dstLowMask.registerMask); Site* low = freeRegisterSite(c, dstLowMask.registerMask);
@ -897,7 +897,7 @@ class MoveEvent : public Event {
srcValue->source->thaw(c, srcValue); srcValue->source->thaw(c, srcValue);
assertT(c, dstHighMask.typeMask & (1 << lir::RegisterOperand)); assertT(c, dstHighMask.typeMask & lir::Operand::RegisterPairMask);
Site* high = freeRegisterSite(c, dstHighMask.registerMask); Site* high = freeRegisterSite(c, dstHighMask.registerMask);
@ -1126,18 +1126,14 @@ class CombineEvent : public Event {
op, op,
firstValue->type.size(c->targetInfo), firstValue->type.size(c->targetInfo),
OperandMask( OperandMask(
1 << firstValue->source->type(c), 1 << (unsigned)firstValue->source->type(c),
(static_cast<uint64_t>( firstValue->source->registerMask(c),
firstValue->nextWord->source->registerMask(c)) firstValue->nextWord->source->registerMask(c)),
<< 32)
| static_cast<uint64_t>(firstValue->source->registerMask(c))),
secondValue->type.size(c->targetInfo), secondValue->type.size(c->targetInfo),
OperandMask( OperandMask(
1 << secondValue->source->type(c), 1 << (unsigned)secondValue->source->type(c),
(static_cast<uint64_t>( secondValue->source->registerMask(c),
secondValue->nextWord->source->registerMask(c)) secondValue->nextWord->source->registerMask(c)),
<< 32)
| static_cast<uint64_t>(secondValue->source->registerMask(c))),
resultValue->type.size(c->targetInfo), resultValue->type.size(c->targetInfo),
cMask); cMask);
@ -1318,11 +1314,9 @@ class TranslateEvent : public Event {
op, op,
firstValue->type.size(c->targetInfo), firstValue->type.size(c->targetInfo),
OperandMask( OperandMask(
1 << firstValue->source->type(c), 1 << (unsigned)firstValue->source->type(c),
(static_cast<uint64_t>( firstValue->source->registerMask(c),
firstValue->nextWord->source->registerMask(c)) firstValue->nextWord->source->registerMask(c)),
<< 32)
| static_cast<uint64_t>(firstValue->source->registerMask(c))),
resultValue->type.size(c->targetInfo), resultValue->type.size(c->targetInfo),
bMask); bMask);
@ -1457,7 +1451,7 @@ ConstantSite* findConstantSite(Context* c, Value* v)
{ {
for (SiteIterator it(c, v); it.hasMore();) { for (SiteIterator it(c, v); it.hasMore();) {
Site* s = it.next(); Site* s = it.next();
if (s->type(c) == lir::ConstantOperand) { if (s->type(c) == lir::Operand::Type::Constant) {
return static_cast<ConstantSite*>(s); return static_cast<ConstantSite*>(s);
} }
} }
@ -1467,7 +1461,7 @@ ConstantSite* findConstantSite(Context* c, Value* v)
void moveIfConflict(Context* c, Value* v, MemorySite* s) void moveIfConflict(Context* c, Value* v, MemorySite* s)
{ {
if (v->reads) { if (v->reads) {
SiteMask mask(1 << lir::RegisterOperand, ~0, AnyFrameIndex); SiteMask mask(lir::Operand::RegisterPairMask, ~0, AnyFrameIndex);
v->reads->intersect(&mask); v->reads->intersect(&mask);
if (s->conflicts(mask)) { if (s->conflicts(mask)) {
maybeMove(c, v->reads, true, false); maybeMove(c, v->reads, true, false);
@ -1504,29 +1498,29 @@ class MemoryEvent : public Event {
virtual void compile(Context* c) virtual void compile(Context* c)
{ {
int indexRegister; Register indexRegister;
int displacement = this->displacement; int displacement = this->displacement;
unsigned scale = this->scale; unsigned scale = this->scale;
if (index) { if (index) {
ConstantSite* constant = findConstantSite(c, index); ConstantSite* constant = findConstantSite(c, index);
if (constant) { if (constant) {
indexRegister = lir::NoRegister; indexRegister = NoRegister;
displacement += (constant->value->value() * scale); displacement += (constant->value->value() * scale);
scale = 1; scale = 1;
} else { } else {
assertT(c, index->source->type(c) == lir::RegisterOperand); assertT(c, index->source->type(c) == lir::Operand::Type::RegisterPair);
indexRegister = static_cast<RegisterSite*>(index->source)->number; indexRegister = static_cast<RegisterSite*>(index->source)->number;
} }
} else { } else {
indexRegister = lir::NoRegister; indexRegister = NoRegister;
} }
assertT(c, base->source->type(c) == lir::RegisterOperand); assertT(c, base->source->type(c) == lir::Operand::Type::RegisterPair);
int baseRegister = static_cast<RegisterSite*>(base->source)->number; Register baseRegister = static_cast<RegisterSite*>(base->source)->number;
popRead(c, this, base); popRead(c, this, base);
if (index) { if (index) {
if (c->targetInfo.pointerSize == 8 and indexRegister != lir::NoRegister) { if (c->targetInfo.pointerSize == 8 and indexRegister != NoRegister) {
apply(c, apply(c,
lir::Move, lir::Move,
4, 4,
@ -1718,9 +1712,9 @@ class BranchEvent : public Event {
OperandMask dstMask; OperandMask dstMask;
c->arch->planDestination(op, c->arch->planDestination(op,
firstValue->type.size(c->targetInfo), firstValue->type.size(c->targetInfo),
OperandMask(0, 0), OperandMask(0, 0, 0),
firstValue->type.size(c->targetInfo), firstValue->type.size(c->targetInfo),
OperandMask(0, 0), OperandMask(0, 0, 0),
c->targetInfo.pointerSize, c->targetInfo.pointerSize,
dstMask); dstMask);
@ -1879,12 +1873,12 @@ void clean(Context* c, Value* v, unsigned popIndex)
{ {
for (SiteIterator it(c, v); it.hasMore();) { for (SiteIterator it(c, v); it.hasMore();) {
Site* s = it.next(); Site* s = it.next();
if (not(s->match(c, SiteMask(1 << lir::MemoryOperand, 0, AnyFrameIndex)) if (not(s->match(c, SiteMask(lir::Operand::MemoryMask, 0, AnyFrameIndex))
and offsetToFrameIndex(c, static_cast<MemorySite*>(s)->offset) and offsetToFrameIndex(c, static_cast<MemorySite*>(s)->offset)
>= popIndex)) { >= popIndex)) {
if (false if (false
and s->match(c, and s->match(c,
SiteMask(1 << lir::MemoryOperand, 0, AnyFrameIndex))) { SiteMask(lir::Operand::MemoryMask, 0, AnyFrameIndex))) {
char buffer[256]; char buffer[256];
s->toString(c, buffer, 256); s->toString(c, buffer, 256);
fprintf(stderr, fprintf(stderr,
@ -2016,7 +2010,7 @@ class BoundsCheckEvent : public Event {
lir::Constant handlerConstant(resolvedPromise(c, handler)); lir::Constant handlerConstant(resolvedPromise(c, handler));
a->apply(lir::Call, a->apply(lir::Call,
OperandInfo(c->targetInfo.pointerSize, OperandInfo(c->targetInfo.pointerSize,
lir::ConstantOperand, lir::Operand::Type::Constant,
&handlerConstant)); &handlerConstant));
} }
} else { } else {
@ -2038,10 +2032,10 @@ class BoundsCheckEvent : public Event {
} }
if (constant == 0 or constant->value->value() >= 0) { if (constant == 0 or constant->value->value() >= 0) {
assertT(c, object->source->type(c) == lir::RegisterOperand); assertT(c, object->source->type(c) == lir::Operand::Type::RegisterPair);
MemorySite length(static_cast<RegisterSite*>(object->source)->number, MemorySite length(static_cast<RegisterSite*>(object->source)->number,
lengthOffset, lengthOffset,
lir::NoRegister, NoRegister,
1); 1);
length.acquired = true; length.acquired = true;
@ -2072,7 +2066,7 @@ class BoundsCheckEvent : public Event {
lir::Constant handlerConstant(resolvedPromise(c, handler)); lir::Constant handlerConstant(resolvedPromise(c, handler));
a->apply(lir::Call, a->apply(lir::Call,
OperandInfo(c->targetInfo.pointerSize, OperandInfo(c->targetInfo.pointerSize,
lir::ConstantOperand, lir::Operand::Type::Constant,
&handlerConstant)); &handlerConstant));
nextPromise->offset = a->offset(); nextPromise->offset = a->offset();

View File

@ -205,7 +205,7 @@ Read* StubRead::next(Context*)
SingleRead* read(Context* c, const SiteMask& mask, Value* successor) SingleRead* read(Context* c, const SiteMask& mask, Value* successor)
{ {
assertT(c, assertT(c,
(mask.typeMask != 1 << lir::MemoryOperand) or mask.frameIndex >= 0); (mask.typeMask != lir::Operand::MemoryMask) or mask.frameIndex >= 0);
return new (c->zone) SingleRead(mask, successor); return new (c->zone) SingleRead(mask, successor);
} }

View File

@ -57,24 +57,24 @@ unsigned resourceCost(Context* c,
} }
bool pickRegisterTarget(Context* c, bool pickRegisterTarget(Context* c,
int i, Register i,
Value* v, Value* v,
uint32_t mask, RegisterMask mask,
int* target, Register* target,
unsigned* cost, unsigned* cost,
CostCalculator* costCalculator) CostCalculator* costCalculator)
{ {
if ((1 << i) & mask) { if (mask.contains(i)) {
RegisterResource* r = c->registerResources + i; RegisterResource* r = c->registerResources + i.index();
unsigned myCost unsigned myCost
= resourceCost( = resourceCost(
c, c,
v, v,
r, r,
SiteMask(1 << lir::RegisterOperand, 1 << i, NoFrameIndex), SiteMask(lir::Operand::RegisterPairMask, RegisterMask(i), NoFrameIndex),
costCalculator) + Target::MinimumRegisterCost; costCalculator) + Target::MinimumRegisterCost;
if ((static_cast<uint32_t>(1) << i) == mask) { if (mask.containsExactly(i)) {
*cost = myCost; *cost = myCost;
return true; return true;
} else if (myCost < *cost) { } else if (myCost < *cost) {
@ -85,29 +85,25 @@ bool pickRegisterTarget(Context* c,
return false; return false;
} }
int pickRegisterTarget(Context* c, Register pickRegisterTarget(Context* c,
Value* v, Value* v,
uint32_t mask, RegisterMask mask,
unsigned* cost, unsigned* cost,
CostCalculator* costCalculator) CostCalculator* costCalculator)
{ {
int target = lir::NoRegister; Register target = NoRegister;
*cost = Target::Impossible; *cost = Target::Impossible;
if (mask & c->regFile->generalRegisters.mask) { if (mask & c->regFile->generalRegisters) {
for (int i = c->regFile->generalRegisters.limit - 1; for (Register i : c->regFile->generalRegisters) {
i >= c->regFile->generalRegisters.start;
--i) {
if (pickRegisterTarget(c, i, v, mask, &target, cost, costCalculator)) { if (pickRegisterTarget(c, i, v, mask, &target, cost, costCalculator)) {
return i; return i;
} }
} }
} }
if (mask & c->regFile->floatRegisters.mask) { if (mask & c->regFile->floatRegisters) {
for (int i = c->regFile->floatRegisters.start; for (Register i : c->regFile->floatRegisters) {
i < static_cast<int>(c->regFile->floatRegisters.limit);
++i) {
if (pickRegisterTarget(c, i, v, mask, &target, cost, costCalculator)) { if (pickRegisterTarget(c, i, v, mask, &target, cost, costCalculator)) {
return i; return i;
} }
@ -119,12 +115,12 @@ int pickRegisterTarget(Context* c,
Target pickRegisterTarget(Context* c, Target pickRegisterTarget(Context* c,
Value* v, Value* v,
uint32_t mask, RegisterMask mask,
CostCalculator* costCalculator) CostCalculator* costCalculator)
{ {
unsigned cost; unsigned cost;
int number = pickRegisterTarget(c, v, mask, &cost, costCalculator); Register number = pickRegisterTarget(c, v, mask, &cost, costCalculator);
return Target(number, lir::RegisterOperand, cost); return Target(number, cost);
} }
unsigned frameCost(Context* c, unsigned frameCost(Context* c,
@ -135,7 +131,7 @@ unsigned frameCost(Context* c,
return resourceCost(c, return resourceCost(c,
v, v,
c->frameResources + frameIndex, c->frameResources + frameIndex,
SiteMask(1 << lir::MemoryOperand, 0, frameIndex), SiteMask(lir::Operand::MemoryMask, 0, frameIndex),
costCalculator) + Target::MinimumFrameCost; costCalculator) + Target::MinimumFrameCost;
} }
@ -147,7 +143,7 @@ Target pickFrameTarget(Context* c, Value* v, CostCalculator* costCalculator)
do { do {
if (p->home >= 0) { if (p->home >= 0) {
Target mine(p->home, Target mine(p->home,
lir::MemoryOperand, lir::Operand::Type::Memory,
frameCost(c, v, p->home, costCalculator)); frameCost(c, v, p->home, costCalculator));
if (mine.cost == Target::MinimumFrameCost) { if (mine.cost == Target::MinimumFrameCost) {
@ -168,7 +164,7 @@ Target pickAnyFrameTarget(Context* c, Value* v, CostCalculator* costCalculator)
unsigned count = totalFrameSize(c); unsigned count = totalFrameSize(c);
for (unsigned i = 0; i < count; ++i) { for (unsigned i = 0; i < count; ++i) {
Target mine(i, lir::MemoryOperand, frameCost(c, v, i, costCalculator)); Target mine(i, lir::Operand::Type::Memory, frameCost(c, v, i, costCalculator));
if (mine.cost == Target::MinimumFrameCost) { if (mine.cost == Target::MinimumFrameCost) {
return mine; return mine;
} else if (mine.cost < best.cost) { } else if (mine.cost < best.cost) {
@ -186,7 +182,7 @@ Target pickTarget(Context* c,
Target best, Target best,
CostCalculator* costCalculator) CostCalculator* costCalculator)
{ {
if (mask.typeMask & (1 << lir::RegisterOperand)) { if (mask.typeMask & lir::Operand::RegisterPairMask) {
Target mine Target mine
= pickRegisterTarget(c, value, mask.registerMask, costCalculator); = pickRegisterTarget(c, value, mask.registerMask, costCalculator);
@ -198,10 +194,10 @@ Target pickTarget(Context* c,
} }
} }
if (mask.typeMask & (1 << lir::MemoryOperand)) { if (mask.typeMask & lir::Operand::MemoryMask) {
if (mask.frameIndex >= 0) { if (mask.frameIndex >= 0) {
Target mine(mask.frameIndex, Target mine(mask.frameIndex,
lir::MemoryOperand, lir::Operand::Type::Memory,
frameCost(c, value, mask.frameIndex, costCalculator)); frameCost(c, value, mask.frameIndex, costCalculator));
if (mine.cost == Target::MinimumFrameCost) { if (mine.cost == Target::MinimumFrameCost) {
return mine; return mine;
@ -234,14 +230,14 @@ Target pickTarget(Context* c,
Value* value = read->value; Value* value = read->value;
uint32_t registerMask RegisterMask registerMask
= (isFloatValue(value) ? ~0 : c->regFile->generalRegisters.mask); = (isFloatValue(value) ? AnyRegisterMask : (RegisterMask)c->regFile->generalRegisters);
SiteMask mask(~0, registerMask, AnyFrameIndex); SiteMask mask(~0, registerMask, AnyFrameIndex);
read->intersect(&mask); read->intersect(&mask);
if (isFloatValue(value)) { if (isFloatValue(value)) {
uint32_t floatMask = mask.registerMask & c->regFile->floatRegisters.mask; RegisterMask floatMask = mask.registerMask & c->regFile->floatRegisters;
if (floatMask) { if (floatMask) {
mask.registerMask = floatMask; mask.registerMask = floatMask;
} }
@ -273,9 +269,9 @@ Target pickTarget(Context* c,
if (intersectRead) { if (intersectRead) {
if (best.cost == Target::Impossible) { if (best.cost == Target::Impossible) {
fprintf(stderr, fprintf(stderr,
"mask type %d reg %d frame %d\n", "mask type %d reg %" LLD " frame %d\n",
mask.typeMask, mask.typeMask,
mask.registerMask, (uint64_t)mask.registerMask,
mask.frameIndex); mask.frameIndex);
abort(c); abort(c);
} }

View File

@ -55,13 +55,18 @@ class Target {
{ {
} }
Target(int index, lir::OperandType type, unsigned cost) Target(int16_t index, lir::Operand::Type type, unsigned cost)
: index(index), type(type), cost(cost) : index(index), type(type), cost(cost)
{ {
} }
Target(Register reg, unsigned cost)
: index(reg.index()), type(lir::Operand::Type::RegisterPair), cost(cost)
{
}
int16_t index; int16_t index;
lir::OperandType type; lir::Operand::Type type;
uint8_t cost; uint8_t cost;
}; };
@ -77,22 +82,22 @@ unsigned resourceCost(Context* c,
CostCalculator* costCalculator); CostCalculator* costCalculator);
bool pickRegisterTarget(Context* c, bool pickRegisterTarget(Context* c,
int i, Register i,
Value* v, Value* v,
uint32_t mask, RegisterMask mask,
int* target, Register* target,
unsigned* cost, unsigned* cost,
CostCalculator* costCalculator = 0); CostCalculator* costCalculator = 0);
int pickRegisterTarget(Context* c, Register pickRegisterTarget(Context* c,
Value* v, Value* v,
uint32_t mask, RegisterMask mask,
unsigned* cost, unsigned* cost,
CostCalculator* costCalculator = 0); CostCalculator* costCalculator = 0);
Target pickRegisterTarget(Context* c, Target pickRegisterTarget(Context* c,
Value* v, Value* v,
uint32_t mask, RegisterMask mask,
CostCalculator* costCalculator = 0); CostCalculator* costCalculator = 0);
unsigned frameCost(Context* c, unsigned frameCost(Context* c,

View File

@ -88,7 +88,7 @@ void RegisterResource::freeze(Context* c, Value* v)
freezeResource(c, this, v); freezeResource(c, this, v);
if (freezeCount == 1 if (freezeCount == 1
and ((1 << index(c)) & c->regFile->generalRegisters.mask)) { and c->regFile->generalRegisters.contains(index(c))) {
decrementAvailableGeneralRegisterCount(c); decrementAvailableGeneralRegisterCount(c);
} }
} }
@ -100,7 +100,7 @@ void RegisterResource::thaw(Context* c, Value* v)
thawResource(c, this, v); thawResource(c, this, v);
if (freezeCount == 0 if (freezeCount == 0
and ((1 << index(c)) & c->regFile->generalRegisters.mask)) { and c->regFile->generalRegisters.contains(index(c))) {
incrementAvailableGeneralRegisterCount(c); incrementAvailableGeneralRegisterCount(c);
} }
} }
@ -113,9 +113,9 @@ unsigned RegisterResource::toString(Context* c,
return vm::snprintf(buffer, bufferSize, "register %d", index(c)); return vm::snprintf(buffer, bufferSize, "register %d", index(c));
} }
unsigned RegisterResource::index(Context* c) Register RegisterResource::index(Context* c)
{ {
return this - c->registerResources; return Register(this - c->registerResources);
} }
void RegisterResource::increment(Context* c) void RegisterResource::increment(Context* c)
@ -130,7 +130,7 @@ void RegisterResource::increment(Context* c)
++this->referenceCount; ++this->referenceCount;
if (this->referenceCount == 1 if (this->referenceCount == 1
and ((1 << this->index(c)) & c->regFile->generalRegisters.mask)) { and c->regFile->generalRegisters.contains(this->index(c))) {
decrementAvailableGeneralRegisterCount(c); decrementAvailableGeneralRegisterCount(c);
} }
} }
@ -150,7 +150,7 @@ void RegisterResource::decrement(Context* c)
--this->referenceCount; --this->referenceCount;
if (this->referenceCount == 0 if (this->referenceCount == 0
and ((1 << this->index(c)) & c->regFile->generalRegisters.mask)) { and c->regFile->generalRegisters.contains(this->index(c))) {
incrementAvailableGeneralRegisterCount(c); incrementAvailableGeneralRegisterCount(c);
} }
} }

View File

@ -48,7 +48,7 @@ class RegisterResource : public Resource {
virtual unsigned toString(Context* c, char* buffer, unsigned bufferSize); virtual unsigned toString(Context* c, char* buffer, unsigned bufferSize);
virtual unsigned index(Context*); virtual Register index(Context*);
void increment(Context*); void increment(Context*);

View File

@ -152,7 +152,7 @@ class AddressSite : public Site {
virtual bool match(Context*, const SiteMask& mask) virtual bool match(Context*, const SiteMask& mask)
{ {
return mask.typeMask & (1 << lir::AddressOperand); return mask.typeMask & lir::Operand::AddressMask;
} }
virtual bool loneMatch(Context*, const SiteMask&) virtual bool loneMatch(Context*, const SiteMask&)
@ -165,9 +165,9 @@ class AddressSite : public Site {
abort(c); abort(c);
} }
virtual lir::OperandType type(Context*) virtual lir::Operand::Type type(Context*)
{ {
return lir::AddressOperand; return lir::Operand::Type::Address;
} }
virtual void asAssemblerOperand(Context* c UNUSED, virtual void asAssemblerOperand(Context* c UNUSED,
@ -201,7 +201,7 @@ class AddressSite : public Site {
virtual SiteMask mask(Context*) virtual SiteMask mask(Context*)
{ {
return SiteMask(1 << lir::AddressOperand, 0, NoFrameIndex); return SiteMask(lir::Operand::AddressMask, 0, NoFrameIndex);
} }
virtual SiteMask nextWordMask(Context* c, unsigned) virtual SiteMask nextWordMask(Context* c, unsigned)
@ -217,14 +217,14 @@ Site* addressSite(Context* c, Promise* address)
return new (c->zone) AddressSite(address); return new (c->zone) AddressSite(address);
} }
RegisterSite::RegisterSite(uint32_t mask, int number) RegisterSite::RegisterSite(RegisterMask mask, Register number)
: mask_(mask), number(number) : mask_(mask), number(number)
{ {
} }
unsigned RegisterSite::toString(Context*, char* buffer, unsigned bufferSize) unsigned RegisterSite::toString(Context*, char* buffer, unsigned bufferSize)
{ {
if (number != lir::NoRegister) { if (number != NoRegister) {
return vm::snprintf(buffer, bufferSize, "%p register %d", this, number); return vm::snprintf(buffer, bufferSize, "%p register %d", this, number);
} else { } else {
return vm::snprintf( return vm::snprintf(
@ -234,11 +234,11 @@ unsigned RegisterSite::toString(Context*, char* buffer, unsigned bufferSize)
unsigned RegisterSite::copyCost(Context* c, Site* s) unsigned RegisterSite::copyCost(Context* c, Site* s)
{ {
assertT(c, number != lir::NoRegister); assertT(c, number != NoRegister);
if (s and (this == s if (s and (this == s
or (s->type(c) == lir::RegisterOperand or (s->type(c) == lir::Operand::Type::RegisterPair
and (static_cast<RegisterSite*>(s)->mask_ & (1 << number))))) { and (static_cast<RegisterSite*>(s)->mask_.contains(number))))) {
return 0; return 0;
} else { } else {
return RegisterCopyCost; return RegisterCopyCost;
@ -247,10 +247,10 @@ unsigned RegisterSite::copyCost(Context* c, Site* s)
bool RegisterSite::match(Context* c UNUSED, const SiteMask& mask) bool RegisterSite::match(Context* c UNUSED, const SiteMask& mask)
{ {
assertT(c, number != lir::NoRegister); assertT(c, number != NoRegister);
if ((mask.typeMask & (1 << lir::RegisterOperand))) { if ((mask.typeMask & lir::Operand::RegisterPairMask)) {
return ((static_cast<uint64_t>(1) << number) & mask.registerMask); return mask.registerMask.contains(number);
} else { } else {
return false; return false;
} }
@ -258,10 +258,10 @@ bool RegisterSite::match(Context* c UNUSED, const SiteMask& mask)
bool RegisterSite::loneMatch(Context* c UNUSED, const SiteMask& mask) bool RegisterSite::loneMatch(Context* c UNUSED, const SiteMask& mask)
{ {
assertT(c, number != lir::NoRegister); assertT(c, number != NoRegister);
if ((mask.typeMask & (1 << lir::RegisterOperand))) { if ((mask.typeMask & lir::Operand::RegisterPairMask)) {
return ((static_cast<uint64_t>(1) << number) == mask.registerMask); return mask.registerMask.containsExactly(number);
} else { } else {
return false; return false;
} }
@ -269,28 +269,28 @@ bool RegisterSite::loneMatch(Context* c UNUSED, const SiteMask& mask)
bool RegisterSite::matchNextWord(Context* c, Site* s, unsigned) bool RegisterSite::matchNextWord(Context* c, Site* s, unsigned)
{ {
assertT(c, number != lir::NoRegister); assertT(c, number != NoRegister);
if (s->type(c) != lir::RegisterOperand) { if (s->type(c) != lir::Operand::Type::RegisterPair) {
return false; return false;
} }
RegisterSite* rs = static_cast<RegisterSite*>(s); RegisterSite* rs = static_cast<RegisterSite*>(s);
unsigned size = rs->registerSize(c); unsigned size = rs->registerSize(c);
if (size > c->targetInfo.pointerSize) { if (size > c->targetInfo.pointerSize) {
assertT(c, number != lir::NoRegister); assertT(c, number != NoRegister);
return number == rs->number; return number == rs->number;
} else { } else {
uint32_t mask = c->regFile->generalRegisters.mask; RegisterMask mask = c->regFile->generalRegisters;
return ((1 << number) & mask) and ((1 << rs->number) & mask); return mask.contains(number) and mask.contains(rs->number);
} }
} }
void RegisterSite::acquire(Context* c, Value* v) void RegisterSite::acquire(Context* c, Value* v)
{ {
Target target; Target target;
if (number != lir::NoRegister) { if (number != NoRegister) {
target = Target(number, lir::RegisterOperand, 0); target = Target(number, 0);
} else { } else {
target = pickRegisterTarget(c, v, mask_); target = pickRegisterTarget(c, v, mask_);
expect(c, target.cost < Target::Impossible); expect(c, target.cost < Target::Impossible);
@ -299,65 +299,65 @@ void RegisterSite::acquire(Context* c, Value* v)
RegisterResource* resource = c->registerResources + target.index; RegisterResource* resource = c->registerResources + target.index;
compiler::acquire(c, resource, v, this); compiler::acquire(c, resource, v, this);
number = target.index; number = Register(target.index);
} }
void RegisterSite::release(Context* c, Value* v) void RegisterSite::release(Context* c, Value* v)
{ {
assertT(c, number != lir::NoRegister); assertT(c, number != NoRegister);
compiler::release(c, c->registerResources + number, v, this); compiler::release(c, c->registerResources + number.index(), v, this);
} }
void RegisterSite::freeze(Context* c, Value* v) void RegisterSite::freeze(Context* c, Value* v)
{ {
assertT(c, number != lir::NoRegister); assertT(c, number != NoRegister);
c->registerResources[number].freeze(c, v); c->registerResources[number.index()].freeze(c, v);
} }
void RegisterSite::thaw(Context* c, Value* v) void RegisterSite::thaw(Context* c, Value* v)
{ {
assertT(c, number != lir::NoRegister); assertT(c, number != NoRegister);
c->registerResources[number].thaw(c, v); c->registerResources[number.index()].thaw(c, v);
} }
bool RegisterSite::frozen(Context* c UNUSED) bool RegisterSite::frozen(Context* c UNUSED)
{ {
assertT(c, number != lir::NoRegister); assertT(c, number != NoRegister);
return c->registerResources[number].freezeCount != 0; return c->registerResources[number.index()].freezeCount != 0;
} }
lir::OperandType RegisterSite::type(Context*) lir::Operand::Type RegisterSite::type(Context*)
{ {
return lir::RegisterOperand; return lir::Operand::Type::RegisterPair;
} }
void RegisterSite::asAssemblerOperand(Context* c UNUSED, void RegisterSite::asAssemblerOperand(Context* c UNUSED,
Site* high, Site* high,
lir::Operand* result) lir::Operand* result)
{ {
assertT(c, number != lir::NoRegister); assertT(c, number != NoRegister);
int highNumber; Register highNumber;
if (high != this) { if (high != this) {
highNumber = static_cast<RegisterSite*>(high)->number; highNumber = static_cast<RegisterSite*>(high)->number;
assertT(c, highNumber != lir::NoRegister); assertT(c, highNumber != NoRegister);
} else { } else {
highNumber = lir::NoRegister; highNumber = NoRegister;
} }
new (result) lir::Register(number, highNumber); new (result) lir::RegisterPair(number, highNumber);
} }
Site* RegisterSite::copy(Context* c) Site* RegisterSite::copy(Context* c)
{ {
uint32_t mask; RegisterMask mask;
if (number != lir::NoRegister) { if (number != NoRegister) {
mask = 1 << number; mask = RegisterMask(number);
} else { } else {
mask = mask_; mask = mask_;
} }
@ -377,64 +377,64 @@ Site* RegisterSite::copyHigh(Context* c)
Site* RegisterSite::makeNextWord(Context* c, unsigned) Site* RegisterSite::makeNextWord(Context* c, unsigned)
{ {
assertT(c, number != lir::NoRegister); assertT(c, number != NoRegister);
assertT(c, ((1 << number) & c->regFile->generalRegisters.mask)); assertT(c, c->regFile->generalRegisters.contains(number));
return freeRegisterSite(c, c->regFile->generalRegisters.mask); return freeRegisterSite(c, c->regFile->generalRegisters);
} }
SiteMask RegisterSite::mask(Context* c UNUSED) SiteMask RegisterSite::mask(Context* c UNUSED)
{ {
return SiteMask(1 << lir::RegisterOperand, mask_, NoFrameIndex); return SiteMask(lir::Operand::RegisterPairMask, mask_, NoFrameIndex);
} }
SiteMask RegisterSite::nextWordMask(Context* c, unsigned) SiteMask RegisterSite::nextWordMask(Context* c, unsigned)
{ {
assertT(c, number != lir::NoRegister); assertT(c, number != NoRegister);
if (registerSize(c) > c->targetInfo.pointerSize) { if (registerSize(c) > c->targetInfo.pointerSize) {
return SiteMask(1 << lir::RegisterOperand, number, NoFrameIndex); return SiteMask(lir::Operand::RegisterPairMask, number, NoFrameIndex);
} else { } else {
return SiteMask(1 << lir::RegisterOperand, return SiteMask(lir::Operand::RegisterPairMask,
c->regFile->generalRegisters.mask, c->regFile->generalRegisters,
NoFrameIndex); NoFrameIndex);
} }
} }
unsigned RegisterSite::registerSize(Context* c) unsigned RegisterSite::registerSize(Context* c)
{ {
assertT(c, number != lir::NoRegister); assertT(c, number != NoRegister);
if ((1 << number) & c->regFile->floatRegisters.mask) { if (c->regFile->floatRegisters.contains(number)) {
return c->arch->floatRegisterSize(); return c->arch->floatRegisterSize();
} else { } else {
return c->targetInfo.pointerSize; return c->targetInfo.pointerSize;
} }
} }
unsigned RegisterSite::registerMask(Context* c UNUSED) RegisterMask RegisterSite::registerMask(Context* c UNUSED)
{ {
assertT(c, number != lir::NoRegister); assertT(c, number != NoRegister);
return 1 << number; return RegisterMask(number);
} }
Site* registerSite(Context* c, int number) Site* registerSite(Context* c, Register number)
{ {
assertT(c, number >= 0); assertT(c, number != NoRegister);
assertT(c, assertT(c,
(1 << number) & (c->regFile->generalRegisters.mask (c->regFile->generalRegisters
| c->regFile->floatRegisters.mask)); | c->regFile->floatRegisters).contains(number));
return new (c->zone) RegisterSite(1 << number, number); return new (c->zone) RegisterSite(RegisterMask(number), number);
} }
Site* freeRegisterSite(Context* c, uint32_t mask) Site* freeRegisterSite(Context* c, RegisterMask mask)
{ {
return new (c->zone) RegisterSite(mask, lir::NoRegister); return new (c->zone) RegisterSite(mask, NoRegister);
} }
MemorySite::MemorySite(int base, int offset, int index, unsigned scale) MemorySite::MemorySite(Register base, int offset, Register index, unsigned scale)
: acquired(false), base(base), offset(offset), index(index), scale(scale) : acquired(false), base(base), offset(offset), index(index), scale(scale)
{ {
} }
@ -453,7 +453,7 @@ unsigned MemorySite::copyCost(Context* c, Site* s)
{ {
assertT(c, acquired); assertT(c, acquired);
if (s and (this == s or (s->type(c) == lir::MemoryOperand if (s and (this == s or (s->type(c) == lir::Operand::Type::Memory
and static_cast<MemorySite*>(s)->base == base and static_cast<MemorySite*>(s)->base == base
and static_cast<MemorySite*>(s)->offset == offset and static_cast<MemorySite*>(s)->offset == offset
and static_cast<MemorySite*>(s)->index == index and static_cast<MemorySite*>(s)->index == index
@ -466,20 +466,20 @@ unsigned MemorySite::copyCost(Context* c, Site* s)
bool MemorySite::conflicts(const SiteMask& mask) bool MemorySite::conflicts(const SiteMask& mask)
{ {
return (mask.typeMask & (1 << lir::RegisterOperand)) != 0 return (mask.typeMask & lir::Operand::RegisterPairMask) != 0
and (((1 << base) & mask.registerMask) == 0 and (!mask.registerMask.contains(base)
or (index != lir::NoRegister or (index != NoRegister
and ((1 << index) & mask.registerMask) == 0)); and !mask.registerMask.contains(index)));
} }
bool MemorySite::match(Context* c, const SiteMask& mask) bool MemorySite::match(Context* c, const SiteMask& mask)
{ {
assertT(c, acquired); assertT(c, acquired);
if (mask.typeMask & (1 << lir::MemoryOperand)) { if (mask.typeMask & lir::Operand::MemoryMask) {
if (mask.frameIndex >= 0) { if (mask.frameIndex >= 0) {
if (base == c->arch->stack()) { if (base == c->arch->stack()) {
assertT(c, index == lir::NoRegister); assertT(c, index == NoRegister);
return static_cast<int>(frameIndexToOffset(c, mask.frameIndex)) return static_cast<int>(frameIndexToOffset(c, mask.frameIndex))
== offset; == offset;
} else { } else {
@ -497,9 +497,9 @@ bool MemorySite::loneMatch(Context* c, const SiteMask& mask)
{ {
assertT(c, acquired); assertT(c, acquired);
if (mask.typeMask & (1 << lir::MemoryOperand)) { if (mask.typeMask & lir::Operand::MemoryMask) {
if (base == c->arch->stack()) { if (base == c->arch->stack()) {
assertT(c, index == lir::NoRegister); assertT(c, index == NoRegister);
if (mask.frameIndex == AnyFrameIndex) { if (mask.frameIndex == AnyFrameIndex) {
return false; return false;
@ -513,7 +513,7 @@ bool MemorySite::loneMatch(Context* c, const SiteMask& mask)
bool MemorySite::matchNextWord(Context* c, Site* s, unsigned index) bool MemorySite::matchNextWord(Context* c, Site* s, unsigned index)
{ {
if (s->type(c) == lir::MemoryOperand) { if (s->type(c) == lir::Operand::Type::Memory) {
MemorySite* ms = static_cast<MemorySite*>(s); MemorySite* ms = static_cast<MemorySite*>(s);
return ms->base == this->base return ms->base == this->base
and ((index == 1 and ((index == 1
@ -532,13 +532,13 @@ bool MemorySite::matchNextWord(Context* c, Site* s, unsigned index)
void MemorySite::acquire(Context* c, Value* v) void MemorySite::acquire(Context* c, Value* v)
{ {
c->registerResources[base].increment(c); c->registerResources[base.index()].increment(c);
if (index != lir::NoRegister) { if (index != NoRegister) {
c->registerResources[index].increment(c); c->registerResources[index.index()].increment(c);
} }
if (base == c->arch->stack()) { if (base == c->arch->stack()) {
assertT(c, index == lir::NoRegister); assertT(c, index == NoRegister);
assertT(c, not c->frameResources[offsetToFrameIndex(c, offset)].reserved); assertT(c, not c->frameResources[offsetToFrameIndex(c, offset)].reserved);
compiler::acquire( compiler::acquire(
@ -551,16 +551,16 @@ void MemorySite::acquire(Context* c, Value* v)
void MemorySite::release(Context* c, Value* v) void MemorySite::release(Context* c, Value* v)
{ {
if (base == c->arch->stack()) { if (base == c->arch->stack()) {
assertT(c, index == lir::NoRegister); assertT(c, index == NoRegister);
assertT(c, not c->frameResources[offsetToFrameIndex(c, offset)].reserved); assertT(c, not c->frameResources[offsetToFrameIndex(c, offset)].reserved);
compiler::release( compiler::release(
c, c->frameResources + offsetToFrameIndex(c, offset), v, this); c, c->frameResources + offsetToFrameIndex(c, offset), v, this);
} }
c->registerResources[base].decrement(c); c->registerResources[base.index()].decrement(c);
if (index != lir::NoRegister) { if (index != NoRegister) {
c->registerResources[index].decrement(c); c->registerResources[index.index()].decrement(c);
} }
acquired = false; acquired = false;
@ -571,9 +571,9 @@ void MemorySite::freeze(Context* c, Value* v)
if (base == c->arch->stack()) { if (base == c->arch->stack()) {
c->frameResources[offsetToFrameIndex(c, offset)].freeze(c, v); c->frameResources[offsetToFrameIndex(c, offset)].freeze(c, v);
} else { } else {
c->registerResources[base].increment(c); c->registerResources[base.index()].increment(c);
if (index != lir::NoRegister) { if (index != NoRegister) {
c->registerResources[index].increment(c); c->registerResources[index.index()].increment(c);
} }
} }
} }
@ -583,9 +583,9 @@ void MemorySite::thaw(Context* c, Value* v)
if (base == c->arch->stack()) { if (base == c->arch->stack()) {
c->frameResources[offsetToFrameIndex(c, offset)].thaw(c, v); c->frameResources[offsetToFrameIndex(c, offset)].thaw(c, v);
} else { } else {
c->registerResources[base].decrement(c); c->registerResources[base.index()].decrement(c);
if (index != lir::NoRegister) { if (index != NoRegister) {
c->registerResources[index].decrement(c); c->registerResources[index.index()].decrement(c);
} }
} }
} }
@ -596,9 +596,9 @@ bool MemorySite::frozen(Context* c)
and c->frameResources[offsetToFrameIndex(c, offset)].freezeCount != 0; and c->frameResources[offsetToFrameIndex(c, offset)].freezeCount != 0;
} }
lir::OperandType MemorySite::type(Context*) lir::Operand::Type MemorySite::type(Context*)
{ {
return lir::MemoryOperand; return lir::Operand::Type::Memory;
} }
void MemorySite::asAssemblerOperand(Context* c UNUSED, void MemorySite::asAssemblerOperand(Context* c UNUSED,
@ -657,7 +657,7 @@ Site* MemorySite::makeNextWord(Context* c, unsigned index)
SiteMask MemorySite::mask(Context* c) SiteMask MemorySite::mask(Context* c)
{ {
return SiteMask(1 << lir::MemoryOperand, return SiteMask(lir::Operand::MemoryMask,
0, 0,
(base == c->arch->stack()) (base == c->arch->stack())
? static_cast<int>(offsetToFrameIndex(c, offset)) ? static_cast<int>(offsetToFrameIndex(c, offset))
@ -668,13 +668,13 @@ SiteMask MemorySite::nextWordMask(Context* c, unsigned index)
{ {
int frameIndex; int frameIndex;
if (base == c->arch->stack()) { if (base == c->arch->stack()) {
assertT(c, this->index == lir::NoRegister); assertT(c, this->index == NoRegister);
frameIndex = static_cast<int>(offsetToFrameIndex(c, offset)) frameIndex = static_cast<int>(offsetToFrameIndex(c, offset))
+ ((index == 1) xor c->arch->bigEndian() ? 1 : -1); + ((index == 1) xor c->arch->bigEndian() ? 1 : -1);
} else { } else {
frameIndex = NoFrameIndex; frameIndex = NoFrameIndex;
} }
return SiteMask(1 << lir::MemoryOperand, 0, frameIndex); return SiteMask(lir::Operand::MemoryMask, 0, frameIndex);
} }
bool MemorySite::isVolatile(Context* c) bool MemorySite::isVolatile(Context* c)
@ -683,9 +683,9 @@ bool MemorySite::isVolatile(Context* c)
} }
MemorySite* memorySite(Context* c, MemorySite* memorySite(Context* c,
int base, Register base,
int offset, int offset,
int index, Register index,
unsigned scale) unsigned scale)
{ {
return new (c->zone) MemorySite(base, offset, index, scale); return new (c->zone) MemorySite(base, offset, index, scale);
@ -697,7 +697,7 @@ MemorySite* frameSite(Context* c, int frameIndex)
return memorySite(c, return memorySite(c,
c->arch->stack(), c->arch->stack(),
frameIndexToOffset(c, frameIndex), frameIndexToOffset(c, frameIndex),
lir::NoRegister, NoRegister,
0); 0);
} }

View File

@ -34,30 +34,30 @@ class SiteMask {
{ {
} }
SiteMask(uint8_t typeMask, uint32_t registerMask, int frameIndex) SiteMask(uint8_t typeMask, RegisterMask registerMask, int frameIndex)
: typeMask(typeMask), registerMask(registerMask), frameIndex(frameIndex) : typeMask(typeMask), registerMask(registerMask), frameIndex(frameIndex)
{ {
} }
SiteMask intersectionWith(const SiteMask& b); SiteMask intersectionWith(const SiteMask& b);
static SiteMask fixedRegisterMask(int number) static SiteMask fixedRegisterMask(Register number)
{ {
return SiteMask(1 << lir::RegisterOperand, 1 << number, NoFrameIndex); return SiteMask(lir::Operand::RegisterPairMask, 1 << number.index(), NoFrameIndex);
} }
static SiteMask lowPart(const OperandMask& mask) static SiteMask lowPart(const OperandMask& mask)
{ {
return SiteMask(mask.typeMask, mask.registerMask, AnyFrameIndex); return SiteMask(mask.typeMask, mask.lowRegisterMask, AnyFrameIndex);
} }
static SiteMask highPart(const OperandMask& mask) static SiteMask highPart(const OperandMask& mask)
{ {
return SiteMask(mask.typeMask, mask.registerMask >> 32, AnyFrameIndex); return SiteMask(mask.typeMask, mask.highRegisterMask, AnyFrameIndex);
} }
uint8_t typeMask; uint8_t typeMask;
uint32_t registerMask; RegisterMask registerMask;
int frameIndex; int frameIndex;
}; };
@ -103,7 +103,7 @@ class Site {
return false; return false;
} }
virtual lir::OperandType type(Context*) = 0; virtual lir::Operand::Type type(Context*) = 0;
virtual void asAssemblerOperand(Context*, Site*, lir::Operand*) = 0; virtual void asAssemblerOperand(Context*, Site*, lir::Operand*) = 0;
@ -121,9 +121,9 @@ class Site {
virtual unsigned registerSize(Context*); virtual unsigned registerSize(Context*);
virtual unsigned registerMask(Context*) virtual RegisterMask registerMask(Context*)
{ {
return 0; return RegisterMask(0);
} }
virtual bool isVolatile(Context*) virtual bool isVolatile(Context*)
@ -187,7 +187,7 @@ class ConstantSite : public Site {
virtual bool match(Context*, const SiteMask& mask) virtual bool match(Context*, const SiteMask& mask)
{ {
return mask.typeMask & (1 << lir::ConstantOperand); return mask.typeMask & lir::Operand::ConstantMask;
} }
virtual bool loneMatch(Context*, const SiteMask&) virtual bool loneMatch(Context*, const SiteMask&)
@ -197,12 +197,12 @@ class ConstantSite : public Site {
virtual bool matchNextWord(Context* c, Site* s, unsigned) virtual bool matchNextWord(Context* c, Site* s, unsigned)
{ {
return s->type(c) == lir::ConstantOperand; return s->type(c) == lir::Operand::Type::Constant;
} }
virtual lir::OperandType type(Context*) virtual lir::Operand::Type type(Context*)
{ {
return lir::ConstantOperand; return lir::Operand::Type::Constant;
} }
virtual void asAssemblerOperand(Context* c, Site* high, lir::Operand* result) virtual void asAssemblerOperand(Context* c, Site* high, lir::Operand* result)
@ -236,12 +236,12 @@ class ConstantSite : public Site {
virtual SiteMask mask(Context*) virtual SiteMask mask(Context*)
{ {
return SiteMask(1 << lir::ConstantOperand, 0, NoFrameIndex); return SiteMask(lir::Operand::ConstantMask, 0, NoFrameIndex);
} }
virtual SiteMask nextWordMask(Context*, unsigned) virtual SiteMask nextWordMask(Context*, unsigned)
{ {
return SiteMask(1 << lir::ConstantOperand, 0, NoFrameIndex); return SiteMask(lir::Operand::ConstantMask, 0, NoFrameIndex);
} }
Promise* value; Promise* value;
@ -251,7 +251,7 @@ Site* addressSite(Context* c, Promise* address);
class RegisterSite : public Site { class RegisterSite : public Site {
public: public:
RegisterSite(uint32_t mask, int number); RegisterSite(RegisterMask mask, Register number);
virtual unsigned toString(Context*, char* buffer, unsigned bufferSize); virtual unsigned toString(Context*, char* buffer, unsigned bufferSize);
@ -273,7 +273,7 @@ class RegisterSite : public Site {
virtual bool frozen(Context* c UNUSED); virtual bool frozen(Context* c UNUSED);
virtual lir::OperandType type(Context*); virtual lir::Operand::Type type(Context*);
virtual void asAssemblerOperand(Context* c UNUSED, virtual void asAssemblerOperand(Context* c UNUSED,
Site* high, Site* high,
@ -293,18 +293,18 @@ class RegisterSite : public Site {
virtual unsigned registerSize(Context* c); virtual unsigned registerSize(Context* c);
virtual unsigned registerMask(Context* c UNUSED); virtual RegisterMask registerMask(Context* c UNUSED);
uint32_t mask_; RegisterMask mask_;
int number; Register number;
}; };
Site* registerSite(Context* c, int number); Site* registerSite(Context* c, Register number);
Site* freeRegisterSite(Context* c, uint32_t mask); Site* freeRegisterSite(Context* c, RegisterMask mask);
class MemorySite : public Site { class MemorySite : public Site {
public: public:
MemorySite(int base, int offset, int index, unsigned scale); MemorySite(Register base, int offset, Register index, unsigned scale);
virtual unsigned toString(Context*, char* buffer, unsigned bufferSize); virtual unsigned toString(Context*, char* buffer, unsigned bufferSize);
@ -328,7 +328,7 @@ class MemorySite : public Site {
virtual bool frozen(Context* c); virtual bool frozen(Context* c);
virtual lir::OperandType type(Context*); virtual lir::Operand::Type type(Context*);
virtual void asAssemblerOperand(Context* c UNUSED, virtual void asAssemblerOperand(Context* c UNUSED,
Site* high UNUSED, Site* high UNUSED,
@ -351,16 +351,16 @@ class MemorySite : public Site {
virtual bool isVolatile(Context* c); virtual bool isVolatile(Context* c);
bool acquired; bool acquired;
int base; Register base;
int offset; int offset;
int index; Register index;
unsigned scale; unsigned scale;
}; };
MemorySite* memorySite(Context* c, MemorySite* memorySite(Context* c,
int base, Register base,
int offset = 0, int offset = 0,
int index = lir::NoRegister, Register index = NoRegister,
unsigned scale = 1); unsigned scale = 1);
MemorySite* frameSite(Context* c, int frameIndex); MemorySite* frameSite(Context* c, int frameIndex);

View File

@ -1,35 +0,0 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include <avian/codegen/registers.h>
namespace avian {
namespace codegen {
unsigned RegisterMask::maskStart(uint32_t mask)
{
for (int i = 0; i <= 31; ++i) {
if (mask & (1 << i))
return i;
}
return 32;
}
unsigned RegisterMask::maskLimit(uint32_t mask)
{
for (int i = 31; i >= 0; --i) {
if (mask & (1 << i))
return i + 1;
}
return 0;
}
} // namespace codegen
} // namespace avian

View File

@ -4,5 +4,6 @@ add_library(avian_codegen_arm
context.cpp context.cpp
fixup.cpp fixup.cpp
multimethod.cpp multimethod.cpp
operations.cpp operations32.cpp
operations64.cpp
) )

View File

@ -39,7 +39,7 @@ namespace isa {
bool vfpSupported() bool vfpSupported()
{ {
// TODO: Use at runtime detection // TODO: Use at runtime detection
#if defined(__ARM_PCS_VFP) #if (defined __ARM_PCS_VFP) || (defined ARCH_arm64)
// armhf // armhf
return true; return true;
#else #else
@ -52,17 +52,12 @@ bool vfpSupported()
} }
} // namespace isa } // namespace isa
inline unsigned lo8(int64_t i)
{
return (unsigned)(i & MASK_LO8);
}
const RegisterFile MyRegisterFileWithoutFloats(GPR_MASK, 0); const RegisterFile MyRegisterFileWithoutFloats(GPR_MASK, 0);
const RegisterFile MyRegisterFileWithFloats(GPR_MASK, FPR_MASK); const RegisterFile MyRegisterFileWithFloats(GPR_MASK, FPR_MASK);
const unsigned FrameHeaderSize = 1; const unsigned FrameHeaderSize = TargetBytesPerWord / 4;
const unsigned StackAlignmentInBytes = 8; const unsigned StackAlignmentInBytes = TargetBytesPerWord * 2;
const unsigned StackAlignmentInWords = StackAlignmentInBytes const unsigned StackAlignmentInWords = StackAlignmentInBytes
/ TargetBytesPerWord; / TargetBytesPerWord;
@ -94,11 +89,11 @@ void nextFrame(ArchitectureContext* con,
void** stack) void** stack)
{ {
assertT(con, *ip >= start); assertT(con, *ip >= start);
assertT(con, *ip <= start + (size / TargetBytesPerWord)); assertT(con, *ip <= start + (size / 4));
uint32_t* instruction = static_cast<uint32_t*>(*ip); uint32_t* instruction = static_cast<uint32_t*>(*ip);
if ((*start >> 20) == 0xe59) { if ((*start >> 20) == (TargetBytesPerWord == 8 ? 0xf94 : 0xe59)) {
// skip stack overflow check // skip stack overflow check
start += 3; start += 3;
} }
@ -116,7 +111,8 @@ void nextFrame(ArchitectureContext* con,
return; return;
} }
if (*instruction == 0xe12fff1e) { // return if (*instruction == (TargetBytesPerWord == 8 ? 0xd61f03c0 : 0xe12fff1e)) {
// return
*ip = link; *ip = link;
return; return;
} }
@ -129,7 +125,20 @@ void nextFrame(ArchitectureContext* con,
// check for post-non-tail-call stack adjustment of the form "sub // check for post-non-tail-call stack adjustment of the form "sub
// sp, sp, #offset": // sp, sp, #offset":
if ((*instruction >> 12) == 0xe24dd) { if (TargetBytesPerWord == 8 and (*instruction & 0xff0003ff) == 0xd10003ff) {
unsigned value = (*instruction >> 10) & 0xfff;
unsigned shift = (*instruction >> 22) & 1;
switch (shift) {
case 0:
offset -= value / TargetBytesPerWord;
break;
case 1:
offset -= (value << 12) / TargetBytesPerWord;
break;
default:
abort(con);
}
} else if (TargetBytesPerWord == 4 and (*instruction >> 12) == 0xe24dd) {
unsigned value = *instruction & 0xff; unsigned value = *instruction & 0xff;
unsigned rotation = (*instruction >> 8) & 0xf; unsigned rotation = (*instruction >> 8) & 0xf;
switch (rotation) { switch (rotation) {
@ -169,39 +178,39 @@ class MyArchitecture : public Architecture {
: &MyRegisterFileWithoutFloats; : &MyRegisterFileWithoutFloats;
} }
virtual int scratch() virtual Register scratch()
{ {
return 5; return Register(5);
} }
virtual int stack() virtual Register stack()
{ {
return StackRegister; return StackRegister;
} }
virtual int thread() virtual Register thread()
{ {
return ThreadRegister; return ThreadRegister;
} }
virtual int returnLow() virtual Register returnLow()
{ {
return 0; return Register(0);
} }
virtual int returnHigh() virtual Register returnHigh()
{ {
return 1; return Register(1);
} }
virtual int virtualCallTarget() virtual Register virtualCallTarget()
{ {
return 4; return Register(4);
} }
virtual int virtualCallIndex() virtual Register virtualCallIndex()
{ {
return 3; return Register(3);
} }
virtual ir::TargetInfo targetInfo() virtual ir::TargetInfo targetInfo()
@ -219,13 +228,14 @@ class MyArchitecture : public Architecture {
return 0x1FFFFFF; return 0x1FFFFFF;
} }
virtual bool reserved(int register_) virtual bool reserved(Register register_)
{ {
switch (register_) { switch (register_.index()) {
case LinkRegister: case LinkRegister.index():
case StackRegister: case FrameRegister.index():
case ThreadRegister: case StackRegister.index():
case ProgramCounter: case ThreadRegister.index():
case ProgramCounter.index():
return true; return true;
default: default:
@ -263,14 +273,14 @@ class MyArchitecture : public Architecture {
virtual unsigned argumentRegisterCount() virtual unsigned argumentRegisterCount()
{ {
return 4; return TargetBytesPerWord;
} }
virtual int argumentRegister(unsigned index) virtual Register argumentRegister(unsigned index)
{ {
assertT(&con, index < argumentRegisterCount()); assertT(&con, index < argumentRegisterCount());
return index; return Register(index);
} }
virtual bool hasLinkRegister() virtual bool hasLinkRegister()
@ -311,8 +321,13 @@ class MyArchitecture : public Architecture {
case lir::AlignedLongCall: case lir::AlignedLongCall:
case lir::AlignedLongJump: { case lir::AlignedLongJump: {
uint32_t* p = static_cast<uint32_t*>(returnAddress) - 2; uint32_t* p = static_cast<uint32_t*>(returnAddress) - 2;
if (TargetBytesPerWord == 8) {
const int32_t mask = (PoolOffsetMask >> 2) << 5;
*reinterpret_cast<void**>(p + ((*p & mask) >> 5)) = newTarget;
} else {
*reinterpret_cast<void**>(p + (((*p & PoolOffsetMask) + 8) / 4)) *reinterpret_cast<void**>(p + (((*p & PoolOffsetMask) + 8) / 4))
= newTarget; = newTarget;
}
} break; } break;
default: default:
@ -401,8 +416,8 @@ class MyArchitecture : public Architecture {
OperandMask& aMask, OperandMask& aMask,
bool* thunk) bool* thunk)
{ {
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand); aMask.typeMask = lir::Operand::RegisterPairMask | lir::Operand::ConstantMask;
aMask.registerMask = ~static_cast<uint64_t>(0); aMask.setLowHighRegisterMasks(AnyRegisterMask, AnyRegisterMask);
*thunk = false; *thunk = false;
} }
@ -414,12 +429,12 @@ class MyArchitecture : public Architecture {
{ {
*thunk = false; *thunk = false;
aMask.typeMask = ~0; aMask.typeMask = ~0;
aMask.registerMask = GPR_MASK64; aMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
switch (op) { switch (op) {
case lir::Negate: case lir::Negate:
aMask.typeMask = (1 << lir::RegisterOperand); aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.registerMask = GPR_MASK64; aMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
break; break;
case lir::Absolute: case lir::Absolute:
@ -431,30 +446,30 @@ class MyArchitecture : public Architecture {
case lir::FloatNegate: case lir::FloatNegate:
case lir::Float2Float: case lir::Float2Float:
if (vfpSupported()) { if (vfpSupported()) {
aMask.typeMask = (1 << lir::RegisterOperand); aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.registerMask = FPR_MASK64; aMask.setLowHighRegisterMasks(FPR_MASK, FPR_MASK);
} else { } else {
*thunk = true; *thunk = true;
} }
break; break;
case lir::Float2Int: case lir::Float2Int:
// todo: Java requires different semantics than SSE for // todo: Java requires different semantics than VFP for
// converting floats to integers, we we need to either use // converting floats to integers, we we need to either use
// thunks or produce inline machine code which handles edge // thunks or produce inline machine code which handles edge
// cases properly. // cases properly.
if (false && vfpSupported() && bSize == 4) { if (false && vfpSupported() && bSize <= TargetBytesPerWord) {
aMask.typeMask = (1 << lir::RegisterOperand); aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.registerMask = FPR_MASK64; aMask.setLowHighRegisterMasks(FPR_MASK, FPR_MASK);
} else { } else {
*thunk = true; *thunk = true;
} }
break; break;
case lir::Int2Float: case lir::Int2Float:
if (vfpSupported() && aSize == 4) { if (vfpSupported() && aSize <= TargetBytesPerWord) {
aMask.typeMask = (1 << lir::RegisterOperand); aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.registerMask = GPR_MASK64; aMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
} else { } else {
*thunk = true; *thunk = true;
} }
@ -471,13 +486,13 @@ class MyArchitecture : public Architecture {
unsigned, unsigned,
OperandMask& bMask) OperandMask& bMask)
{ {
bMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand); bMask.typeMask = lir::Operand::RegisterPairMask | lir::Operand::MemoryMask;
bMask.registerMask = GPR_MASK64; bMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
switch (op) { switch (op) {
case lir::Negate: case lir::Negate:
bMask.typeMask = (1 << lir::RegisterOperand); bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.registerMask = GPR_MASK64; bMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
break; break;
case lir::FloatAbsolute: case lir::FloatAbsolute:
@ -485,18 +500,18 @@ class MyArchitecture : public Architecture {
case lir::FloatNegate: case lir::FloatNegate:
case lir::Float2Float: case lir::Float2Float:
case lir::Int2Float: case lir::Int2Float:
bMask.typeMask = (1 << lir::RegisterOperand); bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.registerMask = FPR_MASK64; bMask.setLowHighRegisterMasks(FPR_MASK, FPR_MASK);
break; break;
case lir::Float2Int: case lir::Float2Int:
bMask.typeMask = (1 << lir::RegisterOperand); bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.registerMask = GPR_MASK64; bMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
break; break;
case lir::Move: case lir::Move:
if (!(aMask.typeMask & 1 << lir::RegisterOperand)) { if (!(aMask.typeMask & lir::Operand::RegisterPairMask)) {
bMask.typeMask = 1 << lir::RegisterOperand; bMask.typeMask = lir::Operand::RegisterPairMask;
} }
break; break;
@ -511,21 +526,21 @@ class MyArchitecture : public Architecture {
const OperandMask& dstMask) const OperandMask& dstMask)
{ {
srcMask.typeMask = ~0; srcMask.typeMask = ~0;
srcMask.registerMask = ~static_cast<uint64_t>(0); srcMask.setLowHighRegisterMasks(AnyRegisterMask, AnyRegisterMask);
tmpMask.typeMask = 0; tmpMask.typeMask = 0;
tmpMask.registerMask = 0; tmpMask.setLowHighRegisterMasks(0, 0);
if (dstMask.typeMask & (1 << lir::MemoryOperand)) { if (dstMask.typeMask & lir::Operand::MemoryMask) {
// can't move directly from memory or constant to memory // can't move directly from memory or constant to memory
srcMask.typeMask = 1 << lir::RegisterOperand; srcMask.typeMask = lir::Operand::RegisterPairMask;
tmpMask.typeMask = 1 << lir::RegisterOperand; tmpMask.typeMask = lir::Operand::RegisterPairMask;
tmpMask.registerMask = GPR_MASK64; tmpMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
} else if (vfpSupported() && dstMask.typeMask & 1 << lir::RegisterOperand } else if (vfpSupported() && dstMask.typeMask & lir::Operand::RegisterPairMask
&& dstMask.registerMask & FPR_MASK) { && dstMask.lowRegisterMask & FPR_MASK) {
srcMask.typeMask = tmpMask.typeMask = 1 << lir::RegisterOperand srcMask.typeMask = tmpMask.typeMask = lir::Operand::RegisterPairMask
| 1 << lir::MemoryOperand; | lir::Operand::MemoryMask;
tmpMask.registerMask = ~static_cast<uint64_t>(0); tmpMask.setLowHighRegisterMasks(AnyRegisterMask, AnyRegisterMask);
} }
} }
@ -537,11 +552,11 @@ class MyArchitecture : public Architecture {
unsigned, unsigned,
bool* thunk) bool* thunk)
{ {
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand); aMask.typeMask = lir::Operand::RegisterPairMask | lir::Operand::ConstantMask;
aMask.registerMask = GPR_MASK64; aMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
bMask.typeMask = (1 << lir::RegisterOperand); bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.registerMask = GPR_MASK64; bMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
*thunk = false; *thunk = false;
@ -549,8 +564,8 @@ class MyArchitecture : public Architecture {
case lir::ShiftLeft: case lir::ShiftLeft:
case lir::ShiftRight: case lir::ShiftRight:
case lir::UnsignedShiftRight: case lir::UnsignedShiftRight:
if (bSize == 8) if (bSize > TargetBytesPerWord)
aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand); aMask.typeMask = bMask.typeMask = lir::Operand::RegisterPairMask;
break; break;
case lir::Add: case lir::Add:
@ -558,9 +573,14 @@ class MyArchitecture : public Architecture {
case lir::Or: case lir::Or:
case lir::Xor: case lir::Xor:
case lir::Multiply: case lir::Multiply:
aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand); aMask.typeMask = bMask.typeMask = lir::Operand::RegisterPairMask;
break; break;
// todo: Although ARM has instructions for integer division and
// remainder, they don't trap on division by zero, which is why
// we use thunks. Alternatively, we could generate inline code
// with an explicit zero check, which would probably be a bit
// faster.
case lir::Divide: case lir::Divide:
case lir::Remainder: case lir::Remainder:
case lir::FloatRemainder: case lir::FloatRemainder:
@ -572,8 +592,9 @@ class MyArchitecture : public Architecture {
case lir::FloatMultiply: case lir::FloatMultiply:
case lir::FloatDivide: case lir::FloatDivide:
if (vfpSupported()) { if (vfpSupported()) {
aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand); aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.registerMask = bMask.registerMask = FPR_MASK64; aMask.setLowHighRegisterMasks(FPR_MASK, FPR_MASK);
bMask = aMask;
} else { } else {
*thunk = true; *thunk = true;
} }
@ -590,8 +611,9 @@ class MyArchitecture : public Architecture {
case lir::JumpIfFloatLessOrEqualOrUnordered: case lir::JumpIfFloatLessOrEqualOrUnordered:
case lir::JumpIfFloatGreaterOrEqualOrUnordered: case lir::JumpIfFloatGreaterOrEqualOrUnordered:
if (vfpSupported()) { if (vfpSupported()) {
aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand); aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.registerMask = bMask.registerMask = FPR_MASK64; aMask.setLowHighRegisterMasks(FPR_MASK, FPR_MASK);
bMask = aMask;
} else { } else {
*thunk = true; *thunk = true;
} }
@ -611,11 +633,12 @@ class MyArchitecture : public Architecture {
OperandMask& cMask) OperandMask& cMask)
{ {
if (isBranch(op)) { if (isBranch(op)) {
cMask.typeMask = (1 << lir::ConstantOperand); cMask.typeMask = lir::Operand::ConstantMask;
cMask.registerMask = 0; cMask.setLowHighRegisterMasks(0, 0);
} else { } else {
cMask.typeMask = (1 << lir::RegisterOperand); cMask.typeMask = lir::Operand::RegisterPairMask;
cMask.registerMask = bMask.registerMask; cMask.lowRegisterMask = bMask.lowRegisterMask;
cMask.highRegisterMask = bMask.highRegisterMask;
} }
} }
@ -658,7 +681,7 @@ class MyAssembler : public Assembler {
virtual void checkStackOverflow(uintptr_t handler, virtual void checkStackOverflow(uintptr_t handler,
unsigned stackLimitOffsetFromThread) unsigned stackLimitOffsetFromThread)
{ {
lir::Register stack(StackRegister); lir::RegisterPair stack(StackRegister);
lir::Memory stackLimit(ThreadRegister, stackLimitOffsetFromThread); lir::Memory stackLimit(ThreadRegister, stackLimitOffsetFromThread);
lir::Constant handlerConstant(new (con.zone) ResolvedPromise(handler)); lir::Constant handlerConstant(new (con.zone) ResolvedPromise(handler));
branchRM(&con, branchRM(&con,
@ -671,11 +694,11 @@ class MyAssembler : public Assembler {
virtual void saveFrame(unsigned stackOffset, unsigned ipOffset) virtual void saveFrame(unsigned stackOffset, unsigned ipOffset)
{ {
lir::Register link(LinkRegister); lir::RegisterPair link(LinkRegister);
lir::Memory linkDst(ThreadRegister, ipOffset); lir::Memory linkDst(ThreadRegister, ipOffset);
moveRM(&con, TargetBytesPerWord, &link, TargetBytesPerWord, &linkDst); moveRM(&con, TargetBytesPerWord, &link, TargetBytesPerWord, &linkDst);
lir::Register stack(StackRegister); lir::RegisterPair stack(StackRegister);
lir::Memory stackDst(ThreadRegister, stackOffset); lir::Memory stackDst(ThreadRegister, stackOffset);
moveRM(&con, TargetBytesPerWord, &stack, TargetBytesPerWord, &stackDst); moveRM(&con, TargetBytesPerWord, &stack, TargetBytesPerWord, &stackDst);
} }
@ -684,7 +707,7 @@ class MyAssembler : public Assembler {
{ {
struct Argument { struct Argument {
unsigned size; unsigned size;
lir::OperandType type; lir::Operand::Type type;
lir::Operand* operand; lir::Operand* operand;
}; };
RUNTIME_ARRAY(Argument, arguments, argumentCount); RUNTIME_ARRAY(Argument, arguments, argumentCount);
@ -695,7 +718,7 @@ class MyAssembler : public Assembler {
for (unsigned i = 0; i < argumentCount; ++i) { for (unsigned i = 0; i < argumentCount; ++i) {
RUNTIME_ARRAY_BODY(arguments)[i].size = va_arg(a, unsigned); RUNTIME_ARRAY_BODY(arguments)[i].size = va_arg(a, unsigned);
RUNTIME_ARRAY_BODY(arguments)[i].type RUNTIME_ARRAY_BODY(arguments)[i].type
= static_cast<lir::OperandType>(va_arg(a, int)); = static_cast<lir::Operand::Type>(va_arg(a, int));
RUNTIME_ARRAY_BODY(arguments)[i].operand = va_arg(a, lir::Operand*); RUNTIME_ARRAY_BODY(arguments)[i].operand = va_arg(a, lir::Operand*);
footprint += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size, footprint += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size,
TargetBytesPerWord); TargetBytesPerWord);
@ -707,7 +730,7 @@ class MyAssembler : public Assembler {
unsigned offset = 0; unsigned offset = 0;
for (unsigned i = 0; i < argumentCount; ++i) { for (unsigned i = 0; i < argumentCount; ++i) {
if (i < arch_->argumentRegisterCount()) { if (i < arch_->argumentRegisterCount()) {
lir::Register dst(arch_->argumentRegister(i)); lir::RegisterPair dst(arch_->argumentRegister(i));
apply(lir::Move, apply(lir::Move,
OperandInfo(RUNTIME_ARRAY_BODY(arguments)[i].size, OperandInfo(RUNTIME_ARRAY_BODY(arguments)[i].size,
@ -715,7 +738,7 @@ class MyAssembler : public Assembler {
RUNTIME_ARRAY_BODY(arguments)[i].operand), RUNTIME_ARRAY_BODY(arguments)[i].operand),
OperandInfo(pad(RUNTIME_ARRAY_BODY(arguments)[i].size, OperandInfo(pad(RUNTIME_ARRAY_BODY(arguments)[i].size,
TargetBytesPerWord), TargetBytesPerWord),
lir::RegisterOperand, lir::Operand::Type::RegisterPair,
&dst)); &dst));
offset += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size, offset += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size,
@ -729,7 +752,7 @@ class MyAssembler : public Assembler {
RUNTIME_ARRAY_BODY(arguments)[i].operand), RUNTIME_ARRAY_BODY(arguments)[i].operand),
OperandInfo(pad(RUNTIME_ARRAY_BODY(arguments)[i].size, OperandInfo(pad(RUNTIME_ARRAY_BODY(arguments)[i].size,
TargetBytesPerWord), TargetBytesPerWord),
lir::MemoryOperand, lir::Operand::Type::Memory,
&dst)); &dst));
offset += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size, offset += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size,
@ -747,12 +770,37 @@ class MyAssembler : public Assembler {
// how to handle them: // how to handle them:
assertT(&con, footprint < 256); assertT(&con, footprint < 256);
lir::Register stack(StackRegister); // todo: the ARM ABI says the frame preamble should be of the form
//
// stp x29, x30, [sp,#-footprint]!
// mov x29, sp
//
// and the frame should be popped with e.g.
//
// ldp x29, x30, [sp],#footprint
// br x30
//
// However, that will invalidate a lot of assumptions elsewhere
// about the return address being stored at the opposite end of
// the frame, so lots of other code will need to change before we
// can do that. The code below can be enabled as a starting point
// when we're ready to tackle that.
if (false and TargetBytesPerWord == 8) {
// stp x29, x30, [sp,#-footprint]!
con.code.append4(0xa9800000 | ((-footprint & 0x7f) << 15)
| (StackRegister.index() << 5)
| (LinkRegister.index() << 10) | FrameRegister.index());
lir::RegisterPair stack(StackRegister);
lir::RegisterPair frame(FrameRegister);
moveRR(&con, TargetBytesPerWord, &stack, TargetBytesPerWord, &frame);
} else {
lir::RegisterPair stack(StackRegister);
ResolvedPromise footprintPromise(footprint * TargetBytesPerWord); ResolvedPromise footprintPromise(footprint * TargetBytesPerWord);
lir::Constant footprintConstant(&footprintPromise); lir::Constant footprintConstant(&footprintPromise);
subC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack); subC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack);
lir::Register returnAddress(LinkRegister); lir::RegisterPair returnAddress(LinkRegister);
lir::Memory returnAddressDst(StackRegister, lir::Memory returnAddressDst(StackRegister,
(footprint - 1) * TargetBytesPerWord); (footprint - 1) * TargetBytesPerWord);
moveRM(&con, moveRM(&con,
@ -761,10 +809,11 @@ class MyAssembler : public Assembler {
TargetBytesPerWord, TargetBytesPerWord,
&returnAddressDst); &returnAddressDst);
} }
}
virtual void adjustFrame(unsigned difference) virtual void adjustFrame(unsigned difference)
{ {
lir::Register stack(StackRegister); lir::RegisterPair stack(StackRegister);
ResolvedPromise differencePromise(difference * TargetBytesPerWord); ResolvedPromise differencePromise(difference * TargetBytesPerWord);
lir::Constant differenceConstant(&differencePromise); lir::Constant differenceConstant(&differencePromise);
subC(&con, TargetBytesPerWord, &differenceConstant, &stack, &stack); subC(&con, TargetBytesPerWord, &differenceConstant, &stack, &stack);
@ -774,7 +823,13 @@ class MyAssembler : public Assembler {
{ {
footprint += FrameHeaderSize; footprint += FrameHeaderSize;
lir::Register returnAddress(LinkRegister); // see comment regarding the ARM64 ABI in allocateFrame
if (false and TargetBytesPerWord == 8) {
// ldp x29, x30, [sp],#footprint
con.code.append4(0xa8c00000 | (footprint << 15) | (31 << 5) | (30 << 10)
| 29);
} else {
lir::RegisterPair returnAddress(LinkRegister);
lir::Memory returnAddressSrc(StackRegister, lir::Memory returnAddressSrc(StackRegister,
(footprint - 1) * TargetBytesPerWord); (footprint - 1) * TargetBytesPerWord);
moveMR(&con, moveMR(&con,
@ -783,24 +838,25 @@ class MyAssembler : public Assembler {
TargetBytesPerWord, TargetBytesPerWord,
&returnAddress); &returnAddress);
lir::Register stack(StackRegister); lir::RegisterPair stack(StackRegister);
ResolvedPromise footprintPromise(footprint * TargetBytesPerWord); ResolvedPromise footprintPromise(footprint * TargetBytesPerWord);
lir::Constant footprintConstant(&footprintPromise); lir::Constant footprintConstant(&footprintPromise);
addC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack); addC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack);
} }
}
virtual void popFrameForTailCall(unsigned footprint, virtual void popFrameForTailCall(unsigned footprint,
int offset, int offset,
int returnAddressSurrogate, Register returnAddressSurrogate,
int framePointerSurrogate UNUSED) Register framePointerSurrogate UNUSED)
{ {
assertT(&con, framePointerSurrogate == lir::NoRegister); assertT(&con, framePointerSurrogate == NoRegister);
if (TailCalls) { if (TailCalls) {
if (offset) { if (offset) {
footprint += FrameHeaderSize; footprint += FrameHeaderSize;
lir::Register link(LinkRegister); lir::RegisterPair link(LinkRegister);
lir::Memory returnAddressSrc(StackRegister, lir::Memory returnAddressSrc(StackRegister,
(footprint - 1) * TargetBytesPerWord); (footprint - 1) * TargetBytesPerWord);
moveMR(&con, moveMR(&con,
@ -809,16 +865,16 @@ class MyAssembler : public Assembler {
TargetBytesPerWord, TargetBytesPerWord,
&link); &link);
lir::Register stack(StackRegister); lir::RegisterPair stack(StackRegister);
ResolvedPromise footprintPromise((footprint - offset) ResolvedPromise footprintPromise((footprint - offset)
* TargetBytesPerWord); * TargetBytesPerWord);
lir::Constant footprintConstant(&footprintPromise); lir::Constant footprintConstant(&footprintPromise);
addC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack); addC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack);
if (returnAddressSurrogate != lir::NoRegister) { if (returnAddressSurrogate != NoRegister) {
assertT(&con, offset > 0); assertT(&con, offset > 0);
lir::Register ras(returnAddressSurrogate); lir::RegisterPair ras(returnAddressSurrogate);
lir::Memory dst(StackRegister, (offset - 1) * TargetBytesPerWord); lir::Memory dst(StackRegister, (offset - 1) * TargetBytesPerWord);
moveRM(&con, TargetBytesPerWord, &ras, TargetBytesPerWord, &dst); moveRM(&con, TargetBytesPerWord, &ras, TargetBytesPerWord, &dst);
} }
@ -842,7 +898,7 @@ class MyAssembler : public Assembler {
if (TailCalls and argumentFootprint > StackAlignmentInWords) { if (TailCalls and argumentFootprint > StackAlignmentInWords) {
offset = argumentFootprint - StackAlignmentInWords; offset = argumentFootprint - StackAlignmentInWords;
lir::Register stack(StackRegister); lir::RegisterPair stack(StackRegister);
ResolvedPromise adjustmentPromise(offset * TargetBytesPerWord); ResolvedPromise adjustmentPromise(offset * TargetBytesPerWord);
lir::Constant adjustment(&adjustmentPromise); lir::Constant adjustment(&adjustmentPromise);
addC(&con, TargetBytesPerWord, &adjustment, &stack, &stack); addC(&con, TargetBytesPerWord, &adjustment, &stack, &stack);
@ -853,12 +909,28 @@ class MyAssembler : public Assembler {
return_(&con); return_(&con);
} }
virtual void popFrameAndUpdateStackAndReturn(unsigned frameFootprint, virtual void popFrameAndUpdateStackAndReturn(unsigned footprint,
unsigned stackOffsetFromThread) unsigned stackOffsetFromThread)
{ {
popFrame(frameFootprint); footprint += FrameHeaderSize;
lir::Register stack(StackRegister); // see comment regarding the ARM64 ABI in allocateFrame
if (false and TargetBytesPerWord == 8) {
// ldp x29, x30, [sp],#footprint
con.code.append4(0xa8c00000 | (footprint << 15) | (31 << 5) | (30 << 10)
| 29);
} else {
lir::RegisterPair returnAddress(LinkRegister);
lir::Memory returnAddressSrc(StackRegister,
(footprint - 1) * TargetBytesPerWord);
moveMR(&con,
TargetBytesPerWord,
&returnAddressSrc,
TargetBytesPerWord,
&returnAddress);
}
lir::RegisterPair stack(StackRegister);
lir::Memory newStackSrc(ThreadRegister, stackOffsetFromThread); lir::Memory newStackSrc(ThreadRegister, stackOffsetFromThread);
moveMR(&con, TargetBytesPerWord, &newStackSrc, TargetBytesPerWord, &stack); moveMR(&con, TargetBytesPerWord, &newStackSrc, TargetBytesPerWord, &stack);
@ -890,14 +962,14 @@ class MyAssembler : public Assembler {
if (isBranch(op)) { if (isBranch(op)) {
assertT(&con, a.size == b.size); assertT(&con, a.size == b.size);
assertT(&con, c.size == TargetBytesPerWord); assertT(&con, c.size == TargetBytesPerWord);
assertT(&con, c.type == lir::ConstantOperand); assertT(&con, c.type == lir::Operand::Type::Constant);
arch_->con.branchOperations[branchIndex(&(arch_->con), a.type, b.type)]( arch_->con.branchOperations[branchIndex(&(arch_->con), a.type, b.type)](
&con, op, a.size, a.operand, b.operand, c.operand); &con, op, a.size, a.operand, b.operand, c.operand);
} else { } else {
assertT(&con, b.size == c.size); assertT(&con, b.size == c.size);
assertT(&con, b.type == lir::RegisterOperand); assertT(&con, b.type == lir::Operand::Type::RegisterPair);
assertT(&con, c.type == lir::RegisterOperand); assertT(&con, c.type == lir::Operand::Type::RegisterPair);
arch_->con.ternaryOperations[index(&(arch_->con), op, a.type)]( arch_->con.ternaryOperations[index(&(arch_->con), op, a.type)](
&con, b.size, a.operand, b.operand, c.operand); &con, b.size, a.operand, b.operand, c.operand);
@ -948,17 +1020,28 @@ class MyAssembler : public Assembler {
unsigned instruction = o->block->start + padding(o->block, o->offset) unsigned instruction = o->block->start + padding(o->block, o->offset)
+ o->offset; + o->offset;
int32_t* p = reinterpret_cast<int32_t*>(dst + instruction);
if (TargetBytesPerWord == 8) {
int32_t v = entry - instruction;
expect(&con, v == (v & PoolOffsetMask));
const int32_t mask = (PoolOffsetMask >> 2) << 5;
*p = (((v >> 2) << 5) & mask) | ((~mask) & *p);
} else {
int32_t v = (entry - 8) - instruction; int32_t v = (entry - 8) - instruction;
expect(&con, v == (v & PoolOffsetMask)); expect(&con, v == (v & PoolOffsetMask));
int32_t* p = reinterpret_cast<int32_t*>(dst + instruction);
*p = (v & PoolOffsetMask) | ((~PoolOffsetMask) & *p); *p = (v & PoolOffsetMask) | ((~PoolOffsetMask) & *p);
}
poolSize += TargetBytesPerWord; poolSize += TargetBytesPerWord;
} }
bool jump = needJump(b); bool jump = needJump(b);
if (jump) { if (jump) {
expect(&con, TargetBytesPerWord == 4);
write4(dst + dstOffset, write4(dst + dstOffset,
isa::b((poolSize + TargetBytesPerWord - 8) >> 2)); isa::b((poolSize + TargetBytesPerWord - 8) >> 2));
} }

View File

@ -85,15 +85,15 @@ class ArchitectureContext {
vm::System* s; vm::System* s;
OperationType operations[lir::OperationCount]; OperationType operations[lir::OperationCount];
UnaryOperationType UnaryOperationType
unaryOperations[lir::UnaryOperationCount * lir::OperandTypeCount]; unaryOperations[lir::UnaryOperationCount * lir::Operand::TypeCount];
BinaryOperationType binaryOperations[lir::BinaryOperationCount BinaryOperationType binaryOperations[lir::BinaryOperationCount
* lir::OperandTypeCount * lir::Operand::TypeCount
* lir::OperandTypeCount]; * lir::Operand::TypeCount];
TernaryOperationType ternaryOperations[lir::NonBranchTernaryOperationCount TernaryOperationType ternaryOperations[lir::NonBranchTernaryOperationCount
* lir::OperandTypeCount]; * lir::Operand::TypeCount];
BranchOperationType branchOperations[lir::BranchOperationCount BranchOperationType branchOperations[lir::BranchOperationCount
* lir::OperandTypeCount * lir::Operand::TypeCount
* lir::OperandTypeCount]; * lir::Operand::TypeCount];
}; };
inline avian::util::Aborter* getAborter(Context* c) inline avian::util::Aborter* getAborter(Context* c)

View File

@ -46,34 +46,34 @@ enum CONDITION {
enum SHIFTOP { LSL, LSR, ASR, ROR }; enum SHIFTOP { LSL, LSR, ASR, ROR };
// INSTRUCTION FORMATS // INSTRUCTION FORMATS
inline int inline int
DATA(int cond, int opcode, int S, int Rn, int Rd, int shift, int Sh, int Rm) DATA(int cond, int opcode, int S, Register Rn, Register Rd, int shift, int Sh, Register Rm)
{ {
return cond << 28 | opcode << 21 | S << 20 | Rn << 16 | Rd << 12 | shift << 7 return cond << 28 | opcode << 21 | S << 20 | Rn.index() << 16 | Rd.index() << 12 | shift << 7
| Sh << 5 | Rm; | Sh << 5 | Rm.index();
} }
inline int inline int
DATAS(int cond, int opcode, int S, int Rn, int Rd, int Rs, int Sh, int Rm) DATAS(int cond, int opcode, int S, Register Rn, Register Rd, Register Rs, int Sh, Register Rm)
{ {
return cond << 28 | opcode << 21 | S << 20 | Rn << 16 | Rd << 12 | Rs << 8 return cond << 28 | opcode << 21 | S << 20 | Rn.index() << 16 | Rd.index() << 12 | Rs.index() << 8
| Sh << 5 | 1 << 4 | Rm; | Sh << 5 | 1 << 4 | Rm.index();
} }
inline int DATAI(int cond, int opcode, int S, int Rn, int Rd, int rot, int imm) inline int DATAI(int cond, int opcode, int S, Register Rn, Register Rd, int rot, int imm)
{ {
return cond << 28 | 1 << 25 | opcode << 21 | S << 20 | Rn << 16 | Rd << 12 return cond << 28 | 1 << 25 | opcode << 21 | S << 20 | Rn.index() << 16 | Rd.index() << 12
| rot << 8 | (imm & 0xff); | rot << 8 | (imm & 0xff);
} }
inline int BRANCH(int cond, int L, int offset) inline int BRANCH(int cond, int L, int offset)
{ {
return cond << 28 | 5 << 25 | L << 24 | (offset & 0xffffff); return cond << 28 | 5 << 25 | L << 24 | (offset & 0xffffff);
} }
inline int BRANCHX(int cond, int L, int Rm) inline int BRANCHX(int cond, int L, Register Rm)
{ {
return cond << 28 | 0x4bffc << 6 | L << 5 | 1 << 4 | Rm; return cond << 28 | 0x4bffc << 6 | L << 5 | 1 << 4 | Rm.index();
} }
inline int MULTIPLY(int cond, int mul, int S, int Rd, int Rn, int Rs, int Rm) inline int MULTIPLY(int cond, int mul, int S, Register Rd, Register Rn, Register Rs, Register Rm)
{ {
return cond << 28 | mul << 21 | S << 20 | Rd << 16 | Rn << 12 | Rs << 8 return cond << 28 | mul << 21 | S << 20 | Rd.index() << 16 | Rn.index() << 12 | Rs.index() << 8
| 9 << 4 | Rm; | 9 << 4 | Rm.index();
} }
inline int XFER(int cond, inline int XFER(int cond,
int P, int P,
@ -81,14 +81,14 @@ inline int XFER(int cond,
int B, int B,
int W, int W,
int L, int L,
int Rn, Register Rn,
int Rd, Register Rd,
int shift, int shift,
int Sh, int Sh,
int Rm) Register Rm)
{ {
return cond << 28 | 3 << 25 | P << 24 | U << 23 | B << 22 | W << 21 | L << 20 return cond << 28 | 3 << 25 | P << 24 | U << 23 | B << 22 | W << 21 | L << 20
| Rn << 16 | Rd << 12 | shift << 7 | Sh << 5 | Rm; | Rn.index() << 16 | Rd.index() << 12 | shift << 7 | Sh << 5 | Rm.index();
} }
inline int XFERI(int cond, inline int XFERI(int cond,
int P, int P,
@ -96,41 +96,41 @@ inline int XFERI(int cond,
int B, int B,
int W, int W,
int L, int L,
int Rn, Register Rn,
int Rd, Register Rd,
int offset) int offset)
{ {
return cond << 28 | 2 << 25 | P << 24 | U << 23 | B << 22 | W << 21 | L << 20 return cond << 28 | 2 << 25 | P << 24 | U << 23 | B << 22 | W << 21 | L << 20
| Rn << 16 | Rd << 12 | (offset & 0xfff); | Rn.index() << 16 | Rd.index() << 12 | (offset & 0xfff);
} }
inline int XFER2(int cond, inline int XFER2(int cond,
int P, int P,
int U, int U,
int W, int W,
int L, int L,
int Rn, Register Rn,
int Rd, Register Rd,
int S, int S,
int H, int H,
int Rm) Register Rm)
{ {
return cond << 28 | P << 24 | U << 23 | W << 21 | L << 20 | Rn << 16 return cond << 28 | P << 24 | U << 23 | W << 21 | L << 20 | Rn.index() << 16
| Rd << 12 | 1 << 7 | S << 6 | H << 5 | 1 << 4 | Rm; | Rd.index() << 12 | 1 << 7 | S << 6 | H << 5 | 1 << 4 | Rm.index();
} }
inline int XFER2I(int cond, inline int XFER2I(int cond,
int P, int P,
int U, int U,
int W, int W,
int L, int L,
int Rn, Register Rn,
int Rd, Register Rd,
int offsetH, int offsetH,
int S, int S,
int H, int H,
int offsetL) int offsetL)
{ {
return cond << 28 | P << 24 | U << 23 | 1 << 22 | W << 21 | L << 20 | Rn << 16 return cond << 28 | P << 24 | U << 23 | 1 << 22 | W << 21 | L << 20 | Rn.index() << 16
| Rd << 12 | offsetH << 8 | 1 << 7 | S << 6 | H << 5 | 1 << 4 | Rd.index() << 12 | offsetH << 8 | 1 << 7 | S << 6 | H << 5 | 1 << 4
| (offsetL & 0xf); | (offsetL & 0xf);
} }
inline int COOP(int cond, inline int COOP(int cond,
@ -150,30 +150,30 @@ inline int COXFER(int cond,
int N, int N,
int W, int W,
int L, int L,
int Rn, Register Rn,
int CRd, int CRd,
int cp_num, int cp_num,
int offset) // offset is in words, not bytes int offset) // offset is in words, not bytes
{ {
return cond << 28 | 0x6 << 25 | P << 24 | U << 23 | N << 22 | W << 21 return cond << 28 | 0x6 << 25 | P << 24 | U << 23 | N << 22 | W << 21
| L << 20 | Rn << 16 | CRd << 12 | cp_num << 8 | (offset & 0xff) >> 2; | L << 20 | Rn.index() << 16 | CRd << 12 | cp_num << 8 | (offset & 0xff) >> 2;
} }
inline int COREG(int cond, inline int COREG(int cond,
int opcode_1, int opcode_1,
int L, int L,
int CRn, int CRn,
int Rd, Register Rd,
int cp_num, int cp_num,
int opcode_2, int opcode_2,
int CRm) int CRm)
{ {
return cond << 28 | 0xe << 24 | opcode_1 << 21 | L << 20 | CRn << 16 return cond << 28 | 0xe << 24 | opcode_1 << 21 | L << 20 | CRn << 16
| Rd << 12 | cp_num << 8 | opcode_2 << 5 | 1 << 4 | CRm; | Rd.index() << 12 | cp_num << 8 | opcode_2 << 5 | 1 << 4 | CRm;
} }
inline int inline int
COREG2(int cond, int L, int Rn, int Rd, int cp_num, int opcode, int CRm) COREG2(int cond, int L, Register Rn, Register Rd, int cp_num, int opcode, int CRm)
{ {
return cond << 28 | 0xc4 << 20 | L << 20 | Rn << 16 | Rd << 12 | cp_num << 8 return cond << 28 | 0xc4 << 20 | L << 20 | Rn.index() << 16 | Rd.index() << 12 | cp_num << 8
| opcode << 4 | CRm; | opcode << 4 | CRm;
} }
// FIELD CALCULATORS // FIELD CALCULATORS
@ -191,143 +191,143 @@ inline int bl(int offset)
{ {
return BRANCH(AL, 1, offset); return BRANCH(AL, 1, offset);
} }
inline int bx(int Rm) inline int bx(Register Rm)
{ {
return BRANCHX(AL, 0, Rm); return BRANCHX(AL, 0, Rm);
} }
inline int blx(int Rm) inline int blx(Register Rm)
{ {
return BRANCHX(AL, 1, Rm); return BRANCHX(AL, 1, Rm);
} }
inline int and_(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0) inline int and_(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
{ {
return DATA(AL, 0x0, 0, Rn, Rd, shift, Sh, Rm); return DATA(AL, 0x0, 0, Rn, Rd, shift, Sh, Rm);
} }
inline int eor(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0) inline int eor(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
{ {
return DATA(AL, 0x1, 0, Rn, Rd, shift, Sh, Rm); return DATA(AL, 0x1, 0, Rn, Rd, shift, Sh, Rm);
} }
inline int rsb(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0) inline int rsb(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
{ {
return DATA(AL, 0x3, 0, Rn, Rd, shift, Sh, Rm); return DATA(AL, 0x3, 0, Rn, Rd, shift, Sh, Rm);
} }
inline int add(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0) inline int add(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
{ {
return DATA(AL, 0x4, 0, Rn, Rd, shift, Sh, Rm); return DATA(AL, 0x4, 0, Rn, Rd, shift, Sh, Rm);
} }
inline int adc(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0) inline int adc(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
{ {
return DATA(AL, 0x5, 0, Rn, Rd, shift, Sh, Rm); return DATA(AL, 0x5, 0, Rn, Rd, shift, Sh, Rm);
} }
inline int rsc(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0) inline int rsc(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
{ {
return DATA(AL, 0x7, 0, Rn, Rd, shift, Sh, Rm); return DATA(AL, 0x7, 0, Rn, Rd, shift, Sh, Rm);
} }
inline int cmp(int Rn, int Rm, int Sh = 0, int shift = 0) inline int cmp(Register Rn, Register Rm, int Sh = 0, int shift = 0)
{ {
return DATA(AL, 0xa, 1, Rn, 0, shift, Sh, Rm); return DATA(AL, 0xa, 1, Rn, Register(0), shift, Sh, Rm);
} }
inline int orr(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0) inline int orr(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
{ {
return DATA(AL, 0xc, 0, Rn, Rd, shift, Sh, Rm); return DATA(AL, 0xc, 0, Rn, Rd, shift, Sh, Rm);
} }
inline int mov(int Rd, int Rm, int Sh = 0, int shift = 0) inline int mov(Register Rd, Register Rm, int Sh = 0, int shift = 0)
{ {
return DATA(AL, 0xd, 0, 0, Rd, shift, Sh, Rm); return DATA(AL, 0xd, 0, Register(0), Rd, shift, Sh, Rm);
} }
inline int mvn(int Rd, int Rm, int Sh = 0, int shift = 0) inline int mvn(Register Rd, Register Rm, int Sh = 0, int shift = 0)
{ {
return DATA(AL, 0xf, 0, 0, Rd, shift, Sh, Rm); return DATA(AL, 0xf, 0, Register(0), Rd, shift, Sh, Rm);
} }
inline int andi(int Rd, int Rn, int imm, int rot = 0) inline int andi(Register Rd, Register Rn, int imm, int rot = 0)
{ {
return DATAI(AL, 0x0, 0, Rn, Rd, rot, imm); return DATAI(AL, 0x0, 0, Rn, Rd, rot, imm);
} }
inline int subi(int Rd, int Rn, int imm, int rot = 0) inline int subi(Register Rd, Register Rn, int imm, int rot = 0)
{ {
return DATAI(AL, 0x2, 0, Rn, Rd, rot, imm); return DATAI(AL, 0x2, 0, Rn, Rd, rot, imm);
} }
inline int rsbi(int Rd, int Rn, int imm, int rot = 0) inline int rsbi(Register Rd, Register Rn, int imm, int rot = 0)
{ {
return DATAI(AL, 0x3, 0, Rn, Rd, rot, imm); return DATAI(AL, 0x3, 0, Rn, Rd, rot, imm);
} }
inline int addi(int Rd, int Rn, int imm, int rot = 0) inline int addi(Register Rd, Register Rn, int imm, int rot = 0)
{ {
return DATAI(AL, 0x4, 0, Rn, Rd, rot, imm); return DATAI(AL, 0x4, 0, Rn, Rd, rot, imm);
} }
inline int adci(int Rd, int Rn, int imm, int rot = 0) inline int adci(Register Rd, Register Rn, int imm, int rot = 0)
{ {
return DATAI(AL, 0x5, 0, Rn, Rd, rot, imm); return DATAI(AL, 0x5, 0, Rn, Rd, rot, imm);
} }
inline int bici(int Rd, int Rn, int imm, int rot = 0) inline int bici(Register Rd, Register Rn, int imm, int rot = 0)
{ {
return DATAI(AL, 0xe, 0, Rn, Rd, rot, imm); return DATAI(AL, 0xe, 0, Rn, Rd, rot, imm);
} }
inline int cmpi(int Rn, int imm, int rot = 0) inline int cmpi(Register Rn, int imm, int rot = 0)
{ {
return DATAI(AL, 0xa, 1, Rn, 0, rot, imm); return DATAI(AL, 0xa, 1, Rn, Register(0), rot, imm);
} }
inline int movi(int Rd, int imm, int rot = 0) inline int movi(Register Rd, int imm, int rot = 0)
{ {
return DATAI(AL, 0xd, 0, 0, Rd, rot, imm); return DATAI(AL, 0xd, 0, Register(0), Rd, rot, imm);
} }
inline int orrsh(int Rd, int Rn, int Rm, int Rs, int Sh) inline int orrsh(Register Rd, Register Rn, Register Rm, Register Rs, int Sh)
{ {
return DATAS(AL, 0xc, 0, Rn, Rd, Rs, Sh, Rm); return DATAS(AL, 0xc, 0, Rn, Rd, Rs, Sh, Rm);
} }
inline int movsh(int Rd, int Rm, int Rs, int Sh) inline int movsh(Register Rd, Register Rm, Register Rs, int Sh)
{ {
return DATAS(AL, 0xd, 0, 0, Rd, Rs, Sh, Rm); return DATAS(AL, 0xd, 0, Register(0), Rd, Rs, Sh, Rm);
} }
inline int mul(int Rd, int Rm, int Rs) inline int mul(Register Rd, Register Rm, Register Rs)
{ {
return MULTIPLY(AL, 0, 0, Rd, 0, Rs, Rm); return MULTIPLY(AL, 0, 0, Rd, Register(0), Rs, Rm);
} }
inline int mla(int Rd, int Rm, int Rs, int Rn) inline int mla(Register Rd, Register Rm, Register Rs, Register Rn)
{ {
return MULTIPLY(AL, 1, 0, Rd, Rn, Rs, Rm); return MULTIPLY(AL, 1, 0, Rd, Rn, Rs, Rm);
} }
inline int umull(int RdLo, int RdHi, int Rm, int Rs) inline int umull(Register RdLo, Register RdHi, Register Rm, Register Rs)
{ {
return MULTIPLY(AL, 4, 0, RdHi, RdLo, Rs, Rm); return MULTIPLY(AL, 4, 0, RdHi, RdLo, Rs, Rm);
} }
inline int ldr(int Rd, int Rn, int Rm, int W = 0) inline int ldr(Register Rd, Register Rn, Register Rm, int W = 0)
{ {
return XFER(AL, 1, 1, 0, W, 1, Rn, Rd, 0, 0, Rm); return XFER(AL, 1, 1, 0, W, 1, Rn, Rd, 0, 0, Rm);
} }
inline int ldri(int Rd, int Rn, int imm, int W = 0) inline int ldri(Register Rd, Register Rn, int imm, int W = 0)
{ {
return XFERI(AL, 1, calcU(imm), 0, W, 1, Rn, Rd, abs(imm)); return XFERI(AL, 1, calcU(imm), 0, W, 1, Rn, Rd, abs(imm));
} }
inline int ldrb(int Rd, int Rn, int Rm) inline int ldrb(Register Rd, Register Rn, Register Rm)
{ {
return XFER(AL, 1, 1, 1, 0, 1, Rn, Rd, 0, 0, Rm); return XFER(AL, 1, 1, 1, 0, 1, Rn, Rd, 0, 0, Rm);
} }
inline int ldrbi(int Rd, int Rn, int imm) inline int ldrbi(Register Rd, Register Rn, int imm)
{ {
return XFERI(AL, 1, calcU(imm), 1, 0, 1, Rn, Rd, abs(imm)); return XFERI(AL, 1, calcU(imm), 1, 0, 1, Rn, Rd, abs(imm));
} }
inline int str(int Rd, int Rn, int Rm, int W = 0) inline int str(Register Rd, Register Rn, Register Rm, int W = 0)
{ {
return XFER(AL, 1, 1, 0, W, 0, Rn, Rd, 0, 0, Rm); return XFER(AL, 1, 1, 0, W, 0, Rn, Rd, 0, 0, Rm);
} }
inline int stri(int Rd, int Rn, int imm, int W = 0) inline int stri(Register Rd, Register Rn, int imm, int W = 0)
{ {
return XFERI(AL, 1, calcU(imm), 0, W, 0, Rn, Rd, abs(imm)); return XFERI(AL, 1, calcU(imm), 0, W, 0, Rn, Rd, abs(imm));
} }
inline int strb(int Rd, int Rn, int Rm) inline int strb(Register Rd, Register Rn, Register Rm)
{ {
return XFER(AL, 1, 1, 1, 0, 0, Rn, Rd, 0, 0, Rm); return XFER(AL, 1, 1, 1, 0, 0, Rn, Rd, 0, 0, Rm);
} }
inline int strbi(int Rd, int Rn, int imm) inline int strbi(Register Rd, Register Rn, int imm)
{ {
return XFERI(AL, 1, calcU(imm), 1, 0, 0, Rn, Rd, abs(imm)); return XFERI(AL, 1, calcU(imm), 1, 0, 0, Rn, Rd, abs(imm));
} }
inline int ldrh(int Rd, int Rn, int Rm) inline int ldrh(Register Rd, Register Rn, Register Rm)
{ {
return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 0, 1, Rm); return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 0, 1, Rm);
} }
inline int ldrhi(int Rd, int Rn, int imm) inline int ldrhi(Register Rd, Register Rn, int imm)
{ {
return XFER2I(AL, return XFER2I(AL,
1, 1,
@ -341,11 +341,11 @@ inline int ldrhi(int Rd, int Rn, int imm)
1, 1,
abs(imm) & 0xf); abs(imm) & 0xf);
} }
inline int strh(int Rd, int Rn, int Rm) inline int strh(Register Rd, Register Rn, Register Rm)
{ {
return XFER2(AL, 1, 1, 0, 0, Rn, Rd, 0, 1, Rm); return XFER2(AL, 1, 1, 0, 0, Rn, Rd, 0, 1, Rm);
} }
inline int strhi(int Rd, int Rn, int imm) inline int strhi(Register Rd, Register Rn, int imm)
{ {
return XFER2I(AL, return XFER2I(AL,
1, 1,
@ -359,11 +359,11 @@ inline int strhi(int Rd, int Rn, int imm)
1, 1,
abs(imm) & 0xf); abs(imm) & 0xf);
} }
inline int ldrsh(int Rd, int Rn, int Rm) inline int ldrsh(Register Rd, Register Rn, Register Rm)
{ {
return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 1, 1, Rm); return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 1, 1, Rm);
} }
inline int ldrshi(int Rd, int Rn, int imm) inline int ldrshi(Register Rd, Register Rn, int imm)
{ {
return XFER2I(AL, return XFER2I(AL,
1, 1,
@ -377,11 +377,11 @@ inline int ldrshi(int Rd, int Rn, int imm)
1, 1,
abs(imm) & 0xf); abs(imm) & 0xf);
} }
inline int ldrsb(int Rd, int Rn, int Rm) inline int ldrsb(Register Rd, Register Rn, Register Rm)
{ {
return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 1, 0, Rm); return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 1, 0, Rm);
} }
inline int ldrsbi(int Rd, int Rn, int imm) inline int ldrsbi(Register Rd, Register Rn, int imm)
{ {
return XFER2I(AL, return XFER2I(AL,
1, 1,
@ -403,27 +403,27 @@ inline int bkpt(int16_t immed)
// COPROCESSOR INSTRUCTIONS // COPROCESSOR INSTRUCTIONS
inline int mcr(int coproc, inline int mcr(int coproc,
int opcode_1, int opcode_1,
int Rd, Register Rd,
int CRn, int CRn,
int CRm, int CRm,
int opcode_2 = 0) int opcode_2 = 0)
{ {
return COREG(AL, opcode_1, 0, CRn, Rd, coproc, opcode_2, CRm); return COREG(AL, opcode_1, 0, CRn, Rd, coproc, opcode_2, CRm);
} }
inline int mcrr(int coproc, int opcode, int Rd, int Rn, int CRm) inline int mcrr(int coproc, int opcode, Register Rd, Register Rn, int CRm)
{ {
return COREG2(AL, 0, Rn, Rd, coproc, opcode, CRm); return COREG2(AL, 0, Rn, Rd, coproc, opcode, CRm);
} }
inline int mrc(int coproc, inline int mrc(int coproc,
int opcode_1, int opcode_1,
int Rd, Register Rd,
int CRn, int CRn,
int CRm, int CRm,
int opcode_2 = 0) int opcode_2 = 0)
{ {
return COREG(AL, opcode_1, 1, CRn, Rd, coproc, opcode_2, CRm); return COREG(AL, opcode_1, 1, CRn, Rd, coproc, opcode_2, CRm);
} }
inline int mrrc(int coproc, int opcode, int Rd, int Rn, int CRm) inline int mrrc(int coproc, int opcode, Register Rd, Register Rn, int CRm)
{ {
return COREG2(AL, 1, Rn, Rd, coproc, opcode, CRm); return COREG2(AL, 1, Rn, Rd, coproc, opcode, CRm);
} }
@ -551,42 +551,42 @@ inline int ftosizd(int Sd, int Dm)
return COOP(AL, 0xb | (Sd & 1) << 2, 0xd, Sd >> 1, 11, 6, Dm); return COOP(AL, 0xb | (Sd & 1) << 2, 0xd, Sd >> 1, 11, 6, Dm);
} }
// single load/store instructions for both precision types // single load/store instructions for both precision types
inline int flds(int Sd, int Rn, int offset = 0) inline int flds(int Sd, Register Rn, int offset = 0)
{ {
return COXFER(AL, 1, 1, Sd & 1, 0, 1, Rn, Sd >> 1, 10, offset); return COXFER(AL, 1, 1, Sd & 1, 0, 1, Rn, Sd >> 1, 10, offset);
}; };
inline int fldd(int Dd, int Rn, int offset = 0) inline int fldd(int Dd, Register Rn, int offset = 0)
{ {
return COXFER(AL, 1, 1, 0, 0, 1, Rn, Dd, 11, offset); return COXFER(AL, 1, 1, 0, 0, 1, Rn, Dd, 11, offset);
}; };
inline int fsts(int Sd, int Rn, int offset = 0) inline int fsts(int Sd, Register Rn, int offset = 0)
{ {
return COXFER(AL, 1, 1, Sd & 1, 0, 0, Rn, Sd >> 1, 10, offset); return COXFER(AL, 1, 1, Sd & 1, 0, 0, Rn, Sd >> 1, 10, offset);
}; };
inline int fstd(int Dd, int Rn, int offset = 0) inline int fstd(int Dd, Register Rn, int offset = 0)
{ {
return COXFER(AL, 1, 1, 0, 0, 0, Rn, Dd, 11, offset); return COXFER(AL, 1, 1, 0, 0, 0, Rn, Dd, 11, offset);
}; };
// move between GPRs and FPRs // move between GPRs and FPRs
inline int fmsr(int Sn, int Rd) inline int fmsr(int Sn, Register Rd)
{ {
return mcr(10, 0, Rd, Sn >> 1, 0, (Sn & 1) << 2); return mcr(10, 0, Rd, Sn >> 1, 0, (Sn & 1) << 2);
} }
inline int fmrs(int Rd, int Sn) inline int fmrs(Register Rd, int Sn)
{ {
return mrc(10, 0, Rd, Sn >> 1, 0, (Sn & 1) << 2); return mrc(10, 0, Rd, Sn >> 1, 0, (Sn & 1) << 2);
} }
// move to/from VFP system registers // move to/from VFP system registers
inline int fmrx(int Rd, int reg) inline int fmrx(Register Rd, int reg)
{ {
return mrc(10, 7, Rd, reg, 0); return mrc(10, 7, Rd, reg, 0);
} }
// these move around pairs of single-precision registers // these move around pairs of single-precision registers
inline int fmdrr(int Dm, int Rd, int Rn) inline int fmdrr(int Dm, Register Rd, Register Rn)
{ {
return mcrr(11, 1, Rd, Rn, Dm); return mcrr(11, 1, Rd, Rn, Dm);
} }
inline int fmrrd(int Rd, int Rn, int Dm) inline int fmrrd(Register Rd, Register Rn, int Dm)
{ {
return mrrc(11, 1, Rd, Rn, Dm); return mrrc(11, 1, Rd, Rn, Dm);
} }
@ -600,27 +600,27 @@ inline int SETS(int ins)
return ins | 1 << 20; return ins | 1 << 20;
} }
// PSEUDO-INSTRUCTIONS // PSEUDO-INSTRUCTIONS
inline int lsl(int Rd, int Rm, int Rs) inline int lsl(Register Rd, Register Rm, Register Rs)
{ {
return movsh(Rd, Rm, Rs, LSL); return movsh(Rd, Rm, Rs, LSL);
} }
inline int lsli(int Rd, int Rm, int imm) inline int lsli(Register Rd, Register Rm, int imm)
{ {
return mov(Rd, Rm, LSL, imm); return mov(Rd, Rm, LSL, imm);
} }
inline int lsr(int Rd, int Rm, int Rs) inline int lsr(Register Rd, Register Rm, Register Rs)
{ {
return movsh(Rd, Rm, Rs, LSR); return movsh(Rd, Rm, Rs, LSR);
} }
inline int lsri(int Rd, int Rm, int imm) inline int lsri(Register Rd, Register Rm, int imm)
{ {
return mov(Rd, Rm, LSR, imm); return mov(Rd, Rm, LSR, imm);
} }
inline int asr(int Rd, int Rm, int Rs) inline int asr(Register Rd, Register Rm, Register Rs)
{ {
return movsh(Rd, Rm, Rs, ASR); return movsh(Rd, Rm, Rs, ASR);
} }
inline int asri(int Rd, int Rm, int imm) inline int asri(Register Rd, Register Rm, int imm)
{ {
return mov(Rd, Rm, ASR, imm); return mov(Rd, Rm, ASR, imm);
} }
@ -670,7 +670,7 @@ inline int bpl(int offset)
} }
inline int fmstat() inline int fmstat()
{ {
return fmrx(15, FPSCR); return fmrx(Register(15), FPSCR);
} }
// todo: make this pretty: // todo: make this pretty:
inline int dmb() inline int dmb()

View File

@ -12,6 +12,12 @@
#include "fixup.h" #include "fixup.h"
#include "block.h" #include "block.h"
namespace {
const unsigned InstructionSize = 4;
} // namespace
namespace avian { namespace avian {
namespace codegen { namespace codegen {
namespace arm { namespace arm {
@ -38,8 +44,7 @@ int64_t OffsetPromise::value()
assertT(con, resolved()); assertT(con, resolved());
unsigned o = offset - block->offset; unsigned o = offset - block->offset;
return block->start return block->start + padding(block, forTrace ? o - InstructionSize : o) + o;
+ padding(block, forTrace ? o - vm::TargetBytesPerWord : o) + o;
} }
Promise* offsetPromise(Context* con, bool forTrace) Promise* offsetPromise(Context* con, bool forTrace)
@ -92,17 +97,30 @@ bool bounded(int right, int left, int32_t v)
void* updateOffset(vm::System* s, uint8_t* instruction, int64_t value) void* updateOffset(vm::System* s, uint8_t* instruction, int64_t value)
{ {
// ARM's PC is two words ahead, and branches drop the bottom 2 bits.
int32_t v = (reinterpret_cast<uint8_t*>(value) - (instruction + 8)) >> 2;
int32_t mask;
expect(s, bounded(0, 8, v));
mask = 0xFFFFFF;
int32_t* p = reinterpret_cast<int32_t*>(instruction); int32_t* p = reinterpret_cast<int32_t*>(instruction);
int32_t v;
int32_t mask;
if (vm::TargetBytesPerWord == 8) {
if ((*p >> 24) == 0x54) {
// conditional branch
v = ((reinterpret_cast<uint8_t*>(value) - instruction) >> 2) << 5;
mask = 0xFFFFE0;
} else {
// unconditional branch
v = (reinterpret_cast<uint8_t*>(value) - instruction) >> 2;
mask = 0x3FFFFFF;
}
} else {
v = (reinterpret_cast<uint8_t*>(value) - (instruction + 8)) >> 2;
mask = 0xFFFFFF;
}
expect(s, bounded(0, 8, v));
*p = (v & mask) | ((~mask) & *p); *p = (v & mask) | ((~mask) & *p);
return instruction + 4; return instruction + InstructionSize;
} }
ConstantPoolEntry::ConstantPoolEntry(Context* con, ConstantPoolEntry::ConstantPoolEntry(Context* con,
@ -214,6 +232,101 @@ void appendPoolEvent(Context* con,
b->poolEventTail = e; b->poolEventTail = e;
} }
bool needJump(MyBlock* b)
{
return b->next or b->size != (b->size & PoolOffsetMask);
}
unsigned padding(MyBlock* b, unsigned offset)
{
unsigned total = 0;
for (PoolEvent* e = b->poolEventHead; e; e = e->next) {
if (e->offset <= offset) {
if (needJump(b)) {
total += vm::TargetBytesPerWord;
}
for (PoolOffset* o = e->poolOffsetHead; o; o = o->next) {
total += vm::TargetBytesPerWord;
}
} else {
break;
}
}
return total;
}
void resolve(MyBlock* b)
{
Context* con = b->context;
if (b->poolOffsetHead) {
if (con->poolOffsetTail) {
con->poolOffsetTail->next = b->poolOffsetHead;
} else {
con->poolOffsetHead = b->poolOffsetHead;
}
con->poolOffsetTail = b->poolOffsetTail;
}
if (con->poolOffsetHead) {
bool append;
if (b->next == 0 or b->next->poolEventHead) {
append = true;
} else {
int32_t v
= (b->start + b->size + b->next->size + vm::TargetBytesPerWord - 8)
- (con->poolOffsetHead->offset + con->poolOffsetHead->block->start);
append = (v != (v & PoolOffsetMask));
if (DebugPool) {
fprintf(stderr,
"current %p %d %d next %p %d %d\n",
b,
b->start,
b->size,
b->next,
b->start + b->size,
b->next->size);
fprintf(stderr,
"offset %p %d is of distance %d to next block; append? %d\n",
con->poolOffsetHead,
con->poolOffsetHead->offset,
v,
append);
}
}
if (append) {
#ifndef NDEBUG
int32_t v
= (b->start + b->size - 8)
- (con->poolOffsetHead->offset + con->poolOffsetHead->block->start);
expect(con, v == (v & PoolOffsetMask));
#endif // not NDEBUG
appendPoolEvent(
con, b, b->size, con->poolOffsetHead, con->poolOffsetTail);
if (DebugPool) {
for (PoolOffset* o = con->poolOffsetHead; o; o = o->next) {
fprintf(stderr,
"include %p %d in pool event %p at offset %d in block %p\n",
o,
o->offset,
b->poolEventTail,
b->size,
b);
}
}
con->poolOffsetHead = 0;
con->poolOffsetTail = 0;
}
}
}
} // namespace arm } // namespace arm
} // namespace codegen } // namespace codegen
} // namespace avian } // namespace avian

View File

@ -27,7 +27,7 @@ namespace arm {
const bool DebugPool = false; const bool DebugPool = false;
const int32_t PoolOffsetMask = 0xFFF; const int32_t PoolOffsetMask = vm::TargetBytesPerWord == 8 ? 0x1FFFFF : 0xFFF;
class Task { class Task {
public: public:

View File

@ -22,35 +22,35 @@ using namespace util;
unsigned index(ArchitectureContext*, unsigned index(ArchitectureContext*,
lir::BinaryOperation operation, lir::BinaryOperation operation,
lir::OperandType operand1, lir::Operand::Type operand1,
lir::OperandType operand2) lir::Operand::Type operand2)
{ {
return operation + (lir::BinaryOperationCount * operand1) return operation + (lir::BinaryOperationCount * (unsigned)operand1)
+ (lir::BinaryOperationCount * lir::OperandTypeCount * operand2); + (lir::BinaryOperationCount * lir::Operand::TypeCount * (unsigned)operand2);
} }
unsigned index(ArchitectureContext* con UNUSED, unsigned index(ArchitectureContext* con UNUSED,
lir::TernaryOperation operation, lir::TernaryOperation operation,
lir::OperandType operand1) lir::Operand::Type operand1)
{ {
assertT(con, not isBranch(operation)); assertT(con, not isBranch(operation));
return operation + (lir::NonBranchTernaryOperationCount * operand1); return operation + (lir::NonBranchTernaryOperationCount * (unsigned)operand1);
} }
unsigned branchIndex(ArchitectureContext* con UNUSED, unsigned branchIndex(ArchitectureContext* con UNUSED,
lir::OperandType operand1, lir::Operand::Type operand1,
lir::OperandType operand2) lir::Operand::Type operand2)
{ {
return operand1 + (lir::OperandTypeCount * operand2); return (unsigned)operand1 + (lir::Operand::TypeCount * (unsigned)operand2);
} }
void populateTables(ArchitectureContext* con) void populateTables(ArchitectureContext* con)
{ {
const lir::OperandType C = lir::ConstantOperand; const lir::Operand::Type C = lir::Operand::Type::Constant;
const lir::OperandType A = lir::AddressOperand; const lir::Operand::Type A = lir::Operand::Type::Address;
const lir::OperandType R = lir::RegisterOperand; const lir::Operand::Type R = lir::Operand::Type::RegisterPair;
const lir::OperandType M = lir::MemoryOperand; const lir::Operand::Type M = lir::Operand::Type::Memory;
OperationType* zo = con->operations; OperationType* zo = con->operations;
UnaryOperationType* uo = con->unaryOperations; UnaryOperationType* uo = con->unaryOperations;

View File

@ -25,16 +25,16 @@ namespace arm {
unsigned index(ArchitectureContext*, unsigned index(ArchitectureContext*,
lir::BinaryOperation operation, lir::BinaryOperation operation,
lir::OperandType operand1, lir::Operand::Type operand1,
lir::OperandType operand2); lir::Operand::Type operand2);
unsigned index(ArchitectureContext* con UNUSED, unsigned index(ArchitectureContext* con UNUSED,
lir::TernaryOperation operation, lir::TernaryOperation operation,
lir::OperandType operand1); lir::Operand::Type operand1);
unsigned branchIndex(ArchitectureContext* con UNUSED, unsigned branchIndex(ArchitectureContext* con UNUSED,
lir::OperandType operand1, lir::Operand::Type operand1,
lir::OperandType operand2); lir::Operand::Type operand2);
void populateTables(ArchitectureContext* con); void populateTables(ArchitectureContext* con);

View File

@ -25,17 +25,17 @@ class Context;
// shortcut functions // shortcut functions
inline int newTemp(Context* con) inline Register newTemp(Context* con)
{ {
return con->client->acquireTemporary(GPR_MASK); return con->client->acquireTemporary(GPR_MASK);
} }
inline int newTemp(Context* con, unsigned mask) inline Register newTemp(Context* con, RegisterMask mask)
{ {
return con->client->acquireTemporary(mask); return con->client->acquireTemporary(mask);
} }
inline void freeTemp(Context* con, int r) inline void freeTemp(Context* con, Register r)
{ {
con->client->releaseTemporary(r); con->client->releaseTemporary(r);
} }
@ -45,67 +45,67 @@ inline int64_t getValue(lir::Constant* con)
return con->value->value(); return con->value->value();
} }
inline lir::Register makeTemp(Context* con) inline lir::RegisterPair makeTemp(Context* con)
{ {
lir::Register tmp(newTemp(con)); lir::RegisterPair tmp(newTemp(con));
return tmp; return tmp;
} }
inline lir::Register makeTemp64(Context* con) inline lir::RegisterPair makeTemp64(Context* con)
{ {
lir::Register tmp(newTemp(con), newTemp(con)); lir::RegisterPair tmp(newTemp(con), newTemp(con));
return tmp; return tmp;
} }
inline void freeTemp(Context* con, const lir::Register& tmp) inline void freeTemp(Context* con, const lir::RegisterPair& tmp)
{ {
if (tmp.low != lir::NoRegister) if (tmp.low != NoRegister)
freeTemp(con, tmp.low); freeTemp(con, tmp.low);
if (tmp.high != lir::NoRegister) if (tmp.high != NoRegister)
freeTemp(con, tmp.high); freeTemp(con, tmp.high);
} }
void shiftLeftR(Context* con, void shiftLeftR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t); lir::RegisterPair* t);
void moveRR(Context* con, void moveRR(Context* con,
unsigned srcSize, unsigned srcSize,
lir::Register* src, lir::RegisterPair* src,
unsigned dstSize, unsigned dstSize,
lir::Register* dst); lir::RegisterPair* dst);
void shiftLeftC(Context* con, void shiftLeftC(Context* con,
unsigned size UNUSED, unsigned size UNUSED,
lir::Constant* a, lir::Constant* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t); lir::RegisterPair* t);
void shiftRightR(Context* con, void shiftRightR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t); lir::RegisterPair* t);
void shiftRightC(Context* con, void shiftRightC(Context* con,
unsigned size UNUSED, unsigned size UNUSED,
lir::Constant* a, lir::Constant* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t); lir::RegisterPair* t);
void unsignedShiftRightR(Context* con, void unsignedShiftRightR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t); lir::RegisterPair* t);
void unsignedShiftRightC(Context* con, void unsignedShiftRightC(Context* con,
unsigned size UNUSED, unsigned size UNUSED,
lir::Constant* a, lir::Constant* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t); lir::RegisterPair* t);
bool needJump(MyBlock* b); bool needJump(MyBlock* b);
@ -113,133 +113,133 @@ unsigned padding(MyBlock* b, unsigned offset);
void resolve(MyBlock* b); void resolve(MyBlock* b);
void jumpR(Context* con, unsigned size UNUSED, lir::Register* target); void jumpR(Context* con, unsigned size UNUSED, lir::RegisterPair* target);
void swapRR(Context* con, void swapRR(Context* con,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void moveRR(Context* con, void moveRR(Context* con,
unsigned srcSize, unsigned srcSize,
lir::Register* src, lir::RegisterPair* src,
unsigned dstSize, unsigned dstSize,
lir::Register* dst); lir::RegisterPair* dst);
void moveZRR(Context* con, void moveZRR(Context* con,
unsigned srcSize, unsigned srcSize,
lir::Register* src, lir::RegisterPair* src,
unsigned, unsigned,
lir::Register* dst); lir::RegisterPair* dst);
void moveCR(Context* con, void moveCR(Context* con,
unsigned size, unsigned size,
lir::Constant* src, lir::Constant* src,
unsigned, unsigned,
lir::Register* dst); lir::RegisterPair* dst);
void moveCR2(Context* con, void moveCR2(Context* con,
unsigned size, unsigned size,
lir::Constant* src, lir::Constant* src,
lir::Register* dst, lir::RegisterPair* dst,
Promise* callOffset); Promise* callOffset);
void moveCR(Context* con, void moveCR(Context* con,
unsigned size, unsigned size,
lir::Constant* src, lir::Constant* src,
unsigned, unsigned,
lir::Register* dst); lir::RegisterPair* dst);
void addR(Context* con, void addR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t); lir::RegisterPair* t);
void subR(Context* con, void subR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t); lir::RegisterPair* t);
void addC(Context* con, void addC(Context* con,
unsigned size, unsigned size,
lir::Constant* a, lir::Constant* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* dst); lir::RegisterPair* dst);
void subC(Context* con, void subC(Context* con,
unsigned size, unsigned size,
lir::Constant* a, lir::Constant* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* dst); lir::RegisterPair* dst);
void multiplyR(Context* con, void multiplyR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t); lir::RegisterPair* t);
void floatAbsoluteRR(Context* con, void floatAbsoluteRR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
unsigned, unsigned,
lir::Register* b); lir::RegisterPair* b);
void floatNegateRR(Context* con, void floatNegateRR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
unsigned, unsigned,
lir::Register* b); lir::RegisterPair* b);
void float2FloatRR(Context* con, void float2FloatRR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
unsigned, unsigned,
lir::Register* b); lir::RegisterPair* b);
void float2IntRR(Context* con, void float2IntRR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
unsigned, unsigned,
lir::Register* b); lir::RegisterPair* b);
void int2FloatRR(Context* con, void int2FloatRR(Context* con,
unsigned, unsigned,
lir::Register* a, lir::RegisterPair* a,
unsigned size, unsigned size,
lir::Register* b); lir::RegisterPair* b);
void floatSqrtRR(Context* con, void floatSqrtRR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
unsigned, unsigned,
lir::Register* b); lir::RegisterPair* b);
void floatAddR(Context* con, void floatAddR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t); lir::RegisterPair* t);
void floatSubtractR(Context* con, void floatSubtractR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t); lir::RegisterPair* t);
void floatMultiplyR(Context* con, void floatMultiplyR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t); lir::RegisterPair* t);
void floatDivideR(Context* con, void floatDivideR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t); lir::RegisterPair* t);
int normalize(Context* con, int normalize(Context* con,
int offset, int offset,
@ -250,7 +250,7 @@ int normalize(Context* con,
void store(Context* con, void store(Context* con,
unsigned size, unsigned size,
lir::Register* src, lir::RegisterPair* src,
int base, int base,
int offset, int offset,
int index, int index,
@ -259,7 +259,7 @@ void store(Context* con,
void moveRM(Context* con, void moveRM(Context* con,
unsigned srcSize, unsigned srcSize,
lir::Register* src, lir::RegisterPair* src,
unsigned dstSize UNUSED, unsigned dstSize UNUSED,
lir::Memory* dst); lir::Memory* dst);
@ -270,7 +270,7 @@ void load(Context* con,
int index, int index,
unsigned scale, unsigned scale,
unsigned dstSize, unsigned dstSize,
lir::Register* dst, lir::RegisterPair* dst,
bool preserveIndex, bool preserveIndex,
bool signExtend); bool signExtend);
@ -278,61 +278,61 @@ void moveMR(Context* con,
unsigned srcSize, unsigned srcSize,
lir::Memory* src, lir::Memory* src,
unsigned dstSize, unsigned dstSize,
lir::Register* dst); lir::RegisterPair* dst);
void moveZMR(Context* con, void moveZMR(Context* con,
unsigned srcSize, unsigned srcSize,
lir::Memory* src, lir::Memory* src,
unsigned dstSize, unsigned dstSize,
lir::Register* dst); lir::RegisterPair* dst);
void andR(Context* con, void andR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* dst); lir::RegisterPair* dst);
void andC(Context* con, void andC(Context* con,
unsigned size, unsigned size,
lir::Constant* a, lir::Constant* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* dst); lir::RegisterPair* dst);
void orR(Context* con, void orR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* dst); lir::RegisterPair* dst);
void xorR(Context* con, void xorR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* dst); lir::RegisterPair* dst);
void moveAR2(Context* con, void moveAR2(Context* con,
unsigned srcSize, unsigned srcSize,
lir::Address* src, lir::Address* src,
unsigned dstSize, unsigned dstSize,
lir::Register* dst); lir::RegisterPair* dst);
void moveAR(Context* con, void moveAR(Context* con,
unsigned srcSize, unsigned srcSize,
lir::Address* src, lir::Address* src,
unsigned dstSize, unsigned dstSize,
lir::Register* dst); lir::RegisterPair* dst);
void compareRR(Context* con, void compareRR(Context* con,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void compareCR(Context* con, void compareCR(Context* con,
unsigned aSize, unsigned aSize,
lir::Constant* a, lir::Constant* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void compareCM(Context* con, void compareCM(Context* con,
unsigned aSize, unsigned aSize,
@ -342,7 +342,7 @@ void compareCM(Context* con,
void compareRM(Context* con, void compareRM(Context* con,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize, unsigned bSize,
lir::Memory* b); lir::Memory* b);
@ -365,21 +365,21 @@ void branchLong(Context* con,
void branchRR(Context* con, void branchRR(Context* con,
lir::TernaryOperation op, lir::TernaryOperation op,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Constant* target); lir::Constant* target);
void branchCR(Context* con, void branchCR(Context* con,
lir::TernaryOperation op, lir::TernaryOperation op,
unsigned size, unsigned size,
lir::Constant* a, lir::Constant* a,
lir::Register* b, lir::RegisterPair* b,
lir::Constant* target); lir::Constant* target);
void branchRM(Context* con, void branchRM(Context* con,
lir::TernaryOperation op, lir::TernaryOperation op,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Memory* b, lir::Memory* b,
lir::Constant* target); lir::Constant* target);
@ -403,11 +403,11 @@ void moveCM(Context* con,
void negateRR(Context* con, void negateRR(Context* con,
unsigned srcSize, unsigned srcSize,
lir::Register* src, lir::RegisterPair* src,
unsigned dstSize UNUSED, unsigned dstSize UNUSED,
lir::Register* dst); lir::RegisterPair* dst);
void callR(Context* con, unsigned size UNUSED, lir::Register* target); void callR(Context* con, unsigned size UNUSED, lir::RegisterPair* target);
void callC(Context* con, unsigned size UNUSED, lir::Constant* target); void callC(Context* con, unsigned size UNUSED, lir::Constant* target);

View File

@ -15,6 +15,8 @@
#include "fixup.h" #include "fixup.h"
#include "multimethod.h" #include "multimethod.h"
#if TARGET_BYTES_PER_WORD == 4
namespace avian { namespace avian {
namespace codegen { namespace codegen {
namespace arm { namespace arm {
@ -35,20 +37,20 @@ inline unsigned lo8(int64_t i)
void andC(Context* con, void andC(Context* con,
unsigned size, unsigned size,
lir::Constant* a, lir::Constant* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* dst); lir::RegisterPair* dst);
void shiftLeftR(Context* con, void shiftLeftR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t) lir::RegisterPair* t)
{ {
if (size == 8) { if (size == 8) {
int tmp1 = newTemp(con), tmp2 = newTemp(con), tmp3 = newTemp(con); Register tmp1 = newTemp(con), tmp2 = newTemp(con), tmp3 = newTemp(con);
ResolvedPromise maskPromise(0x3F); ResolvedPromise maskPromise(0x3F);
lir::Constant mask(&maskPromise); lir::Constant mask(&maskPromise);
lir::Register dst(tmp3); lir::RegisterPair dst(tmp3);
andC(con, 4, &mask, a, &dst); andC(con, 4, &mask, a, &dst);
emit(con, lsl(tmp1, b->high, tmp3)); emit(con, lsl(tmp1, b->high, tmp3));
emit(con, rsbi(tmp2, tmp3, 32)); emit(con, rsbi(tmp2, tmp3, 32));
@ -61,10 +63,10 @@ void shiftLeftR(Context* con,
freeTemp(con, tmp2); freeTemp(con, tmp2);
freeTemp(con, tmp3); freeTemp(con, tmp3);
} else { } else {
int tmp = newTemp(con); Register tmp = newTemp(con);
ResolvedPromise maskPromise(0x1F); ResolvedPromise maskPromise(0x1F);
lir::Constant mask(&maskPromise); lir::Constant mask(&maskPromise);
lir::Register dst(tmp); lir::RegisterPair dst(tmp);
andC(con, size, &mask, a, &dst); andC(con, size, &mask, a, &dst);
emit(con, lsl(t->low, b->low, tmp)); emit(con, lsl(t->low, b->low, tmp));
freeTemp(con, tmp); freeTemp(con, tmp);
@ -73,15 +75,15 @@ void shiftLeftR(Context* con,
void moveRR(Context* con, void moveRR(Context* con,
unsigned srcSize, unsigned srcSize,
lir::Register* src, lir::RegisterPair* src,
unsigned dstSize, unsigned dstSize,
lir::Register* dst); lir::RegisterPair* dst);
void shiftLeftC(Context* con, void shiftLeftC(Context* con,
unsigned size UNUSED, unsigned size UNUSED,
lir::Constant* a, lir::Constant* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t) lir::RegisterPair* t)
{ {
assertT(con, size == vm::TargetBytesPerWord); assertT(con, size == vm::TargetBytesPerWord);
if (getValue(a) & 0x1F) { if (getValue(a) & 0x1F) {
@ -93,15 +95,15 @@ void shiftLeftC(Context* con,
void shiftRightR(Context* con, void shiftRightR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t) lir::RegisterPair* t)
{ {
if (size == 8) { if (size == 8) {
int tmp1 = newTemp(con), tmp2 = newTemp(con), tmp3 = newTemp(con); Register tmp1 = newTemp(con), tmp2 = newTemp(con), tmp3 = newTemp(con);
ResolvedPromise maskPromise(0x3F); ResolvedPromise maskPromise(0x3F);
lir::Constant mask(&maskPromise); lir::Constant mask(&maskPromise);
lir::Register dst(tmp3); lir::RegisterPair dst(tmp3);
andC(con, 4, &mask, a, &dst); andC(con, 4, &mask, a, &dst);
emit(con, lsr(tmp1, b->low, tmp3)); emit(con, lsr(tmp1, b->low, tmp3));
emit(con, rsbi(tmp2, tmp3, 32)); emit(con, rsbi(tmp2, tmp3, 32));
@ -114,10 +116,10 @@ void shiftRightR(Context* con,
freeTemp(con, tmp2); freeTemp(con, tmp2);
freeTemp(con, tmp3); freeTemp(con, tmp3);
} else { } else {
int tmp = newTemp(con); Register tmp = newTemp(con);
ResolvedPromise maskPromise(0x1F); ResolvedPromise maskPromise(0x1F);
lir::Constant mask(&maskPromise); lir::Constant mask(&maskPromise);
lir::Register dst(tmp); lir::RegisterPair dst(tmp);
andC(con, size, &mask, a, &dst); andC(con, size, &mask, a, &dst);
emit(con, asr(t->low, b->low, tmp)); emit(con, asr(t->low, b->low, tmp));
freeTemp(con, tmp); freeTemp(con, tmp);
@ -127,8 +129,8 @@ void shiftRightR(Context* con,
void shiftRightC(Context* con, void shiftRightC(Context* con,
unsigned size UNUSED, unsigned size UNUSED,
lir::Constant* a, lir::Constant* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t) lir::RegisterPair* t)
{ {
assertT(con, size == vm::TargetBytesPerWord); assertT(con, size == vm::TargetBytesPerWord);
if (getValue(a) & 0x1F) { if (getValue(a) & 0x1F) {
@ -140,18 +142,18 @@ void shiftRightC(Context* con,
void unsignedShiftRightR(Context* con, void unsignedShiftRightR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t) lir::RegisterPair* t)
{ {
int tmpShift = newTemp(con); Register tmpShift = newTemp(con);
ResolvedPromise maskPromise(size == 8 ? 0x3F : 0x1F); ResolvedPromise maskPromise(size == 8 ? 0x3F : 0x1F);
lir::Constant mask(&maskPromise); lir::Constant mask(&maskPromise);
lir::Register dst(tmpShift); lir::RegisterPair dst(tmpShift);
andC(con, 4, &mask, a, &dst); andC(con, 4, &mask, a, &dst);
emit(con, lsr(t->low, b->low, tmpShift)); emit(con, lsr(t->low, b->low, tmpShift));
if (size == 8) { if (size == 8) {
int tmpHi = newTemp(con), tmpLo = newTemp(con); Register tmpHi = newTemp(con), tmpLo = newTemp(con);
emit(con, SETS(rsbi(tmpHi, tmpShift, 32))); emit(con, SETS(rsbi(tmpHi, tmpShift, 32)));
emit(con, lsl(tmpLo, b->high, tmpHi)); emit(con, lsl(tmpLo, b->high, tmpHi));
emit(con, orr(t->low, t->low, tmpLo)); emit(con, orr(t->low, t->low, tmpLo));
@ -168,8 +170,8 @@ void unsignedShiftRightR(Context* con,
void unsignedShiftRightC(Context* con, void unsignedShiftRightC(Context* con,
unsigned size UNUSED, unsigned size UNUSED,
lir::Constant* a, lir::Constant* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t) lir::RegisterPair* t)
{ {
assertT(con, size == vm::TargetBytesPerWord); assertT(con, size == vm::TargetBytesPerWord);
if (getValue(a) & 0x1F) { if (getValue(a) & 0x1F) {
@ -179,102 +181,7 @@ void unsignedShiftRightC(Context* con,
} }
} }
bool needJump(MyBlock* b) void jumpR(Context* con, unsigned size UNUSED, lir::RegisterPair* target)
{
return b->next or b->size != (b->size & PoolOffsetMask);
}
unsigned padding(MyBlock* b, unsigned offset)
{
unsigned total = 0;
for (PoolEvent* e = b->poolEventHead; e; e = e->next) {
if (e->offset <= offset) {
if (needJump(b)) {
total += vm::TargetBytesPerWord;
}
for (PoolOffset* o = e->poolOffsetHead; o; o = o->next) {
total += vm::TargetBytesPerWord;
}
} else {
break;
}
}
return total;
}
void resolve(MyBlock* b)
{
Context* con = b->context;
if (b->poolOffsetHead) {
if (con->poolOffsetTail) {
con->poolOffsetTail->next = b->poolOffsetHead;
} else {
con->poolOffsetHead = b->poolOffsetHead;
}
con->poolOffsetTail = b->poolOffsetTail;
}
if (con->poolOffsetHead) {
bool append;
if (b->next == 0 or b->next->poolEventHead) {
append = true;
} else {
int32_t v
= (b->start + b->size + b->next->size + vm::TargetBytesPerWord - 8)
- (con->poolOffsetHead->offset + con->poolOffsetHead->block->start);
append = (v != (v & PoolOffsetMask));
if (DebugPool) {
fprintf(stderr,
"current %p %d %d next %p %d %d\n",
b,
b->start,
b->size,
b->next,
b->start + b->size,
b->next->size);
fprintf(stderr,
"offset %p %d is of distance %d to next block; append? %d\n",
con->poolOffsetHead,
con->poolOffsetHead->offset,
v,
append);
}
}
if (append) {
#ifndef NDEBUG
int32_t v
= (b->start + b->size - 8)
- (con->poolOffsetHead->offset + con->poolOffsetHead->block->start);
expect(con, v == (v & PoolOffsetMask));
#endif // not NDEBUG
appendPoolEvent(
con, b, b->size, con->poolOffsetHead, con->poolOffsetTail);
if (DebugPool) {
for (PoolOffset* o = con->poolOffsetHead; o; o = o->next) {
fprintf(stderr,
"include %p %d in pool event %p at offset %d in block %p\n",
o,
o->offset,
b->poolEventTail,
b->size,
b);
}
}
con->poolOffsetHead = 0;
con->poolOffsetTail = 0;
}
}
}
void jumpR(Context* con, unsigned size UNUSED, lir::Register* target)
{ {
assertT(con, size == vm::TargetBytesPerWord); assertT(con, size == vm::TargetBytesPerWord);
emit(con, bx(target->low)); emit(con, bx(target->low));
@ -282,14 +189,14 @@ void jumpR(Context* con, unsigned size UNUSED, lir::Register* target)
void swapRR(Context* con, void swapRR(Context* con,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize, unsigned bSize,
lir::Register* b) lir::RegisterPair* b)
{ {
assertT(con, aSize == vm::TargetBytesPerWord); assertT(con, aSize == vm::TargetBytesPerWord);
assertT(con, bSize == vm::TargetBytesPerWord); assertT(con, bSize == vm::TargetBytesPerWord);
lir::Register tmp(con->client->acquireTemporary(GPR_MASK)); lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
moveRR(con, aSize, a, bSize, &tmp); moveRR(con, aSize, a, bSize, &tmp);
moveRR(con, bSize, b, aSize, a); moveRR(con, bSize, b, aSize, a);
moveRR(con, bSize, &tmp, bSize, b); moveRR(con, bSize, &tmp, bSize, b);
@ -298,9 +205,9 @@ void swapRR(Context* con,
void moveRR(Context* con, void moveRR(Context* con,
unsigned srcSize, unsigned srcSize,
lir::Register* src, lir::RegisterPair* src,
unsigned dstSize, unsigned dstSize,
lir::Register* dst) lir::RegisterPair* dst)
{ {
bool srcIsFpr = isFpr(src); bool srcIsFpr = isFpr(src);
bool dstIsFpr = isFpr(dst); bool dstIsFpr = isFpr(dst);
@ -343,8 +250,8 @@ void moveRR(Context* con,
moveRR(con, 4, src, 4, dst); moveRR(con, 4, src, 4, dst);
emit(con, asri(dst->high, src->low, 31)); emit(con, asri(dst->high, src->low, 31));
} else if (srcSize == 8 and dstSize == 8) { } else if (srcSize == 8 and dstSize == 8) {
lir::Register srcHigh(src->high); lir::RegisterPair srcHigh(src->high);
lir::Register dstHigh(dst->high); lir::RegisterPair dstHigh(dst->high);
if (src->high == dst->low) { if (src->high == dst->low) {
if (src->low == dst->high) { if (src->low == dst->high) {
@ -369,9 +276,9 @@ void moveRR(Context* con,
void moveZRR(Context* con, void moveZRR(Context* con,
unsigned srcSize, unsigned srcSize,
lir::Register* src, lir::RegisterPair* src,
unsigned, unsigned,
lir::Register* dst) lir::RegisterPair* dst)
{ {
switch (srcSize) { switch (srcSize) {
case 2: case 2:
@ -388,16 +295,16 @@ void moveCR(Context* con,
unsigned size, unsigned size,
lir::Constant* src, lir::Constant* src,
unsigned, unsigned,
lir::Register* dst); lir::RegisterPair* dst);
void moveCR2(Context* con, void moveCR2(Context* con,
unsigned size, unsigned size,
lir::Constant* src, lir::Constant* src,
lir::Register* dst, lir::RegisterPair* dst,
Promise* callOffset) Promise* callOffset)
{ {
if (isFpr(dst)) { // floating-point if (isFpr(dst)) { // floating-point
lir::Register tmp = size > 4 ? makeTemp64(con) : makeTemp(con); lir::RegisterPair tmp = size > 4 ? makeTemp64(con) : makeTemp(con);
moveCR(con, size, src, size, &tmp); moveCR(con, size, src, size, &tmp);
moveRR(con, size, &tmp, size, dst); moveRR(con, size, &tmp, size, dst);
freeTemp(con, tmp); freeTemp(con, tmp);
@ -407,10 +314,11 @@ void moveCR2(Context* con,
lir::Constant srcLo(&loBits); lir::Constant srcLo(&loBits);
ResolvedPromise hiBits(value >> 32); ResolvedPromise hiBits(value >> 32);
lir::Constant srcHi(&hiBits); lir::Constant srcHi(&hiBits);
lir::Register dstHi(dst->high); lir::RegisterPair dstHi(dst->high);
moveCR(con, 4, &srcLo, 4, dst); moveCR(con, 4, &srcLo, 4, dst);
moveCR(con, 4, &srcHi, 4, &dstHi); moveCR(con, 4, &srcHi, 4, &dstHi);
} else if (src->value->resolved() and isOfWidth(getValue(src), 8)) { } else if (callOffset == 0 and src->value->resolved()
and isOfWidth(getValue(src), 8)) {
emit(con, movi(dst->low, lo8(getValue(src)))); // fits in immediate emit(con, movi(dst->low, lo8(getValue(src)))); // fits in immediate
} else { } else {
appendConstantPoolEntry(con, src->value, callOffset); appendConstantPoolEntry(con, src->value, callOffset);
@ -422,16 +330,16 @@ void moveCR(Context* con,
unsigned size, unsigned size,
lir::Constant* src, lir::Constant* src,
unsigned, unsigned,
lir::Register* dst) lir::RegisterPair* dst)
{ {
moveCR2(con, size, src, dst, 0); moveCR2(con, size, src, dst, 0);
} }
void addR(Context* con, void addR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t) lir::RegisterPair* t)
{ {
if (size == 8) { if (size == 8) {
emit(con, SETS(add(t->low, a->low, b->low))); emit(con, SETS(add(t->low, a->low, b->low)));
@ -443,9 +351,9 @@ void addR(Context* con,
void subR(Context* con, void subR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t) lir::RegisterPair* t)
{ {
if (size == 8) { if (size == 8) {
emit(con, SETS(rsb(t->low, a->low, b->low))); emit(con, SETS(rsb(t->low, a->low, b->low)));
@ -458,8 +366,8 @@ void subR(Context* con,
void addC(Context* con, void addC(Context* con,
unsigned size, unsigned size,
lir::Constant* a, lir::Constant* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* dst) lir::RegisterPair* dst)
{ {
assertT(con, size == vm::TargetBytesPerWord); assertT(con, size == vm::TargetBytesPerWord);
@ -481,8 +389,8 @@ void addC(Context* con,
void subC(Context* con, void subC(Context* con,
unsigned size, unsigned size,
lir::Constant* a, lir::Constant* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* dst) lir::RegisterPair* dst)
{ {
assertT(con, size == vm::TargetBytesPerWord); assertT(con, size == vm::TargetBytesPerWord);
@ -503,15 +411,15 @@ void subC(Context* con,
void multiplyR(Context* con, void multiplyR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t) lir::RegisterPair* t)
{ {
if (size == 8) { if (size == 8) {
bool useTemporaries = b->low == t->low; bool useTemporaries = b->low == t->low;
int tmpLow = useTemporaries ? con->client->acquireTemporary(GPR_MASK) Register tmpLow = useTemporaries ? con->client->acquireTemporary(GPR_MASK)
: t->low; : t->low;
int tmpHigh = useTemporaries ? con->client->acquireTemporary(GPR_MASK) Register tmpHigh = useTemporaries ? con->client->acquireTemporary(GPR_MASK)
: t->high; : t->high;
emit(con, umull(tmpLow, tmpHigh, a->low, b->low)); emit(con, umull(tmpLow, tmpHigh, a->low, b->low));
@ -531,9 +439,9 @@ void multiplyR(Context* con,
void floatAbsoluteRR(Context* con, void floatAbsoluteRR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
unsigned, unsigned,
lir::Register* b) lir::RegisterPair* b)
{ {
if (size == 8) { if (size == 8) {
emit(con, fabsd(fpr64(b), fpr64(a))); emit(con, fabsd(fpr64(b), fpr64(a)));
@ -544,9 +452,9 @@ void floatAbsoluteRR(Context* con,
void floatNegateRR(Context* con, void floatNegateRR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
unsigned, unsigned,
lir::Register* b) lir::RegisterPair* b)
{ {
if (size == 8) { if (size == 8) {
emit(con, fnegd(fpr64(b), fpr64(a))); emit(con, fnegd(fpr64(b), fpr64(a)));
@ -557,9 +465,9 @@ void floatNegateRR(Context* con,
void float2FloatRR(Context* con, void float2FloatRR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
unsigned, unsigned,
lir::Register* b) lir::RegisterPair* b)
{ {
if (size == 8) { if (size == 8) {
emit(con, fcvtsd(fpr32(b), fpr64(a))); emit(con, fcvtsd(fpr32(b), fpr64(a)));
@ -570,11 +478,11 @@ void float2FloatRR(Context* con,
void float2IntRR(Context* con, void float2IntRR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
unsigned, unsigned,
lir::Register* b) lir::RegisterPair* b)
{ {
int tmp = newTemp(con, FPR_MASK); Register tmp = newTemp(con, FPR_MASK);
int ftmp = fpr32(tmp); int ftmp = fpr32(tmp);
if (size == 8) { // double to int if (size == 8) { // double to int
emit(con, ftosizd(ftmp, fpr64(a))); emit(con, ftosizd(ftmp, fpr64(a)));
@ -587,9 +495,9 @@ void float2IntRR(Context* con,
void int2FloatRR(Context* con, void int2FloatRR(Context* con,
unsigned, unsigned,
lir::Register* a, lir::RegisterPair* a,
unsigned size, unsigned size,
lir::Register* b) lir::RegisterPair* b)
{ {
emit(con, fmsr(fpr32(b), a->low)); emit(con, fmsr(fpr32(b), a->low));
if (size == 8) { // int to double if (size == 8) { // int to double
@ -601,9 +509,9 @@ void int2FloatRR(Context* con,
void floatSqrtRR(Context* con, void floatSqrtRR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
unsigned, unsigned,
lir::Register* b) lir::RegisterPair* b)
{ {
if (size == 8) { if (size == 8) {
emit(con, fsqrtd(fpr64(b), fpr64(a))); emit(con, fsqrtd(fpr64(b), fpr64(a)));
@ -614,9 +522,9 @@ void floatSqrtRR(Context* con,
void floatAddR(Context* con, void floatAddR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t) lir::RegisterPair* t)
{ {
if (size == 8) { if (size == 8) {
emit(con, faddd(fpr64(t), fpr64(a), fpr64(b))); emit(con, faddd(fpr64(t), fpr64(a), fpr64(b)));
@ -627,9 +535,9 @@ void floatAddR(Context* con,
void floatSubtractR(Context* con, void floatSubtractR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t) lir::RegisterPair* t)
{ {
if (size == 8) { if (size == 8) {
emit(con, fsubd(fpr64(t), fpr64(b), fpr64(a))); emit(con, fsubd(fpr64(t), fpr64(b), fpr64(a)));
@ -640,9 +548,9 @@ void floatSubtractR(Context* con,
void floatMultiplyR(Context* con, void floatMultiplyR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t) lir::RegisterPair* t)
{ {
if (size == 8) { if (size == 8) {
emit(con, fmuld(fpr64(t), fpr64(a), fpr64(b))); emit(con, fmuld(fpr64(t), fpr64(a), fpr64(b)));
@ -653,9 +561,9 @@ void floatMultiplyR(Context* con,
void floatDivideR(Context* con, void floatDivideR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* t) lir::RegisterPair* t)
{ {
if (size == 8) { if (size == 8) {
emit(con, fdivd(fpr64(t), fpr64(b), fpr64(a))); emit(con, fdivd(fpr64(t), fpr64(b), fpr64(a)));
@ -664,15 +572,15 @@ void floatDivideR(Context* con,
} }
} }
int normalize(Context* con, Register normalize(Context* con,
int offset, int offset,
int index, Register index,
unsigned scale, unsigned scale,
bool* preserveIndex, bool* preserveIndex,
bool* release) bool* release)
{ {
if (offset != 0 or scale != 1) { if (offset != 0 or scale != 1) {
lir::Register normalizedIndex( lir::RegisterPair normalizedIndex(
*preserveIndex ? con->client->acquireTemporary(GPR_MASK) : index); *preserveIndex ? con->client->acquireTemporary(GPR_MASK) : index);
if (*preserveIndex) { if (*preserveIndex) {
@ -682,10 +590,10 @@ int normalize(Context* con,
*release = false; *release = false;
} }
int scaled; Register scaled;
if (scale != 1) { if (scale != 1) {
lir::Register unscaledIndex(index); lir::RegisterPair unscaledIndex(index);
ResolvedPromise scalePromise(log(scale)); ResolvedPromise scalePromise(log(scale));
lir::Constant scaleConstant(&scalePromise); lir::Constant scaleConstant(&scalePromise);
@ -702,12 +610,12 @@ int normalize(Context* con,
} }
if (offset != 0) { if (offset != 0) {
lir::Register untranslatedIndex(scaled); lir::RegisterPair untranslatedIndex(scaled);
ResolvedPromise offsetPromise(offset); ResolvedPromise offsetPromise(offset);
lir::Constant offsetConstant(&offsetPromise); lir::Constant offsetConstant(&offsetPromise);
lir::Register tmp(con->client->acquireTemporary(GPR_MASK)); lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
moveCR(con, moveCR(con,
vm::TargetBytesPerWord, vm::TargetBytesPerWord,
&offsetConstant, &offsetConstant,
@ -730,16 +638,16 @@ int normalize(Context* con,
void store(Context* con, void store(Context* con,
unsigned size, unsigned size,
lir::Register* src, lir::RegisterPair* src,
int base, Register base,
int offset, int offset,
int index, Register index,
unsigned scale, unsigned scale,
bool preserveIndex) bool preserveIndex)
{ {
if (index != lir::NoRegister) { if (index != NoRegister) {
bool release; bool release;
int normalized Register normalized
= normalize(con, offset, index, scale, &preserveIndex, &release); = normalize(con, offset, index, scale, &preserveIndex, &release);
if (!isFpr(src)) { // GPR store if (!isFpr(src)) { // GPR store
@ -757,7 +665,7 @@ void store(Context* con,
break; break;
case 8: { // split into 2 32-bit stores case 8: { // split into 2 32-bit stores
lir::Register srcHigh(src->high); lir::RegisterPair srcHigh(src->high);
store(con, 4, &srcHigh, base, 0, normalized, 1, preserveIndex); store(con, 4, &srcHigh, base, 0, normalized, 1, preserveIndex);
store(con, 4, src, base, 4, normalized, 1, preserveIndex); store(con, 4, src, base, 4, normalized, 1, preserveIndex);
} break; } break;
@ -766,7 +674,7 @@ void store(Context* con,
abort(con); abort(con);
} }
} else { // FPR store } else { // FPR store
lir::Register base_(base), normalized_(normalized), lir::RegisterPair base_(base), normalized_(normalized),
absAddr = makeTemp(con); absAddr = makeTemp(con);
// FPR stores have only bases, so we must add the index // FPR stores have only bases, so we must add the index
addR(con, vm::TargetBytesPerWord, &base_, &normalized_, &absAddr); addR(con, vm::TargetBytesPerWord, &base_, &normalized_, &absAddr);
@ -798,9 +706,9 @@ void store(Context* con,
break; break;
case 8: { // split into 2 32-bit stores case 8: { // split into 2 32-bit stores
lir::Register srcHigh(src->high); lir::RegisterPair srcHigh(src->high);
store(con, 4, &srcHigh, base, offset, lir::NoRegister, 1, false); store(con, 4, &srcHigh, base, offset, NoRegister, 1, false);
store(con, 4, src, base, offset + 4, lir::NoRegister, 1, false); store(con, 4, src, base, offset + 4, NoRegister, 1, false);
} break; } break;
default: default:
@ -815,7 +723,7 @@ void store(Context* con,
emit(con, fsts(fpr32(src), base, offset)); emit(con, fsts(fpr32(src), base, offset));
} }
} else { } else {
lir::Register tmp(con->client->acquireTemporary(GPR_MASK)); lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
ResolvedPromise offsetPromise(offset); ResolvedPromise offsetPromise(offset);
lir::Constant offsetConstant(&offsetPromise); lir::Constant offsetConstant(&offsetPromise);
moveCR(con, moveCR(con,
@ -832,7 +740,7 @@ void store(Context* con,
void moveRM(Context* con, void moveRM(Context* con,
unsigned srcSize, unsigned srcSize,
lir::Register* src, lir::RegisterPair* src,
unsigned dstSize UNUSED, unsigned dstSize UNUSED,
lir::Memory* dst) lir::Memory* dst)
{ {
@ -844,18 +752,18 @@ void moveRM(Context* con,
void load(Context* con, void load(Context* con,
unsigned srcSize, unsigned srcSize,
int base, Register base,
int offset, int offset,
int index, Register index,
unsigned scale, unsigned scale,
unsigned dstSize, unsigned dstSize,
lir::Register* dst, lir::RegisterPair* dst,
bool preserveIndex, bool preserveIndex,
bool signExtend) bool signExtend)
{ {
if (index != lir::NoRegister) { if (index != NoRegister) {
bool release; bool release;
int normalized Register normalized
= normalize(con, offset, index, scale, &preserveIndex, &release); = normalize(con, offset, index, scale, &preserveIndex, &release);
if (!isFpr(dst)) { // GPR load if (!isFpr(dst)) { // GPR load
@ -882,7 +790,7 @@ void load(Context* con,
load(con, 4, base, 0, normalized, 1, 4, dst, preserveIndex, false); load(con, 4, base, 0, normalized, 1, 4, dst, preserveIndex, false);
moveRR(con, 4, dst, 8, dst); moveRR(con, 4, dst, 8, dst);
} else if (srcSize == 8 and dstSize == 8) { } else if (srcSize == 8 and dstSize == 8) {
lir::Register dstHigh(dst->high); lir::RegisterPair dstHigh(dst->high);
load(con, load(con,
4, 4,
base, base,
@ -903,7 +811,7 @@ void load(Context* con,
abort(con); abort(con);
} }
} else { // FPR load } else { // FPR load
lir::Register base_(base), normalized_(normalized), lir::RegisterPair base_(base), normalized_(normalized),
absAddr = makeTemp(con); absAddr = makeTemp(con);
// VFP loads only have bases, so we must add the index // VFP loads only have bases, so we must add the index
addR(con, vm::TargetBytesPerWord, &base_, &normalized_, &absAddr); addR(con, vm::TargetBytesPerWord, &base_, &normalized_, &absAddr);
@ -946,27 +854,9 @@ void load(Context* con,
case 8: { case 8: {
if (dstSize == 8) { if (dstSize == 8) {
lir::Register dstHigh(dst->high); lir::RegisterPair dstHigh(dst->high);
load(con, load(con, 4, base, offset, NoRegister, 1, 4, &dstHigh, false, false);
4, load(con, 4, base, offset + 4, NoRegister, 1, 4, dst, false, false);
base,
offset,
lir::NoRegister,
1,
4,
&dstHigh,
false,
false);
load(con,
4,
base,
offset + 4,
lir::NoRegister,
1,
4,
dst,
false,
false);
} else { } else {
emit(con, ldri(dst->low, base, offset)); emit(con, ldri(dst->low, base, offset));
} }
@ -984,7 +874,7 @@ void load(Context* con,
emit(con, flds(fpr32(dst), base, offset)); emit(con, flds(fpr32(dst), base, offset));
} }
} else { } else {
lir::Register tmp(con->client->acquireTemporary(GPR_MASK)); lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
ResolvedPromise offsetPromise(offset); ResolvedPromise offsetPromise(offset);
lir::Constant offsetConstant(&offsetPromise); lir::Constant offsetConstant(&offsetPromise);
moveCR(con, moveCR(con,
@ -1003,7 +893,7 @@ void moveMR(Context* con,
unsigned srcSize, unsigned srcSize,
lir::Memory* src, lir::Memory* src,
unsigned dstSize, unsigned dstSize,
lir::Register* dst) lir::RegisterPair* dst)
{ {
load(con, load(con,
srcSize, srcSize,
@ -1021,7 +911,7 @@ void moveZMR(Context* con,
unsigned srcSize, unsigned srcSize,
lir::Memory* src, lir::Memory* src,
unsigned dstSize, unsigned dstSize,
lir::Register* dst) lir::RegisterPair* dst)
{ {
load(con, load(con,
srcSize, srcSize,
@ -1037,9 +927,9 @@ void moveZMR(Context* con,
void andR(Context* con, void andR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* dst) lir::RegisterPair* dst)
{ {
if (size == 8) if (size == 8)
emit(con, and_(dst->high, a->high, b->high)); emit(con, and_(dst->high, a->high, b->high));
@ -1049,8 +939,8 @@ void andR(Context* con,
void andC(Context* con, void andC(Context* con,
unsigned size, unsigned size,
lir::Constant* a, lir::Constant* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* dst) lir::RegisterPair* dst)
{ {
int64_t v = a->value->value(); int64_t v = a->value->value();
@ -1061,8 +951,8 @@ void andC(Context* con,
ResolvedPromise low(v & 0xFFFFFFFF); ResolvedPromise low(v & 0xFFFFFFFF);
lir::Constant al(&low); lir::Constant al(&low);
lir::Register bh(b->high); lir::RegisterPair bh(b->high);
lir::Register dh(dst->high); lir::RegisterPair dh(dst->high);
andC(con, 4, &al, b, dst); andC(con, 4, &al, b, dst);
andC(con, 4, &ah, &bh, &dh); andC(con, 4, &ah, &bh, &dh);
@ -1078,7 +968,7 @@ void andC(Context* con,
// instruction // instruction
bool useTemporary = b->low == dst->low; bool useTemporary = b->low == dst->low;
lir::Register tmp(dst->low); lir::RegisterPair tmp(dst->low);
if (useTemporary) { if (useTemporary) {
tmp.low = con->client->acquireTemporary(GPR_MASK); tmp.low = con->client->acquireTemporary(GPR_MASK);
} }
@ -1098,9 +988,9 @@ void andC(Context* con,
void orR(Context* con, void orR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* dst) lir::RegisterPair* dst)
{ {
if (size == 8) if (size == 8)
emit(con, orr(dst->high, a->high, b->high)); emit(con, orr(dst->high, a->high, b->high));
@ -1109,9 +999,9 @@ void orR(Context* con,
void xorR(Context* con, void xorR(Context* con,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Register* dst) lir::RegisterPair* dst)
{ {
if (size == 8) if (size == 8)
emit(con, eor(dst->high, a->high, b->high)); emit(con, eor(dst->high, a->high, b->high));
@ -1122,14 +1012,14 @@ void moveAR2(Context* con,
unsigned srcSize, unsigned srcSize,
lir::Address* src, lir::Address* src,
unsigned dstSize, unsigned dstSize,
lir::Register* dst) lir::RegisterPair* dst)
{ {
assertT(con, srcSize == 4 and dstSize == 4); assertT(con, srcSize == 4 and dstSize == 4);
lir::Constant constant(src->address); lir::Constant constant(src->address);
moveCR(con, srcSize, &constant, dstSize, dst); moveCR(con, srcSize, &constant, dstSize, dst);
lir::Memory memory(dst->low, 0, -1, 0); lir::Memory memory(dst->low, 0, NoRegister, 0);
moveMR(con, dstSize, &memory, dstSize, dst); moveMR(con, dstSize, &memory, dstSize, dst);
} }
@ -1137,16 +1027,16 @@ void moveAR(Context* con,
unsigned srcSize, unsigned srcSize,
lir::Address* src, lir::Address* src,
unsigned dstSize, unsigned dstSize,
lir::Register* dst) lir::RegisterPair* dst)
{ {
moveAR2(con, srcSize, src, dstSize, dst); moveAR2(con, srcSize, src, dstSize, dst);
} }
void compareRR(Context* con, void compareRR(Context* con,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b) lir::RegisterPair* b)
{ {
assertT(con, !(isFpr(a) ^ isFpr(b))); // regs must be of the same type assertT(con, !(isFpr(a) ^ isFpr(b))); // regs must be of the same type
@ -1168,14 +1058,14 @@ void compareCR(Context* con,
unsigned aSize, unsigned aSize,
lir::Constant* a, lir::Constant* a,
unsigned bSize, unsigned bSize,
lir::Register* b) lir::RegisterPair* b)
{ {
assertT(con, aSize == 4 and bSize == 4); assertT(con, aSize == 4 and bSize == 4);
if (!isFpr(b) && a->value->resolved() && isOfWidth(a->value->value(), 8)) { if (!isFpr(b) && a->value->resolved() && isOfWidth(a->value->value(), 8)) {
emit(con, cmpi(b->low, a->value->value())); emit(con, cmpi(b->low, a->value->value()));
} else { } else {
lir::Register tmp(con->client->acquireTemporary(GPR_MASK)); lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
moveCR(con, aSize, a, bSize, &tmp); moveCR(con, aSize, a, bSize, &tmp);
compareRR(con, bSize, &tmp, bSize, b); compareRR(con, bSize, &tmp, bSize, b);
con->client->releaseTemporary(tmp.low); con->client->releaseTemporary(tmp.low);
@ -1190,7 +1080,7 @@ void compareCM(Context* con,
{ {
assertT(con, aSize == 4 and bSize == 4); assertT(con, aSize == 4 and bSize == 4);
lir::Register tmp(con->client->acquireTemporary(GPR_MASK)); lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
moveMR(con, bSize, b, bSize, &tmp); moveMR(con, bSize, b, bSize, &tmp);
compareCR(con, aSize, a, bSize, &tmp); compareCR(con, aSize, a, bSize, &tmp);
con->client->releaseTemporary(tmp.low); con->client->releaseTemporary(tmp.low);
@ -1198,13 +1088,13 @@ void compareCM(Context* con,
void compareRM(Context* con, void compareRM(Context* con,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize, unsigned bSize,
lir::Memory* b) lir::Memory* b)
{ {
assertT(con, aSize == 4 and bSize == 4); assertT(con, aSize == 4 and bSize == 4);
lir::Register tmp(con->client->acquireTemporary(GPR_MASK)); lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
moveMR(con, bSize, b, bSize, &tmp); moveMR(con, bSize, b, bSize, &tmp);
compareRR(con, aSize, a, bSize, &tmp); compareRR(con, aSize, a, bSize, &tmp);
con->client->releaseTemporary(tmp.low); con->client->releaseTemporary(tmp.low);
@ -1352,13 +1242,13 @@ void branchLong(Context* con,
void branchRR(Context* con, void branchRR(Context* con,
lir::TernaryOperation op, lir::TernaryOperation op,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Constant* target) lir::Constant* target)
{ {
if (!isFpr(a) && size > vm::TargetBytesPerWord) { if (!isFpr(a) && size > vm::TargetBytesPerWord) {
lir::Register ah(a->high); lir::RegisterPair ah(a->high);
lir::Register bh(b->high); lir::RegisterPair bh(b->high);
branchLong( branchLong(
con, op, a, &ah, b, &bh, target, CAST2(compareRR), CAST2(compareRR)); con, op, a, &ah, b, &bh, target, CAST2(compareRR), CAST2(compareRR));
@ -1372,7 +1262,7 @@ void branchCR(Context* con,
lir::TernaryOperation op, lir::TernaryOperation op,
unsigned size, unsigned size,
lir::Constant* a, lir::Constant* a,
lir::Register* b, lir::RegisterPair* b,
lir::Constant* target) lir::Constant* target)
{ {
assertT(con, !isFloatBranch(op)); assertT(con, !isFloatBranch(op));
@ -1386,7 +1276,7 @@ void branchCR(Context* con,
ResolvedPromise high((v >> 32) & ~static_cast<vm::target_uintptr_t>(0)); ResolvedPromise high((v >> 32) & ~static_cast<vm::target_uintptr_t>(0));
lir::Constant ah(&high); lir::Constant ah(&high);
lir::Register bh(b->high); lir::RegisterPair bh(b->high);
branchLong( branchLong(
con, op, &al, &ah, b, &bh, target, CAST2(compareCR), CAST2(compareCR)); con, op, &al, &ah, b, &bh, target, CAST2(compareCR), CAST2(compareCR));
@ -1399,7 +1289,7 @@ void branchCR(Context* con,
void branchRM(Context* con, void branchRM(Context* con,
lir::TernaryOperation op, lir::TernaryOperation op,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Memory* b, lir::Memory* b,
lir::Constant* target) lir::Constant* target)
{ {
@ -1450,7 +1340,7 @@ void moveCM(Context* con,
} break; } break;
default: default:
lir::Register tmp(con->client->acquireTemporary(GPR_MASK)); lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
moveCR(con, srcSize, src, dstSize, &tmp); moveCR(con, srcSize, src, dstSize, &tmp);
moveRM(con, dstSize, &tmp, dstSize, dst); moveRM(con, dstSize, &tmp, dstSize, dst);
con->client->releaseTemporary(tmp.low); con->client->releaseTemporary(tmp.low);
@ -1459,9 +1349,9 @@ void moveCM(Context* con,
void negateRR(Context* con, void negateRR(Context* con,
unsigned srcSize, unsigned srcSize,
lir::Register* src, lir::RegisterPair* src,
unsigned dstSize UNUSED, unsigned dstSize UNUSED,
lir::Register* dst) lir::RegisterPair* dst)
{ {
assertT(con, srcSize == dstSize); assertT(con, srcSize == dstSize);
@ -1473,7 +1363,7 @@ void negateRR(Context* con,
} }
} }
void callR(Context* con, unsigned size UNUSED, lir::Register* target) void callR(Context* con, unsigned size UNUSED, lir::RegisterPair* target)
{ {
assertT(con, size == vm::TargetBytesPerWord); assertT(con, size == vm::TargetBytesPerWord);
emit(con, blx(target->low)); emit(con, blx(target->low));
@ -1491,20 +1381,31 @@ void longCallC(Context* con, unsigned size UNUSED, lir::Constant* target)
{ {
assertT(con, size == vm::TargetBytesPerWord); assertT(con, size == vm::TargetBytesPerWord);
lir::Register tmp(4); lir::RegisterPair tmp(Register(4));
moveCR2(con, vm::TargetBytesPerWord, target, &tmp, offsetPromise(con)); moveCR2(con, vm::TargetBytesPerWord, target, &tmp, offsetPromise(con));
callR(con, vm::TargetBytesPerWord, &tmp); callR(con, vm::TargetBytesPerWord, &tmp);
} }
void alignedLongCallC(Context* con, unsigned size, lir::Constant* target)
{
longCallC(con, size, target);
}
void longJumpC(Context* con, unsigned size UNUSED, lir::Constant* target) void longJumpC(Context* con, unsigned size UNUSED, lir::Constant* target)
{ {
assertT(con, size == vm::TargetBytesPerWord); assertT(con, size == vm::TargetBytesPerWord);
lir::Register tmp(4); // a non-arg reg that we don't mind clobbering lir::RegisterPair tmp(
Register(4)); // a non-arg reg that we don't mind clobbering
moveCR2(con, vm::TargetBytesPerWord, target, &tmp, offsetPromise(con)); moveCR2(con, vm::TargetBytesPerWord, target, &tmp, offsetPromise(con));
jumpR(con, vm::TargetBytesPerWord, &tmp); jumpR(con, vm::TargetBytesPerWord, &tmp);
} }
void alignedLongJumpC(Context* con, unsigned size, lir::Constant* target)
{
longJumpC(con, size, target);
}
void jumpC(Context* con, unsigned size UNUSED, lir::Constant* target) void jumpC(Context* con, unsigned size UNUSED, lir::Constant* target)
{ {
assertT(con, size == vm::TargetBytesPerWord); assertT(con, size == vm::TargetBytesPerWord);
@ -1554,3 +1455,5 @@ void storeLoadBarrier(Context* con)
} // namespace arm } // namespace arm
} // namespace codegen } // namespace codegen
} // namespace avian } // namespace avian
#endif // TARGET_BYTES_PER_WORD == 4

File diff suppressed because it is too large Load Diff

View File

@ -14,48 +14,61 @@
#include <avian/codegen/lir.h> #include <avian/codegen/lir.h>
#include <avian/codegen/assembler.h> #include <avian/codegen/assembler.h>
#include "avian/environment.h"
namespace avian { namespace avian {
namespace codegen { namespace codegen {
namespace arm { namespace arm {
const uint64_t MASK_LO32 = 0xffffffff; const uint64_t MASK_LO32 = 0xffffffff;
const unsigned MASK_LO16 = 0xffff;
const unsigned MASK_LO8 = 0xff; const unsigned MASK_LO8 = 0xff;
#if TARGET_BYTES_PER_WORD == 8
constexpr Register ThreadRegister(19);
constexpr Register StackRegister(31);
constexpr Register LinkRegister(30);
constexpr Register FrameRegister(29);
constexpr Register ProgramCounter(0xFE); // i.e. unaddressable
const int N_GPRS = 32;
const int N_FPRS = 32;
const RegisterMask GPR_MASK = 0xffffffff;
const RegisterMask FPR_MASK = 0xffffffff00000000;
#else
constexpr Register ThreadRegister(8);
constexpr Register StackRegister(13);
constexpr Register LinkRegister(14);
constexpr Register FrameRegister(0xFE); // i.e. there is none
constexpr Register ProgramCounter(15);
const int N_GPRS = 16; const int N_GPRS = 16;
const int N_FPRS = 16; const int N_FPRS = 16;
const uint32_t GPR_MASK = 0xffff; const RegisterMask GPR_MASK = 0xffff;
const uint32_t FPR_MASK = 0xffff0000; const RegisterMask FPR_MASK = 0xffff0000;
const uint64_t GPR_MASK64 = GPR_MASK | (uint64_t)GPR_MASK << 32; inline int fpr64(Register reg)
const uint64_t FPR_MASK64 = FPR_MASK | (uint64_t)FPR_MASK << 32;
inline bool isFpr(lir::Register* reg)
{ {
return reg->low >= N_GPRS; return reg.index() - N_GPRS;
} }
inline int fpr64(lir::RegisterPair* reg)
inline int fpr64(int reg)
{
return reg - N_GPRS;
}
inline int fpr64(lir::Register* reg)
{ {
return fpr64(reg->low); return fpr64(reg->low);
} }
inline int fpr32(int reg) inline int fpr32(Register reg)
{ {
return fpr64(reg) << 1; return fpr64(reg) << 1;
} }
inline int fpr32(lir::Register* reg) inline int fpr32(lir::RegisterPair* reg)
{ {
return fpr64(reg) << 1; return fpr64(reg) << 1;
} }
#endif
const int ThreadRegister = 8; inline bool isFpr(lir::RegisterPair* reg)
const int StackRegister = 13; {
const int LinkRegister = 14; return reg->low.index() >= N_GPRS;
const int ProgramCounter = 15; }
} // namespace arm } // namespace arm
} // namespace codegen } // namespace codegen

View File

@ -17,9 +17,9 @@ namespace codegen {
class Multimethod { class Multimethod {
public: public:
inline static unsigned index(lir::UnaryOperation operation, inline static unsigned index(lir::UnaryOperation operation,
lir::OperandType operand) lir::Operand::Type operand)
{ {
return operation + (lir::UnaryOperationCount * operand); return operation + (lir::UnaryOperationCount * (unsigned)operand);
} }
}; };

View File

@ -183,37 +183,37 @@ class MyArchitecture : public Architecture {
return &myRegisterFile; return &myRegisterFile;
} }
virtual int scratch() virtual Register scratch()
{ {
return rax; return rax;
} }
virtual int stack() virtual Register stack()
{ {
return rsp; return rsp;
} }
virtual int thread() virtual Register thread()
{ {
return rbx; return rbx;
} }
virtual int returnLow() virtual Register returnLow()
{ {
return rax; return rax;
} }
virtual int returnHigh() virtual Register returnHigh()
{ {
return (TargetBytesPerWord == 4 ? rdx : lir::NoRegister); return (TargetBytesPerWord == 4 ? rdx : NoRegister);
} }
virtual int virtualCallTarget() virtual Register virtualCallTarget()
{ {
return rax; return rax;
} }
virtual int virtualCallIndex() virtual Register virtualCallIndex()
{ {
return rdx; return rdx;
} }
@ -233,14 +233,14 @@ class MyArchitecture : public Architecture {
return 0x7FFFFFFF; return 0x7FFFFFFF;
} }
virtual bool reserved(int register_) virtual bool reserved(Register register_)
{ {
switch (register_) { switch (register_.index()) {
case rbp: case rbp.index():
return UseFramePointer; return UseFramePointer;
case rsp: case rsp.index():
case rbx: case rbx.index():
return true; return true;
default: default:
@ -289,7 +289,7 @@ class MyArchitecture : public Architecture {
return 0; return 0;
} }
virtual int argumentRegister(unsigned index) virtual Register argumentRegister(unsigned index)
{ {
assertT(&c, TargetBytesPerWord == 8); assertT(&c, TargetBytesPerWord == 8);
switch (index) { switch (index) {
@ -501,8 +501,8 @@ class MyArchitecture : public Architecture {
OperandMask& aMask, OperandMask& aMask,
bool* thunk) bool* thunk)
{ {
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand) aMask.typeMask = lir::Operand::RegisterPairMask | lir::Operand::MemoryMask
| (1 << lir::ConstantOperand); | lir::Operand::ConstantMask;
*thunk = false; *thunk = false;
} }
@ -512,22 +512,20 @@ class MyArchitecture : public Architecture {
unsigned bSize, unsigned bSize,
bool* thunk) bool* thunk)
{ {
aMask.registerMask = GeneralRegisterMask aMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
*thunk = false; *thunk = false;
switch (op) { switch (op) {
case lir::Negate: case lir::Negate:
aMask.typeMask = (1 << lir::RegisterOperand); aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.registerMask = (static_cast<uint64_t>(1) << (rdx + 32)) aMask.setLowHighRegisterMasks(rax, rdx);
| (static_cast<uint64_t>(1) << rax);
break; break;
case lir::Absolute: case lir::Absolute:
if (aSize <= TargetBytesPerWord) { if (aSize <= TargetBytesPerWord) {
aMask.typeMask = (1 << lir::RegisterOperand); aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.registerMask = (static_cast<uint64_t>(1) << rax); aMask.setLowHighRegisterMasks(rax, 0);
} else { } else {
*thunk = true; *thunk = true;
} }
@ -535,9 +533,8 @@ class MyArchitecture : public Architecture {
case lir::FloatAbsolute: case lir::FloatAbsolute:
if (useSSE(&c)) { if (useSSE(&c)) {
aMask.typeMask = (1 << lir::RegisterOperand); aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32) aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
| FloatRegisterMask;
} else { } else {
*thunk = true; *thunk = true;
} }
@ -546,8 +543,8 @@ class MyArchitecture : public Architecture {
case lir::FloatNegate: case lir::FloatNegate:
// floatNegateRR does not support doubles // floatNegateRR does not support doubles
if (useSSE(&c) and aSize == 4 and bSize == 4) { if (useSSE(&c) and aSize == 4 and bSize == 4) {
aMask.typeMask = (1 << lir::RegisterOperand); aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.registerMask = FloatRegisterMask; aMask.setLowHighRegisterMasks(FloatRegisterMask, 0);
} else { } else {
*thunk = true; *thunk = true;
} }
@ -555,10 +552,9 @@ class MyArchitecture : public Architecture {
case lir::FloatSquareRoot: case lir::FloatSquareRoot:
if (useSSE(&c)) { if (useSSE(&c)) {
aMask.typeMask = (1 << lir::RegisterOperand) aMask.typeMask = lir::Operand::RegisterPairMask
| (1 << lir::MemoryOperand); | lir::Operand::MemoryMask;
aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32) aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
| FloatRegisterMask;
} else { } else {
*thunk = true; *thunk = true;
} }
@ -566,10 +562,9 @@ class MyArchitecture : public Architecture {
case lir::Float2Float: case lir::Float2Float:
if (useSSE(&c)) { if (useSSE(&c)) {
aMask.typeMask = (1 << lir::RegisterOperand) aMask.typeMask = lir::Operand::RegisterPairMask
| (1 << lir::MemoryOperand); | lir::Operand::MemoryMask;
aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32) aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
| FloatRegisterMask;
} else { } else {
*thunk = true; *thunk = true;
} }
@ -581,10 +576,9 @@ class MyArchitecture : public Architecture {
// thunks or produce inline machine code which handles edge // thunks or produce inline machine code which handles edge
// cases properly. // cases properly.
if (false and useSSE(&c) and bSize <= TargetBytesPerWord) { if (false and useSSE(&c) and bSize <= TargetBytesPerWord) {
aMask.typeMask = (1 << lir::RegisterOperand) aMask.typeMask = lir::Operand::RegisterPairMask
| (1 << lir::MemoryOperand); | lir::Operand::MemoryMask;
aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32) aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
| FloatRegisterMask;
} else { } else {
*thunk = true; *thunk = true;
} }
@ -592,11 +586,9 @@ class MyArchitecture : public Architecture {
case lir::Int2Float: case lir::Int2Float:
if (useSSE(&c) and aSize <= TargetBytesPerWord) { if (useSSE(&c) and aSize <= TargetBytesPerWord) {
aMask.typeMask = (1 << lir::RegisterOperand) aMask.typeMask = lir::Operand::RegisterPairMask
| (1 << lir::MemoryOperand); | lir::Operand::MemoryMask;
aMask.registerMask aMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
= GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
} else { } else {
*thunk = true; *thunk = true;
} }
@ -604,21 +596,20 @@ class MyArchitecture : public Architecture {
case lir::Move: case lir::Move:
aMask.typeMask = ~0; aMask.typeMask = ~0;
aMask.registerMask = ~static_cast<uint64_t>(0); aMask.setLowHighRegisterMasks(AnyRegisterMask, AnyRegisterMask);
if (TargetBytesPerWord == 4) { if (TargetBytesPerWord == 4) {
if (aSize == 4 and bSize == 8) { if (aSize == 4 and bSize == 8) {
aMask.typeMask = (1 << lir::RegisterOperand) aMask.typeMask = lir::Operand::RegisterPairMask
| (1 << lir::MemoryOperand); | lir::Operand::MemoryMask;
const uint32_t mask = GeneralRegisterMask const RegisterMask mask = GeneralRegisterMask
& ~((1 << rax) | (1 << rdx)); .excluding(rax).excluding(rdx);
aMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask; aMask.setLowHighRegisterMasks(mask, mask);
} else if (aSize == 1 or bSize == 1) { } else if (aSize == 1 or bSize == 1) {
aMask.typeMask = (1 << lir::RegisterOperand) aMask.typeMask = lir::Operand::RegisterPairMask
| (1 << lir::MemoryOperand); | lir::Operand::MemoryMask;
const uint32_t mask = (1 << rax) | (1 << rcx) | (1 << rdx) const RegisterMask mask = rax | rcx | rdx | rbx;
| (1 << rbx); aMask.setLowHighRegisterMasks(mask, mask);
aMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
} }
} }
break; break;
@ -635,68 +626,62 @@ class MyArchitecture : public Architecture {
OperandMask& bMask) OperandMask& bMask)
{ {
bMask.typeMask = ~0; bMask.typeMask = ~0;
bMask.registerMask = GeneralRegisterMask bMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
switch (op) { switch (op) {
case lir::Absolute: case lir::Absolute:
bMask.typeMask = (1 << lir::RegisterOperand); bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.registerMask = (static_cast<uint64_t>(1) << rax); bMask.setLowHighRegisterMasks(rax, 0);
break; break;
case lir::FloatAbsolute: case lir::FloatAbsolute:
bMask.typeMask = (1 << lir::RegisterOperand); bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.registerMask = aMask.registerMask; bMask.lowRegisterMask = aMask.lowRegisterMask;
bMask.highRegisterMask = aMask.highRegisterMask;
break; break;
case lir::Negate: case lir::Negate:
bMask.typeMask = (1 << lir::RegisterOperand); bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.registerMask = aMask.registerMask; bMask.lowRegisterMask = aMask.lowRegisterMask;
bMask.highRegisterMask = aMask.highRegisterMask;
break; break;
case lir::FloatNegate: case lir::FloatNegate:
case lir::FloatSquareRoot: case lir::FloatSquareRoot:
case lir::Float2Float: case lir::Float2Float:
case lir::Int2Float: case lir::Int2Float:
bMask.typeMask = (1 << lir::RegisterOperand); bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32) bMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
| FloatRegisterMask;
break; break;
case lir::Float2Int: case lir::Float2Int:
bMask.typeMask = (1 << lir::RegisterOperand); bMask.typeMask = lir::Operand::RegisterPairMask;
break; break;
case lir::Move: case lir::Move:
if (aMask.typeMask if (aMask.typeMask
& ((1 << lir::MemoryOperand) | 1 << lir::AddressOperand)) { & (lir::Operand::MemoryMask | lir::Operand::AddressMask)) {
bMask.typeMask = (1 << lir::RegisterOperand); bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.registerMask = GeneralRegisterMask bMask.setLowHighRegisterMasks(GeneralRegisterMask | FloatRegisterMask, GeneralRegisterMask);
| (static_cast<uint64_t>(GeneralRegisterMask) } else if (aMask.typeMask & lir::Operand::RegisterPairMask) {
<< 32) | FloatRegisterMask; bMask.typeMask = lir::Operand::RegisterPairMask
} else if (aMask.typeMask & (1 << lir::RegisterOperand)) { | lir::Operand::MemoryMask;
bMask.typeMask = (1 << lir::RegisterOperand) if (aMask.lowRegisterMask & FloatRegisterMask) {
| (1 << lir::MemoryOperand); bMask.setLowHighRegisterMasks(FloatRegisterMask, 0);
if (aMask.registerMask & FloatRegisterMask) {
bMask.registerMask = FloatRegisterMask;
} else { } else {
bMask.registerMask bMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
= GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
} }
} else { } else {
bMask.typeMask = (1 << lir::RegisterOperand) bMask.typeMask = lir::Operand::RegisterPairMask
| (1 << lir::MemoryOperand); | lir::Operand::MemoryMask;
} }
if (TargetBytesPerWord == 4) { if (TargetBytesPerWord == 4) {
if (aSize == 4 and bSize == 8) { if (aSize == 4 and bSize == 8) {
bMask.registerMask = (static_cast<uint64_t>(1) << (rdx + 32)) bMask.setLowHighRegisterMasks(rax, rdx);
| (static_cast<uint64_t>(1) << rax);
} else if (aSize == 1 or bSize == 1) { } else if (aSize == 1 or bSize == 1) {
const uint32_t mask = (1 << rax) | (1 << rcx) | (1 << rdx) const RegisterMask mask = rax | rcx | rdx | rbx;
| (1 << rbx); bMask.setLowHighRegisterMasks(mask, mask);
bMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
} }
} }
break; break;
@ -712,46 +697,38 @@ class MyArchitecture : public Architecture {
const OperandMask& dstMask) const OperandMask& dstMask)
{ {
srcMask.typeMask = ~0; srcMask.typeMask = ~0;
srcMask.registerMask = ~static_cast<uint64_t>(0); srcMask.setLowHighRegisterMasks(AnyRegisterMask, AnyRegisterMask);
tmpMask.typeMask = 0; tmpMask.typeMask = 0;
tmpMask.registerMask = 0; tmpMask.setLowHighRegisterMasks(0, 0);
if (dstMask.typeMask & (1 << lir::MemoryOperand)) { if (dstMask.typeMask & lir::Operand::MemoryMask) {
// can't move directly from memory to memory // can't move directly from memory to memory
srcMask.typeMask = (1 << lir::RegisterOperand) srcMask.typeMask = lir::Operand::RegisterPairMask
| (1 << lir::ConstantOperand); | lir::Operand::ConstantMask;
tmpMask.typeMask = 1 << lir::RegisterOperand; tmpMask.typeMask = lir::Operand::RegisterPairMask;
tmpMask.registerMask tmpMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
= GeneralRegisterMask } else if (dstMask.typeMask & lir::Operand::RegisterPairMask) {
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
} else if (dstMask.typeMask & (1 << lir::RegisterOperand)) {
if (size > TargetBytesPerWord) { if (size > TargetBytesPerWord) {
// can't move directly from FPR to GPR or vice-versa for // can't move directly from FPR to GPR or vice-versa for
// values larger than the GPR size // values larger than the GPR size
if (dstMask.registerMask & FloatRegisterMask) { if (dstMask.lowRegisterMask & FloatRegisterMask) {
srcMask.registerMask srcMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
= FloatRegisterMask tmpMask.typeMask = lir::Operand::MemoryMask;
| (static_cast<uint64_t>(FloatRegisterMask) << 32); } else if (dstMask.lowRegisterMask & GeneralRegisterMask) {
tmpMask.typeMask = 1 << lir::MemoryOperand; srcMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
} else if (dstMask.registerMask & GeneralRegisterMask) { tmpMask.typeMask = lir::Operand::MemoryMask;
srcMask.registerMask
= GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
tmpMask.typeMask = 1 << lir::MemoryOperand;
} }
} }
if (dstMask.registerMask & FloatRegisterMask) { if (dstMask.lowRegisterMask & FloatRegisterMask) {
// can't move directly from constant to FPR // can't move directly from constant to FPR
srcMask.typeMask &= ~(1 << lir::ConstantOperand); srcMask.typeMask &= ~lir::Operand::ConstantMask;
if (size > TargetBytesPerWord) { if (size > TargetBytesPerWord) {
tmpMask.typeMask = 1 << lir::MemoryOperand; tmpMask.typeMask = lir::Operand::MemoryMask;
} else { } else {
tmpMask.typeMask = (1 << lir::RegisterOperand) tmpMask.typeMask = lir::Operand::RegisterPairMask
| (1 << lir::MemoryOperand); | lir::Operand::MemoryMask;
tmpMask.registerMask tmpMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
= GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
} }
} }
} }
@ -765,13 +742,11 @@ class MyArchitecture : public Architecture {
unsigned, unsigned,
bool* thunk) bool* thunk)
{ {
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand); aMask.typeMask = lir::Operand::RegisterPairMask | lir::Operand::ConstantMask;
aMask.registerMask = GeneralRegisterMask aMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
bMask.typeMask = (1 << lir::RegisterOperand); bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.registerMask = GeneralRegisterMask bMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
*thunk = false; *thunk = false;
@ -781,14 +756,12 @@ class MyArchitecture : public Architecture {
case lir::FloatMultiply: case lir::FloatMultiply:
case lir::FloatDivide: case lir::FloatDivide:
if (useSSE(&c)) { if (useSSE(&c)) {
aMask.typeMask = (1 << lir::RegisterOperand) aMask.typeMask = lir::Operand::RegisterPairMask
| (1 << lir::MemoryOperand); | lir::Operand::MemoryMask;
bMask.typeMask = (1 << lir::RegisterOperand); bMask.typeMask = lir::Operand::RegisterPairMask;
const uint64_t mask = (static_cast<uint64_t>(FloatRegisterMask) << 32) aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
| FloatRegisterMask; bMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
aMask.registerMask = mask;
bMask.registerMask = mask;
} else { } else {
*thunk = true; *thunk = true;
} }
@ -800,12 +773,12 @@ class MyArchitecture : public Architecture {
case lir::Multiply: case lir::Multiply:
if (TargetBytesPerWord == 4 and aSize == 8) { if (TargetBytesPerWord == 4 and aSize == 8) {
const uint32_t mask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx)); const RegisterMask mask = GeneralRegisterMask .excluding(rax).excluding(rdx);
aMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask; aMask.setLowHighRegisterMasks(mask, mask);
bMask.registerMask = (static_cast<uint64_t>(1) << (rdx + 32)) | mask; bMask.setLowHighRegisterMasks(mask, rdx);
} else { } else {
aMask.registerMask = GeneralRegisterMask; aMask.setLowHighRegisterMasks(GeneralRegisterMask, 0);
bMask.registerMask = GeneralRegisterMask; bMask.setLowHighRegisterMasks(GeneralRegisterMask, 0);
} }
break; break;
@ -813,9 +786,9 @@ class MyArchitecture : public Architecture {
if (TargetBytesPerWord == 4 and aSize == 8) { if (TargetBytesPerWord == 4 and aSize == 8) {
*thunk = true; *thunk = true;
} else { } else {
aMask.typeMask = (1 << lir::RegisterOperand); aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.registerMask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx)); aMask.setLowHighRegisterMasks(GeneralRegisterMask .excluding(rax).excluding(rdx), 0);
bMask.registerMask = 1 << rax; bMask.setLowHighRegisterMasks(rax, 0);
} }
break; break;
@ -823,9 +796,9 @@ class MyArchitecture : public Architecture {
if (TargetBytesPerWord == 4 and aSize == 8) { if (TargetBytesPerWord == 4 and aSize == 8) {
*thunk = true; *thunk = true;
} else { } else {
aMask.typeMask = (1 << lir::RegisterOperand); aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.registerMask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx)); aMask.setLowHighRegisterMasks(GeneralRegisterMask .excluding(rax).excluding(rdx), 0);
bMask.registerMask = 1 << rax; bMask.setLowHighRegisterMasks(rax, 0);
} }
break; break;
@ -833,14 +806,13 @@ class MyArchitecture : public Architecture {
case lir::ShiftRight: case lir::ShiftRight:
case lir::UnsignedShiftRight: { case lir::UnsignedShiftRight: {
if (TargetBytesPerWord == 4 and bSize == 8) { if (TargetBytesPerWord == 4 and bSize == 8) {
const uint32_t mask = GeneralRegisterMask & ~(1 << rcx); const RegisterMask mask = GeneralRegisterMask.excluding(rcx);
aMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask; aMask.setLowHighRegisterMasks(mask, mask);
bMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask; bMask.setLowHighRegisterMasks(mask, mask);
} else { } else {
aMask.registerMask = (static_cast<uint64_t>(GeneralRegisterMask) << 32) aMask.setLowHighRegisterMasks(rcx, GeneralRegisterMask);
| (static_cast<uint64_t>(1) << rcx); const RegisterMask mask = GeneralRegisterMask.excluding(rcx);
const uint32_t mask = GeneralRegisterMask & ~(1 << rcx); bMask.setLowHighRegisterMasks(mask, mask);
bMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
} }
} break; } break;
@ -855,11 +827,11 @@ class MyArchitecture : public Architecture {
case lir::JumpIfFloatLessOrEqualOrUnordered: case lir::JumpIfFloatLessOrEqualOrUnordered:
case lir::JumpIfFloatGreaterOrEqualOrUnordered: case lir::JumpIfFloatGreaterOrEqualOrUnordered:
if (useSSE(&c)) { if (useSSE(&c)) {
aMask.typeMask = (1 << lir::RegisterOperand); aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32) aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
| FloatRegisterMask;
bMask.typeMask = aMask.typeMask; bMask.typeMask = aMask.typeMask;
bMask.registerMask = aMask.registerMask; bMask.lowRegisterMask = aMask.lowRegisterMask;
bMask.highRegisterMask = aMask.highRegisterMask;
} else { } else {
*thunk = true; *thunk = true;
} }
@ -879,11 +851,12 @@ class MyArchitecture : public Architecture {
OperandMask& cMask) OperandMask& cMask)
{ {
if (isBranch(op)) { if (isBranch(op)) {
cMask.typeMask = (1 << lir::ConstantOperand); cMask.typeMask = lir::Operand::ConstantMask;
cMask.registerMask = 0; cMask.setLowHighRegisterMasks(0, 0);
} else { } else {
cMask.typeMask = (1 << lir::RegisterOperand); cMask.typeMask = lir::Operand::RegisterPairMask;
cMask.registerMask = bMask.registerMask; cMask.lowRegisterMask = bMask.lowRegisterMask;
cMask.highRegisterMask = bMask.highRegisterMask;
} }
} }
@ -927,7 +900,7 @@ class MyAssembler : public Assembler {
virtual void checkStackOverflow(uintptr_t handler, virtual void checkStackOverflow(uintptr_t handler,
unsigned stackLimitOffsetFromThread) unsigned stackLimitOffsetFromThread)
{ {
lir::Register stack(rsp); lir::RegisterPair stack(rsp);
lir::Memory stackLimit(rbx, stackLimitOffsetFromThread); lir::Memory stackLimit(rbx, stackLimitOffsetFromThread);
lir::Constant handlerConstant(resolvedPromise(&c, handler)); lir::Constant handlerConstant(resolvedPromise(&c, handler));
branchRM(&c, branchRM(&c,
@ -940,11 +913,11 @@ class MyAssembler : public Assembler {
virtual void saveFrame(unsigned stackOffset, unsigned) virtual void saveFrame(unsigned stackOffset, unsigned)
{ {
lir::Register stack(rsp); lir::RegisterPair stack(rsp);
lir::Memory stackDst(rbx, stackOffset); lir::Memory stackDst(rbx, stackOffset);
apply(lir::Move, apply(lir::Move,
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack), OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack),
OperandInfo(TargetBytesPerWord, lir::MemoryOperand, &stackDst)); OperandInfo(TargetBytesPerWord, lir::Operand::Type::Memory, &stackDst));
} }
virtual void pushFrame(unsigned argumentCount, ...) virtual void pushFrame(unsigned argumentCount, ...)
@ -952,7 +925,7 @@ class MyAssembler : public Assembler {
// TODO: Argument should be replaced by OperandInfo... // TODO: Argument should be replaced by OperandInfo...
struct Argument { struct Argument {
unsigned size; unsigned size;
lir::OperandType type; lir::Operand::Type type;
lir::Operand* operand; lir::Operand* operand;
}; };
RUNTIME_ARRAY(Argument, arguments, argumentCount); RUNTIME_ARRAY(Argument, arguments, argumentCount);
@ -962,7 +935,7 @@ class MyAssembler : public Assembler {
for (unsigned i = 0; i < argumentCount; ++i) { for (unsigned i = 0; i < argumentCount; ++i) {
RUNTIME_ARRAY_BODY(arguments)[i].size = va_arg(a, unsigned); RUNTIME_ARRAY_BODY(arguments)[i].size = va_arg(a, unsigned);
RUNTIME_ARRAY_BODY(arguments)[i].type RUNTIME_ARRAY_BODY(arguments)[i].type
= static_cast<lir::OperandType>(va_arg(a, int)); = static_cast<lir::Operand::Type>(va_arg(a, int));
RUNTIME_ARRAY_BODY(arguments)[i].operand = va_arg(a, lir::Operand*); RUNTIME_ARRAY_BODY(arguments)[i].operand = va_arg(a, lir::Operand*);
footprint += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size, footprint += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size,
TargetBytesPerWord); TargetBytesPerWord);
@ -974,14 +947,14 @@ class MyAssembler : public Assembler {
unsigned offset = 0; unsigned offset = 0;
for (unsigned i = 0; i < argumentCount; ++i) { for (unsigned i = 0; i < argumentCount; ++i) {
if (i < arch_->argumentRegisterCount()) { if (i < arch_->argumentRegisterCount()) {
lir::Register dst(arch_->argumentRegister(i)); lir::RegisterPair dst(arch_->argumentRegister(i));
apply(lir::Move, apply(lir::Move,
OperandInfo(RUNTIME_ARRAY_BODY(arguments)[i].size, OperandInfo(RUNTIME_ARRAY_BODY(arguments)[i].size,
RUNTIME_ARRAY_BODY(arguments)[i].type, RUNTIME_ARRAY_BODY(arguments)[i].type,
RUNTIME_ARRAY_BODY(arguments)[i].operand), RUNTIME_ARRAY_BODY(arguments)[i].operand),
OperandInfo(pad(RUNTIME_ARRAY_BODY(arguments)[i].size, OperandInfo(pad(RUNTIME_ARRAY_BODY(arguments)[i].size,
TargetBytesPerWord), TargetBytesPerWord),
lir::RegisterOperand, lir::Operand::Type::RegisterPair,
&dst)); &dst));
} else { } else {
lir::Memory dst(rsp, offset * TargetBytesPerWord); lir::Memory dst(rsp, offset * TargetBytesPerWord);
@ -991,7 +964,7 @@ class MyAssembler : public Assembler {
RUNTIME_ARRAY_BODY(arguments)[i].operand), RUNTIME_ARRAY_BODY(arguments)[i].operand),
OperandInfo(pad(RUNTIME_ARRAY_BODY(arguments)[i].size, OperandInfo(pad(RUNTIME_ARRAY_BODY(arguments)[i].size,
TargetBytesPerWord), TargetBytesPerWord),
lir::MemoryOperand, lir::Operand::Type::Memory,
&dst)); &dst));
offset += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size, offset += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size,
TargetBytesPerWord); TargetBytesPerWord);
@ -1001,67 +974,67 @@ class MyAssembler : public Assembler {
virtual void allocateFrame(unsigned footprint) virtual void allocateFrame(unsigned footprint)
{ {
lir::Register stack(rsp); lir::RegisterPair stack(rsp);
if (UseFramePointer) { if (UseFramePointer) {
lir::Register base(rbp); lir::RegisterPair base(rbp);
pushR(&c, TargetBytesPerWord, &base); pushR(&c, TargetBytesPerWord, &base);
apply(lir::Move, apply(lir::Move,
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack), OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &base)); OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &base));
} }
lir::Constant footprintConstant( lir::Constant footprintConstant(
resolvedPromise(&c, footprint * TargetBytesPerWord)); resolvedPromise(&c, footprint * TargetBytesPerWord));
apply(lir::Subtract, apply(lir::Subtract,
OperandInfo( OperandInfo(
TargetBytesPerWord, lir::ConstantOperand, &footprintConstant), TargetBytesPerWord, lir::Operand::Type::Constant, &footprintConstant),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack), OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack)); OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack));
} }
virtual void adjustFrame(unsigned difference) virtual void adjustFrame(unsigned difference)
{ {
lir::Register stack(rsp); lir::RegisterPair stack(rsp);
lir::Constant differenceConstant( lir::Constant differenceConstant(
resolvedPromise(&c, difference * TargetBytesPerWord)); resolvedPromise(&c, difference * TargetBytesPerWord));
apply(lir::Subtract, apply(lir::Subtract,
OperandInfo( OperandInfo(
TargetBytesPerWord, lir::ConstantOperand, &differenceConstant), TargetBytesPerWord, lir::Operand::Type::Constant, &differenceConstant),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack), OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack)); OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack));
} }
virtual void popFrame(unsigned frameFootprint) virtual void popFrame(unsigned frameFootprint)
{ {
if (UseFramePointer) { if (UseFramePointer) {
lir::Register base(rbp); lir::RegisterPair base(rbp);
lir::Register stack(rsp); lir::RegisterPair stack(rsp);
apply(lir::Move, apply(lir::Move,
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &base), OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &base),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack)); OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack));
popR(&c, TargetBytesPerWord, &base); popR(&c, TargetBytesPerWord, &base);
} else { } else {
lir::Register stack(rsp); lir::RegisterPair stack(rsp);
lir::Constant footprint( lir::Constant footprint(
resolvedPromise(&c, frameFootprint * TargetBytesPerWord)); resolvedPromise(&c, frameFootprint * TargetBytesPerWord));
apply(lir::Add, apply(lir::Add,
OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &footprint), OperandInfo(TargetBytesPerWord, lir::Operand::Type::Constant, &footprint),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack), OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack)); OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack));
} }
} }
virtual void popFrameForTailCall(unsigned frameFootprint, virtual void popFrameForTailCall(unsigned frameFootprint,
int offset, int offset,
int returnAddressSurrogate, Register returnAddressSurrogate,
int framePointerSurrogate) Register framePointerSurrogate)
{ {
if (TailCalls) { if (TailCalls) {
if (offset) { if (offset) {
lir::Register tmp(c.client->acquireTemporary()); lir::RegisterPair tmp(c.client->acquireTemporary());
unsigned baseSize = UseFramePointer ? 1 : 0; unsigned baseSize = UseFramePointer ? 1 : 0;
@ -1085,28 +1058,28 @@ class MyAssembler : public Assembler {
if (UseFramePointer) { if (UseFramePointer) {
lir::Memory baseSrc(rsp, frameFootprint * TargetBytesPerWord); lir::Memory baseSrc(rsp, frameFootprint * TargetBytesPerWord);
lir::Register base(rbp); lir::RegisterPair base(rbp);
moveMR(&c, TargetBytesPerWord, &baseSrc, TargetBytesPerWord, &base); moveMR(&c, TargetBytesPerWord, &baseSrc, TargetBytesPerWord, &base);
} }
lir::Register stack(rsp); lir::RegisterPair stack(rsp);
lir::Constant footprint(resolvedPromise( lir::Constant footprint(resolvedPromise(
&c, (frameFootprint - offset + baseSize) * TargetBytesPerWord)); &c, (frameFootprint - offset + baseSize) * TargetBytesPerWord));
addCR(&c, TargetBytesPerWord, &footprint, TargetBytesPerWord, &stack); addCR(&c, TargetBytesPerWord, &footprint, TargetBytesPerWord, &stack);
if (returnAddressSurrogate != lir::NoRegister) { if (returnAddressSurrogate != NoRegister) {
assertT(&c, offset > 0); assertT(&c, offset > 0);
lir::Register ras(returnAddressSurrogate); lir::RegisterPair ras(returnAddressSurrogate);
lir::Memory dst(rsp, offset * TargetBytesPerWord); lir::Memory dst(rsp, offset * TargetBytesPerWord);
moveRM(&c, TargetBytesPerWord, &ras, TargetBytesPerWord, &dst); moveRM(&c, TargetBytesPerWord, &ras, TargetBytesPerWord, &dst);
} }
if (framePointerSurrogate != lir::NoRegister) { if (framePointerSurrogate != NoRegister) {
assertT(&c, offset > 0); assertT(&c, offset > 0);
lir::Register fps(framePointerSurrogate); lir::RegisterPair fps(framePointerSurrogate);
lir::Memory dst(rsp, (offset - 1) * TargetBytesPerWord); lir::Memory dst(rsp, (offset - 1) * TargetBytesPerWord);
moveRM(&c, TargetBytesPerWord, &fps, TargetBytesPerWord, &dst); moveRM(&c, TargetBytesPerWord, &fps, TargetBytesPerWord, &dst);
} }
@ -1127,10 +1100,10 @@ class MyAssembler : public Assembler {
assertT(&c, (argumentFootprint % StackAlignmentInWords) == 0); assertT(&c, (argumentFootprint % StackAlignmentInWords) == 0);
if (TailCalls and argumentFootprint > StackAlignmentInWords) { if (TailCalls and argumentFootprint > StackAlignmentInWords) {
lir::Register returnAddress(rcx); lir::RegisterPair returnAddress(rcx);
popR(&c, TargetBytesPerWord, &returnAddress); popR(&c, TargetBytesPerWord, &returnAddress);
lir::Register stack(rsp); lir::RegisterPair stack(rsp);
lir::Constant adjustment(resolvedPromise( lir::Constant adjustment(resolvedPromise(
&c, &c,
(argumentFootprint - StackAlignmentInWords) * TargetBytesPerWord)); (argumentFootprint - StackAlignmentInWords) * TargetBytesPerWord));
@ -1147,10 +1120,10 @@ class MyAssembler : public Assembler {
{ {
popFrame(frameFootprint); popFrame(frameFootprint);
lir::Register returnAddress(rcx); lir::RegisterPair returnAddress(rcx);
popR(&c, TargetBytesPerWord, &returnAddress); popR(&c, TargetBytesPerWord, &returnAddress);
lir::Register stack(rsp); lir::RegisterPair stack(rsp);
lir::Memory stackSrc(rbx, stackOffsetFromThread); lir::Memory stackSrc(rbx, stackOffsetFromThread);
moveMR(&c, TargetBytesPerWord, &stackSrc, TargetBytesPerWord, &stack); moveMR(&c, TargetBytesPerWord, &stackSrc, TargetBytesPerWord, &stack);
@ -1182,7 +1155,7 @@ class MyAssembler : public Assembler {
if (isBranch(op)) { if (isBranch(op)) {
assertT(&this->c, a.size == b.size); assertT(&this->c, a.size == b.size);
assertT(&this->c, c.size == TargetBytesPerWord); assertT(&this->c, c.size == TargetBytesPerWord);
assertT(&this->c, c.type == lir::ConstantOperand); assertT(&this->c, c.type == lir::Operand::Type::Constant);
arch_->c.branchOperations[branchIndex(&(arch_->c), a.type, b.type)]( arch_->c.branchOperations[branchIndex(&(arch_->c), a.type, b.type)](
&this->c, op, a.size, a.operand, b.operand, c.operand); &this->c, op, a.size, a.operand, b.operand, c.operand);

View File

@ -68,13 +68,13 @@ class ArchitectureContext {
bool useNativeFeatures; bool useNativeFeatures;
OperationType operations[lir::OperationCount]; OperationType operations[lir::OperationCount];
UnaryOperationType UnaryOperationType
unaryOperations[lir::UnaryOperationCount * lir::OperandTypeCount]; unaryOperations[lir::UnaryOperationCount * lir::Operand::TypeCount];
BinaryOperationType binaryOperations BinaryOperationType binaryOperations
[(lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount) [(lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
* lir::OperandTypeCount * lir::OperandTypeCount]; * lir::Operand::TypeCount * lir::Operand::TypeCount];
BranchOperationType branchOperations[lir::BranchOperationCount BranchOperationType branchOperations[lir::BranchOperationCount
* lir::OperandTypeCount * lir::Operand::TypeCount
* lir::OperandTypeCount]; * lir::Operand::TypeCount];
}; };
class Context { class Context {

View File

@ -53,9 +53,9 @@ namespace x86 {
void maybeRex(Context* c, void maybeRex(Context* c,
unsigned size, unsigned size,
int a, Register a,
int index, Register index,
int base, Register base,
bool always) bool always)
{ {
if (vm::TargetBytesPerWord == 8) { if (vm::TargetBytesPerWord == 8) {
@ -65,63 +65,63 @@ void maybeRex(Context* c,
} else { } else {
byte = REX_NONE; byte = REX_NONE;
} }
if (a != lir::NoRegister and (a & 8)) if (a != NoRegister and (a.index() & 8))
byte |= REX_R; byte |= REX_R;
if (index != lir::NoRegister and (index & 8)) if (index != NoRegister and (index.index() & 8))
byte |= REX_X; byte |= REX_X;
if (base != lir::NoRegister and (base & 8)) if (base != NoRegister and (base.index() & 8))
byte |= REX_B; byte |= REX_B;
if (always or byte != REX_NONE) if (always or byte != REX_NONE)
c->code.append(byte); c->code.append(byte);
} }
} }
void maybeRex(Context* c, unsigned size, lir::Register* a, lir::Register* b) void maybeRex(Context* c, unsigned size, lir::RegisterPair* a, lir::RegisterPair* b)
{ {
maybeRex(c, size, a->low, lir::NoRegister, b->low, false); maybeRex(c, size, a->low, NoRegister, b->low, false);
} }
void alwaysRex(Context* c, unsigned size, lir::Register* a, lir::Register* b) void alwaysRex(Context* c, unsigned size, lir::RegisterPair* a, lir::RegisterPair* b)
{ {
maybeRex(c, size, a->low, lir::NoRegister, b->low, true); maybeRex(c, size, a->low, NoRegister, b->low, true);
} }
void maybeRex(Context* c, unsigned size, lir::Register* a) void maybeRex(Context* c, unsigned size, lir::RegisterPair* a)
{ {
maybeRex(c, size, lir::NoRegister, lir::NoRegister, a->low, false); maybeRex(c, size, NoRegister, NoRegister, a->low, false);
} }
void maybeRex(Context* c, unsigned size, lir::Register* a, lir::Memory* b) void maybeRex(Context* c, unsigned size, lir::RegisterPair* a, lir::Memory* b)
{ {
maybeRex(c, size, a->low, b->index, b->base, size == 1 and (a->low & 4)); maybeRex(c, size, a->low, b->index, b->base, size == 1 and (a->low.index() & 4));
} }
void maybeRex(Context* c, unsigned size, lir::Memory* a) void maybeRex(Context* c, unsigned size, lir::Memory* a)
{ {
maybeRex(c, size, lir::NoRegister, a->index, a->base, false); maybeRex(c, size, NoRegister, a->index, a->base, false);
} }
void modrm(Context* c, uint8_t mod, int a, int b) void modrm(Context* c, uint8_t mod, Register a, Register b)
{ {
c->code.append(mod | (regCode(b) << 3) | regCode(a)); c->code.append(mod | (regCode(b) << 3) | regCode(a));
} }
void modrm(Context* c, uint8_t mod, lir::Register* a, lir::Register* b) void modrm(Context* c, uint8_t mod, lir::RegisterPair* a, lir::RegisterPair* b)
{ {
modrm(c, mod, a->low, b->low); modrm(c, mod, a->low, b->low);
} }
void sib(Context* c, unsigned scale, int index, int base) void sib(Context* c, unsigned scale, Register index, Register base)
{ {
c->code.append((util::log(scale) << 6) | (regCode(index) << 3) c->code.append((util::log(scale) << 6) | (regCode(index) << 3)
| regCode(base)); | regCode(base));
} }
void modrmSib(Context* c, int width, int a, int scale, int index, int base) void modrmSib(Context* c, int width, Register a, int scale, Register index, Register base)
{ {
if (index == lir::NoRegister) { if (index == NoRegister) {
modrm(c, width, base, a); modrm(c, width, base, a);
if (regCode(base) == rsp) { if (regCode(base) == rsp.index()) {
sib(c, 0x00, rsp, rsp); sib(c, 0x00, rsp, rsp);
} }
} else { } else {
@ -130,9 +130,9 @@ void modrmSib(Context* c, int width, int a, int scale, int index, int base)
} }
} }
void modrmSibImm(Context* c, int a, int scale, int index, int base, int offset) void modrmSibImm(Context* c, Register a, int scale, Register index, Register base, int offset)
{ {
if (offset == 0 and regCode(base) != rbp) { if (offset == 0 and regCode(base) != rbp.index()) {
modrmSib(c, 0x00, a, scale, index, base); modrmSib(c, 0x00, a, scale, index, base);
} else if (vm::fitsInInt8(offset)) { } else if (vm::fitsInInt8(offset)) {
modrmSib(c, 0x40, a, scale, index, base); modrmSib(c, 0x40, a, scale, index, base);
@ -143,7 +143,7 @@ void modrmSibImm(Context* c, int a, int scale, int index, int base, int offset)
} }
} }
void modrmSibImm(Context* c, lir::Register* a, lir::Memory* b) void modrmSibImm(Context* c, lir::RegisterPair* a, lir::Memory* b)
{ {
modrmSibImm(c, a->low, b->scale, b->index, b->base, b->offset); modrmSibImm(c, a->low, b->scale, b->index, b->base, b->offset);
} }
@ -177,9 +177,9 @@ void conditional(Context* c, unsigned condition, lir::Constant* a)
void sseMoveRR(Context* c, void sseMoveRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b) lir::RegisterPair* b)
{ {
assertT(c, aSize >= 4); assertT(c, aSize >= 4);
assertT(c, aSize == bSize); assertT(c, aSize == bSize);
@ -213,10 +213,10 @@ void sseMoveCR(Context* c,
unsigned aSize, unsigned aSize,
lir::Constant* a, lir::Constant* a,
unsigned bSize, unsigned bSize,
lir::Register* b) lir::RegisterPair* b)
{ {
assertT(c, aSize <= vm::TargetBytesPerWord); assertT(c, aSize <= vm::TargetBytesPerWord);
lir::Register tmp(c->client->acquireTemporary(GeneralRegisterMask)); lir::RegisterPair tmp(c->client->acquireTemporary(GeneralRegisterMask));
moveCR2(c, aSize, a, aSize, &tmp, 0); moveCR2(c, aSize, a, aSize, &tmp, 0);
sseMoveRR(c, aSize, &tmp, bSize, b); sseMoveRR(c, aSize, &tmp, bSize, b);
c->client->releaseTemporary(tmp.low); c->client->releaseTemporary(tmp.low);
@ -226,7 +226,7 @@ void sseMoveMR(Context* c,
unsigned aSize, unsigned aSize,
lir::Memory* a, lir::Memory* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b) lir::RegisterPair* b)
{ {
assertT(c, aSize >= 4); assertT(c, aSize >= 4);
@ -244,7 +244,7 @@ void sseMoveMR(Context* c,
void sseMoveRM(Context* c, void sseMoveRM(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
UNUSED unsigned bSize, UNUSED unsigned bSize,
lir::Memory* b) lir::Memory* b)
{ {
@ -353,9 +353,9 @@ void branchFloat(Context* c, lir::TernaryOperation op, lir::Constant* target)
void floatRegOp(Context* c, void floatRegOp(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize, unsigned bSize,
lir::Register* b, lir::RegisterPair* b,
uint8_t op, uint8_t op,
uint8_t mod) uint8_t mod)
{ {
@ -373,7 +373,7 @@ void floatMemOp(Context* c,
unsigned aSize, unsigned aSize,
lir::Memory* a, lir::Memory* a,
unsigned bSize, unsigned bSize,
lir::Register* b, lir::RegisterPair* b,
uint8_t op) uint8_t op)
{ {
if (aSize == 4) { if (aSize == 4) {
@ -390,13 +390,13 @@ void moveCR(Context* c,
unsigned aSize, unsigned aSize,
lir::Constant* a, lir::Constant* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void moveCR2(Context* c, void moveCR2(Context* c,
UNUSED unsigned aSize, UNUSED unsigned aSize,
lir::Constant* a, lir::Constant* a,
UNUSED unsigned bSize, UNUSED unsigned bSize,
lir::Register* b, lir::RegisterPair* b,
unsigned promiseOffset) unsigned promiseOffset)
{ {
if (vm::TargetBytesPerWord == 4 and bSize == 8) { if (vm::TargetBytesPerWord == 4 and bSize == 8) {
@ -408,7 +408,7 @@ void moveCR2(Context* c,
ResolvedPromise low(v & 0xFFFFFFFF); ResolvedPromise low(v & 0xFFFFFFFF);
lir::Constant al(&low); lir::Constant al(&low);
lir::Register bh(b->high); lir::RegisterPair bh(b->high);
moveCR(c, 4, &al, 4, b); moveCR(c, 4, &al, 4, b);
moveCR(c, 4, &ah, 4, &bh); moveCR(c, 4, &ah, 4, &bh);

View File

@ -32,42 +32,42 @@ void maybeRex(Context* c,
int base, int base,
bool always); bool always);
void maybeRex(Context* c, unsigned size, lir::Register* a, lir::Register* b); void maybeRex(Context* c, unsigned size, lir::RegisterPair* a, lir::RegisterPair* b);
void alwaysRex(Context* c, unsigned size, lir::Register* a, lir::Register* b); void alwaysRex(Context* c, unsigned size, lir::RegisterPair* a, lir::RegisterPair* b);
void maybeRex(Context* c, unsigned size, lir::Register* a); void maybeRex(Context* c, unsigned size, lir::RegisterPair* a);
void maybeRex(Context* c, unsigned size, lir::Register* a, lir::Memory* b); void maybeRex(Context* c, unsigned size, lir::RegisterPair* a, lir::Memory* b);
void maybeRex(Context* c, unsigned size, lir::Memory* a); void maybeRex(Context* c, unsigned size, lir::Memory* a);
inline int regCode(int a) inline int regCode(Register a)
{ {
return a & 7; return a.index() & 7;
} }
inline int regCode(lir::Register* a) inline int regCode(lir::RegisterPair* a)
{ {
return regCode(a->low); return regCode(a->low);
} }
inline bool isFloatReg(lir::Register* a) inline bool isFloatReg(lir::RegisterPair* a)
{ {
return a->low >= xmm0; return a->low >= xmm0;
} }
void modrm(Context* c, uint8_t mod, int a, int b); void modrm(Context* c, uint8_t mod, Register a, Register b);
void modrm(Context* c, uint8_t mod, lir::Register* a, lir::Register* b); void modrm(Context* c, uint8_t mod, lir::RegisterPair* a, lir::RegisterPair* b);
void sib(Context* c, unsigned scale, int index, int base); void sib(Context* c, unsigned scale, Register index, Register base);
void modrmSib(Context* c, int width, int a, int scale, int index, int base); void modrmSib(Context* c, int width, Register a, int scale, Register index, Register base);
void modrmSibImm(Context* c, int a, int scale, int index, int base, int offset); void modrmSibImm(Context* c, Register a, int scale, Register index, Register base, int offset);
void modrmSibImm(Context* c, lir::Register* a, lir::Memory* b); void modrmSibImm(Context* c, lir::RegisterPair* a, lir::Memory* b);
void opcode(Context* c, uint8_t op); void opcode(Context* c, uint8_t op);
@ -79,25 +79,25 @@ void conditional(Context* c, unsigned condition, lir::Constant* a);
void sseMoveRR(Context* c, void sseMoveRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void sseMoveCR(Context* c, void sseMoveCR(Context* c,
unsigned aSize, unsigned aSize,
lir::Constant* a, lir::Constant* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void sseMoveMR(Context* c, void sseMoveMR(Context* c,
unsigned aSize, unsigned aSize,
lir::Memory* a, lir::Memory* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void sseMoveRM(Context* c, void sseMoveRM(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
UNUSED unsigned bSize, UNUSED unsigned bSize,
lir::Memory* b); lir::Memory* b);
@ -107,9 +107,9 @@ void branchFloat(Context* c, lir::TernaryOperation op, lir::Constant* target);
void floatRegOp(Context* c, void floatRegOp(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize, unsigned bSize,
lir::Register* b, lir::RegisterPair* b,
uint8_t op, uint8_t op,
uint8_t mod = 0xc0); uint8_t mod = 0xc0);
@ -117,14 +117,14 @@ void floatMemOp(Context* c,
unsigned aSize, unsigned aSize,
lir::Memory* a, lir::Memory* a,
unsigned bSize, unsigned bSize,
lir::Register* b, lir::RegisterPair* b,
uint8_t op); uint8_t op);
void moveCR2(Context* c, void moveCR2(Context* c,
UNUSED unsigned aSize, UNUSED unsigned aSize,
lir::Constant* a, lir::Constant* a,
UNUSED unsigned bSize, UNUSED unsigned bSize,
lir::Register* b, lir::RegisterPair* b,
unsigned promiseOffset); unsigned promiseOffset);
} // namespace x86 } // namespace x86

View File

@ -28,42 +28,42 @@ using namespace util;
unsigned index(ArchitectureContext*, unsigned index(ArchitectureContext*,
lir::BinaryOperation operation, lir::BinaryOperation operation,
lir::OperandType operand1, lir::Operand::Type operand1,
lir::OperandType operand2) lir::Operand::Type operand2)
{ {
return operation + ((lir::BinaryOperationCount return operation + ((lir::BinaryOperationCount
+ lir::NonBranchTernaryOperationCount) * operand1) + lir::NonBranchTernaryOperationCount) * (unsigned)operand1)
+ ((lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount) + ((lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
* lir::OperandTypeCount * operand2); * lir::Operand::TypeCount * (unsigned)operand2);
} }
unsigned index(ArchitectureContext* c UNUSED, unsigned index(ArchitectureContext* c UNUSED,
lir::TernaryOperation operation, lir::TernaryOperation operation,
lir::OperandType operand1, lir::Operand::Type operand1,
lir::OperandType operand2) lir::Operand::Type operand2)
{ {
assertT(c, not isBranch(operation)); assertT(c, not isBranch(operation));
return lir::BinaryOperationCount + operation return lir::BinaryOperationCount + operation
+ ((lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount) + ((lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
* operand1) * (unsigned)operand1)
+ ((lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount) + ((lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
* lir::OperandTypeCount * operand2); * lir::Operand::TypeCount * (unsigned)operand2);
} }
unsigned branchIndex(ArchitectureContext* c UNUSED, unsigned branchIndex(ArchitectureContext* c UNUSED,
lir::OperandType operand1, lir::Operand::Type operand1,
lir::OperandType operand2) lir::Operand::Type operand2)
{ {
return operand1 + (lir::OperandTypeCount * operand2); return (unsigned)operand1 + (lir::Operand::TypeCount * (unsigned)operand2);
} }
void populateTables(ArchitectureContext* c) void populateTables(ArchitectureContext* c)
{ {
const lir::OperandType C = lir::ConstantOperand; const lir::Operand::Type C = lir::Operand::Type::Constant;
const lir::OperandType A = lir::AddressOperand; const lir::Operand::Type A = lir::Operand::Type::Address;
const lir::OperandType R = lir::RegisterOperand; const lir::Operand::Type R = lir::Operand::Type::RegisterPair;
const lir::OperandType M = lir::MemoryOperand; const lir::Operand::Type M = lir::Operand::Type::Memory;
OperationType* zo = c->operations; OperationType* zo = c->operations;
UnaryOperationType* uo = c->unaryOperations; UnaryOperationType* uo = c->unaryOperations;

View File

@ -23,17 +23,17 @@ class ArchitectureContext;
unsigned index(ArchitectureContext*, unsigned index(ArchitectureContext*,
lir::BinaryOperation operation, lir::BinaryOperation operation,
lir::OperandType operand1, lir::Operand::Type operand1,
lir::OperandType operand2); lir::Operand::Type operand2);
unsigned index(ArchitectureContext* c UNUSED, unsigned index(ArchitectureContext* c UNUSED,
lir::TernaryOperation operation, lir::TernaryOperation operation,
lir::OperandType operand1, lir::Operand::Type operand1,
lir::OperandType operand2); lir::Operand::Type operand2);
unsigned branchIndex(ArchitectureContext* c UNUSED, unsigned branchIndex(ArchitectureContext* c UNUSED,
lir::OperandType operand1, lir::Operand::Type operand1,
lir::OperandType operand2); lir::Operand::Type operand2);
void populateTables(ArchitectureContext* c); void populateTables(ArchitectureContext* c);

File diff suppressed because it is too large Load Diff

View File

@ -33,7 +33,7 @@ void callC(Context* c, unsigned size UNUSED, lir::Constant* a);
void longCallC(Context* c, unsigned size, lir::Constant* a); void longCallC(Context* c, unsigned size, lir::Constant* a);
void jumpR(Context* c, unsigned size UNUSED, lir::Register* a); void jumpR(Context* c, unsigned size UNUSED, lir::RegisterPair* a);
void jumpC(Context* c, unsigned size UNUSED, lir::Constant* a); void jumpC(Context* c, unsigned size UNUSED, lir::Constant* a);
@ -41,7 +41,7 @@ void jumpM(Context* c, unsigned size UNUSED, lir::Memory* a);
void longJumpC(Context* c, unsigned size, lir::Constant* a); void longJumpC(Context* c, unsigned size, lir::Constant* a);
void callR(Context* c, unsigned size UNUSED, lir::Register* a); void callR(Context* c, unsigned size UNUSED, lir::RegisterPair* a);
void callM(Context* c, unsigned size UNUSED, lir::Memory* a); void callM(Context* c, unsigned size UNUSED, lir::Memory* a);
@ -53,51 +53,51 @@ void alignedJumpC(Context* c, unsigned size, lir::Constant* a);
void alignedLongJumpC(Context* c, unsigned size, lir::Constant* a); void alignedLongJumpC(Context* c, unsigned size, lir::Constant* a);
void pushR(Context* c, unsigned size, lir::Register* a); void pushR(Context* c, unsigned size, lir::RegisterPair* a);
void popR(Context* c, unsigned size, lir::Register* a); void popR(Context* c, unsigned size, lir::RegisterPair* a);
void negateR(Context* c, unsigned size, lir::Register* a); void negateR(Context* c, unsigned size, lir::RegisterPair* a);
void negateRR(Context* c, void negateRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b UNUSED); lir::RegisterPair* b UNUSED);
void moveCR(Context* c, void moveCR(Context* c,
unsigned aSize, unsigned aSize,
lir::Constant* a, lir::Constant* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void moveZCR(Context* c, void moveZCR(Context* c,
unsigned aSize, unsigned aSize,
lir::Constant* a, lir::Constant* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void swapRR(Context* c, void swapRR(Context* c,
unsigned aSize UNUSED, unsigned aSize UNUSED,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void moveRR(Context* c, void moveRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
UNUSED unsigned bSize, UNUSED unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void moveMR(Context* c, void moveMR(Context* c,
unsigned aSize, unsigned aSize,
lir::Memory* a, lir::Memory* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void moveRM(Context* c, void moveRM(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Memory* b); lir::Memory* b);
@ -105,7 +105,7 @@ void moveAR(Context* c,
unsigned aSize, unsigned aSize,
lir::Address* a, lir::Address* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void moveCM(Context* c, void moveCM(Context* c,
unsigned aSize UNUSED, unsigned aSize UNUSED,
@ -115,111 +115,111 @@ void moveCM(Context* c,
void moveZRR(Context* c, void moveZRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void moveZMR(Context* c, void moveZMR(Context* c,
unsigned aSize UNUSED, unsigned aSize UNUSED,
lir::Memory* a, lir::Memory* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void addCarryRR(Context* c, unsigned size, lir::Register* a, lir::Register* b); void addCarryRR(Context* c, unsigned size, lir::RegisterPair* a, lir::RegisterPair* b);
void addRR(Context* c, void addRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void addCarryCR(Context* c, unsigned size, lir::Constant* a, lir::Register* b); void addCarryCR(Context* c, unsigned size, lir::Constant* a, lir::RegisterPair* b);
void addCR(Context* c, void addCR(Context* c,
unsigned aSize, unsigned aSize,
lir::Constant* a, lir::Constant* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void subtractBorrowCR(Context* c, void subtractBorrowCR(Context* c,
unsigned size UNUSED, unsigned size UNUSED,
lir::Constant* a, lir::Constant* a,
lir::Register* b); lir::RegisterPair* b);
void subtractCR(Context* c, void subtractCR(Context* c,
unsigned aSize, unsigned aSize,
lir::Constant* a, lir::Constant* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void subtractBorrowRR(Context* c, void subtractBorrowRR(Context* c,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b); lir::RegisterPair* b);
void subtractRR(Context* c, void subtractRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void andRR(Context* c, void andRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void andCR(Context* c, void andCR(Context* c,
unsigned aSize, unsigned aSize,
lir::Constant* a, lir::Constant* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void orRR(Context* c, void orRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void orCR(Context* c, void orCR(Context* c,
unsigned aSize, unsigned aSize,
lir::Constant* a, lir::Constant* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void xorRR(Context* c, void xorRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void xorCR(Context* c, void xorCR(Context* c,
unsigned aSize, unsigned aSize,
lir::Constant* a, lir::Constant* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void multiplyRR(Context* c, void multiplyRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void compareRR(Context* c, void compareRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void compareCR(Context* c, void compareCR(Context* c,
unsigned aSize, unsigned aSize,
lir::Constant* a, lir::Constant* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void compareRM(Context* c, void compareRM(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Memory* b); lir::Memory* b);
@ -231,9 +231,9 @@ void compareCM(Context* c,
void compareFloatRR(Context* c, void compareFloatRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void branchLong(Context* c, void branchLong(Context* c,
lir::TernaryOperation op, lir::TernaryOperation op,
@ -247,21 +247,21 @@ void branchLong(Context* c,
void branchRR(Context* c, void branchRR(Context* c,
lir::TernaryOperation op, lir::TernaryOperation op,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Register* b, lir::RegisterPair* b,
lir::Constant* target); lir::Constant* target);
void branchCR(Context* c, void branchCR(Context* c,
lir::TernaryOperation op, lir::TernaryOperation op,
unsigned size, unsigned size,
lir::Constant* a, lir::Constant* a,
lir::Register* b, lir::RegisterPair* b,
lir::Constant* target); lir::Constant* target);
void branchRM(Context* c, void branchRM(Context* c,
lir::TernaryOperation op, lir::TernaryOperation op,
unsigned size, unsigned size,
lir::Register* a, lir::RegisterPair* a,
lir::Memory* b, lir::Memory* b,
lir::Constant* target); lir::Constant* target);
@ -276,181 +276,181 @@ void multiplyCR(Context* c,
unsigned aSize, unsigned aSize,
lir::Constant* a, lir::Constant* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void divideRR(Context* c, void divideRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b UNUSED); lir::RegisterPair* b UNUSED);
void remainderRR(Context* c, void remainderRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void doShift(Context* c, void doShift(Context* c,
UNUSED void (*shift)(Context*, UNUSED void (*shift)(Context*,
unsigned, unsigned,
lir::Register*, lir::RegisterPair*,
unsigned, unsigned,
lir::Register*), lir::RegisterPair*),
int type, int type,
UNUSED unsigned aSize, UNUSED unsigned aSize,
lir::Constant* a, lir::Constant* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void shiftLeftRR(Context* c, void shiftLeftRR(Context* c,
UNUSED unsigned aSize, UNUSED unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void shiftLeftCR(Context* c, void shiftLeftCR(Context* c,
unsigned aSize, unsigned aSize,
lir::Constant* a, lir::Constant* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void shiftRightRR(Context* c, void shiftRightRR(Context* c,
UNUSED unsigned aSize, UNUSED unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void shiftRightCR(Context* c, void shiftRightCR(Context* c,
unsigned aSize, unsigned aSize,
lir::Constant* a, lir::Constant* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void unsignedShiftRightRR(Context* c, void unsignedShiftRightRR(Context* c,
UNUSED unsigned aSize, UNUSED unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void unsignedShiftRightCR(Context* c, void unsignedShiftRightCR(Context* c,
unsigned aSize UNUSED, unsigned aSize UNUSED,
lir::Constant* a, lir::Constant* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void floatSqrtRR(Context* c, void floatSqrtRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void floatSqrtMR(Context* c, void floatSqrtMR(Context* c,
unsigned aSize, unsigned aSize,
lir::Memory* a, lir::Memory* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void floatAddRR(Context* c, void floatAddRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void floatAddMR(Context* c, void floatAddMR(Context* c,
unsigned aSize, unsigned aSize,
lir::Memory* a, lir::Memory* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void floatSubtractRR(Context* c, void floatSubtractRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void floatSubtractMR(Context* c, void floatSubtractMR(Context* c,
unsigned aSize, unsigned aSize,
lir::Memory* a, lir::Memory* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void floatMultiplyRR(Context* c, void floatMultiplyRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void floatMultiplyMR(Context* c, void floatMultiplyMR(Context* c,
unsigned aSize, unsigned aSize,
lir::Memory* a, lir::Memory* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void floatDivideRR(Context* c, void floatDivideRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void floatDivideMR(Context* c, void floatDivideMR(Context* c,
unsigned aSize, unsigned aSize,
lir::Memory* a, lir::Memory* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void float2FloatRR(Context* c, void float2FloatRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void float2FloatMR(Context* c, void float2FloatMR(Context* c,
unsigned aSize, unsigned aSize,
lir::Memory* a, lir::Memory* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void float2IntRR(Context* c, void float2IntRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void float2IntMR(Context* c, void float2IntMR(Context* c,
unsigned aSize, unsigned aSize,
lir::Memory* a, lir::Memory* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void int2FloatRR(Context* c, void int2FloatRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void int2FloatMR(Context* c, void int2FloatMR(Context* c,
unsigned aSize, unsigned aSize,
lir::Memory* a, lir::Memory* a,
unsigned bSize, unsigned bSize,
lir::Register* b); lir::RegisterPair* b);
void floatNegateRR(Context* c, void floatNegateRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void floatAbsoluteRR(Context* c, void floatAbsoluteRR(Context* c,
unsigned aSize UNUSED, unsigned aSize UNUSED,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b); lir::RegisterPair* b);
void absoluteRR(Context* c, void absoluteRR(Context* c,
unsigned aSize, unsigned aSize,
lir::Register* a, lir::RegisterPair* a,
unsigned bSize UNUSED, unsigned bSize UNUSED,
lir::Register* b UNUSED); lir::RegisterPair* b UNUSED);
} // namespace x86 } // namespace x86
} // namespace codegen } // namespace codegen

View File

@ -15,50 +15,45 @@ namespace avian {
namespace codegen { namespace codegen {
namespace x86 { namespace x86 {
enum { constexpr Register rax((int)0);
rax = 0, constexpr Register rcx(1);
rcx = 1, constexpr Register rdx(2);
rdx = 2, constexpr Register rbx(3);
rbx = 3, constexpr Register rsp(4);
rsp = 4, constexpr Register rbp(5);
rbp = 5, constexpr Register rsi(6);
rsi = 6, constexpr Register rdi(7);
rdi = 7, constexpr Register r8(8);
r8 = 8, constexpr Register r9(9);
r9 = 9, constexpr Register r10(10);
r10 = 10, constexpr Register r11(11);
r11 = 11, constexpr Register r12(12);
r12 = 12, constexpr Register r13(13);
r13 = 13, constexpr Register r14(14);
r14 = 14, constexpr Register r15(15);
r15 = 15, constexpr Register xmm0(16);
}; constexpr Register xmm1(16 + 1);
constexpr Register xmm2(16 + 2);
constexpr Register xmm3(16 + 3);
constexpr Register xmm4(16 + 4);
constexpr Register xmm5(16 + 5);
constexpr Register xmm6(16 + 6);
constexpr Register xmm7(16 + 7);
constexpr Register xmm8(16 + 8);
constexpr Register xmm9(16 + 9);
constexpr Register xmm10(16 + 10);
constexpr Register xmm11(16 + 11);
constexpr Register xmm12(16 + 12);
constexpr Register xmm13(16 + 13);
constexpr Register xmm14(16 + 14);
constexpr Register xmm15(16 + 15);
enum { constexpr Register LongJumpRegister = r10;
xmm0 = r15 + 1,
xmm1,
xmm2,
xmm3,
xmm4,
xmm5,
xmm6,
xmm7,
xmm8,
xmm9,
xmm10,
xmm11,
xmm12,
xmm13,
xmm14,
xmm15,
};
const int LongJumpRegister = r10; constexpr RegisterMask GeneralRegisterMask = vm::TargetBytesPerWord == 4 ? 0x000000ff
const unsigned GeneralRegisterMask = vm::TargetBytesPerWord == 4 ? 0x000000ff
: 0x0000ffff; : 0x0000ffff;
const unsigned FloatRegisterMask = vm::TargetBytesPerWord == 4 ? 0x00ff0000 constexpr RegisterMask FloatRegisterMask = vm::TargetBytesPerWord == 4 ? 0x00ff0000
: 0xffff0000; : 0xffff0000;
} // namespace x86 } // namespace x86

View File

@ -30,7 +30,8 @@ Architecture* makeArchitectureNative(vm::System* system,
#elif(AVIAN_TARGET_ARCH == AVIAN_ARCH_X86) \ #elif(AVIAN_TARGET_ARCH == AVIAN_ARCH_X86) \
|| (AVIAN_TARGET_ARCH == AVIAN_ARCH_X86_64) || (AVIAN_TARGET_ARCH == AVIAN_ARCH_X86_64)
return makeArchitectureX86(system, useNativeFeatures); return makeArchitectureX86(system, useNativeFeatures);
#elif AVIAN_TARGET_ARCH == AVIAN_ARCH_ARM #elif (AVIAN_TARGET_ARCH == AVIAN_ARCH_ARM) \
|| (AVIAN_TARGET_ARCH == AVIAN_ARCH_ARM64)
return makeArchitectureArm(system, useNativeFeatures); return makeArchitectureArm(system, useNativeFeatures);
#else #else
#error "Unsupported codegen target" #error "Unsupported codegen target"

View File

@ -109,7 +109,7 @@ GLOBAL(vmInvoke_safeStack):
ldr r6,[r5,#CONTINUATION_LENGTH] ldr r6,[r5,#CONTINUATION_LENGTH]
lsl r6,r6,#2 lsl r6,r6,#2
neg r7,r6 neg r7,r6
add r7,r7,#-80 add r7,r7,#-80 // 80 bytes for callee-saved register values
mov r4,sp mov r4,sp
str r4,[sp,r7]! str r4,[sp,r7]!
@ -167,10 +167,10 @@ LOCAL(vmInvoke_handleException):
bx r7 bx r7
LOCAL(vmInvoke_exit): LOCAL(vmInvoke_exit):
#endif // AVIAN_CONTINUATIONS
mov ip, #0 mov ip, #0
str ip, [r8, #TARGET_THREAD_STACK] str ip, [r8, #TARGET_THREAD_STACK]
#endif // AVIAN_CONTINUATIONS
// restore return type // restore return type
ldr ip, [sp], #4 ldr ip, [sp], #4

222
src/compile-arm64.S Normal file
View File

@ -0,0 +1,222 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "avian/types.h"
#include "avian/target-fields.h"
.text
#define BYTES_PER_WORD 8
#define LOCAL(x) .L##x
#ifdef __APPLE__
# define GLOBAL(x) _##x
#else
# define GLOBAL(x) x
#endif
#define CONTINUATION_NEXT 8
#define CONTINUATION_ADDRESS 32
#define CONTINUATION_RETURN_ADDRESS_OFFSET 40
#define CONTINUATION_FRAME_POINTER_OFFSET 48
#define CONTINUATION_LENGTH 56
#define CONTINUATION_BODY 64
.globl GLOBAL(vmInvoke)
.align 2
GLOBAL(vmInvoke):
// arguments:
// x0 : thread
// x1 : function
// x2 : arguments
// w3 : argumentFootprint
// w4 : frameSize (not used)
// w5 : returnType
// allocate frame
stp x29, x30, [sp,#-96]!
mov x29, sp
// save callee-saved register values
stp x19, x20, [sp,#16]
stp x21, x22, [sp,#32]
stp x23, x24, [sp,#48]
stp x25, x26, [sp,#64]
stp x27, x28, [sp,#80]
// save return type
str w5, [sp,#-16]!
mov x5, sp
str x5, [x0,#TARGET_THREAD_SCRATCH]
// copy arguments into place, reserving enough space for them, plus
// alignment padding
sub x5, sp, w3, uxtw
and sp, x5, #-16
mov x4, #0
b LOCAL(vmInvoke_argumentTest)
LOCAL(vmInvoke_argumentLoop):
ldr x5, [x2, x4]
str x5, [sp, x4]
add x4, x4, #BYTES_PER_WORD
LOCAL(vmInvoke_argumentTest):
cmp x4, x3
b.lt LOCAL(vmInvoke_argumentLoop)
// we use x19 to hold the thread pointer, by convention
mov x19, x0
// load and call function address
blr x1
.globl GLOBAL(vmInvoke_returnAddress)
.align 2
GLOBAL(vmInvoke_returnAddress):
// restore stack pointer
ldr x5, [x19, #TARGET_THREAD_SCRATCH]
mov sp, x5
// clear MyThread::stack to avoid confusing another thread calling
// java.lang.Thread.getStackTrace on this one. See
// MyProcess::getStackTrace in compile.cpp for details on how we get
// a reliable stack trace from a thread that might be interrupted at
// any point in its execution.
str xzr, [x19, #TARGET_THREAD_STACK]
.globl GLOBAL(vmInvoke_safeStack)
.align 2
GLOBAL(vmInvoke_safeStack):
#ifdef AVIAN_CONTINUATIONS
// call the next continuation, if any
ldr x5, [x19,#TARGET_THREAD_CONTINUATION]
cmp x5, xzr
b.eq LOCAL(vmInvoke_exit)
ldr x6, [x5,#CONTINUATION_LENGTH]
lsl x6, x6, #3
neg x7, x6
add x7, x7, #-128 // 128 bytes for callee-saved register values
mov x4, sp
add sp, sp, x7
str x4, [sp]
add x7, x5, #CONTINUATION_BODY
mov x11, xzr
b LOCAL(vmInvoke_continuationTest)
LOCAL(vmInvoke_continuationLoop):
ldr x9, [x7,x11]
str x9, [sp,x11]
add x11, x11, #8
LOCAL(vmInvoke_continuationTest):
cmp x11, x6
b.le LOCAL(vmInvoke_continuationLoop)
ldr x7, [x5,#CONTINUATION_RETURN_ADDRESS_OFFSET]
adr x11, GLOBAL(vmInvoke_returnAddress)
str x11, [sp,x7]
ldr x7, [x5,#CONTINUATION_NEXT]
str x7, [x19,#TARGET_THREAD_CONTINUATION]
// call the continuation unless we're handling an exception
ldr x7, [x19,#TARGET_THREAD_EXCEPTION]
cmp x7, xzr
b.ne LOCAL(vmInvoke_handleException)
ldr x7, [x5,#CONTINUATION_ADDRESS]
br x7
LOCAL(vmInvoke_handleException):
// we're handling an exception - call the exception handler instead
str xzr, [x19,#TARGET_THREAD_EXCEPTION]
ldr x11, [x19,#TARGET_THREAD_EXCEPTIONSTACKADJUSTMENT]
ldr x9, [sp]
neg x11, x11
add sp, sp, x11
str x9, [sp]
ldr x11, [x19,#TARGET_THREAD_EXCEPTIONOFFSET]
str x7, [sp,x11]
ldr x7, [x19,#TARGET_THREAD_EXCEPTIONHANDLER]
br x7
LOCAL(vmInvoke_exit):
str xzr, [x19, #TARGET_THREAD_STACK]
#endif // AVIAN_CONTINUATIONS
// restore return type
ldr w5, [sp],#16
// restore callee-saved register values
ldp x19, x20, [sp,#16]
ldp x21, x22, [sp,#32]
ldp x23, x24, [sp,#48]
ldp x25, x26, [sp,#64]
ldp x27, x28, [sp,#80]
ldp x29, x30, [sp],#96
LOCAL(vmInvoke_return):
br x30
.globl GLOBAL(vmJumpAndInvoke)
.align 2
GLOBAL(vmJumpAndInvoke):
#ifdef AVIAN_CONTINUATIONS
// x0: thread
// x1: address
// x2: stack
// x3: argumentFootprint
// x4: arguments
// x5: frameSize
// allocate new frame, adding room for callee-saved registers, plus
// 8 bytes of padding since the calculation of frameSize assumes 8
// bytes have already been allocated to save the return address,
// which is not true in this case
sub x2, x2, x5
sub x2, x2, #136
mov x19, x0
// copy arguments into place
mov x6, xzr
b LOCAL(vmJumpAndInvoke_argumentTest)
LOCAL(vmJumpAndInvoke_argumentLoop):
ldr x12, [x4,x6]
str x12, [x2,x6]
add x6, x6, #4
LOCAL(vmJumpAndInvoke_argumentTest):
cmp x6, x3
ble LOCAL(vmJumpAndInvoke_argumentLoop)
// the arguments have been copied, so we can set the real stack
// pointer now
mov sp, x2
// set return address to vmInvoke_returnAddress
adr x30, GLOBAL(vmInvoke_returnAddress)
br x1
#else // not AVIAN_CONTINUATIONS
// vmJumpAndInvoke should only be called when continuations are
// enabled, so we force a crash if we reach here:
brk 0
#endif // not AVIAN_CONTINUATIONS

455
src/compile-x86_64.S Normal file
View File

@ -0,0 +1,455 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "avian/types.h"
#include "avian/target-fields.h"
#define LOCAL(x) .L##x
#if defined __APPLE__ \
|| ((defined __MINGW32__ || defined __CYGWIN32__) && ! defined __x86_64__)
# define GLOBAL(x) _##x
#else
# define GLOBAL(x) x
#endif
.text
#ifdef __x86_64__
#ifdef AVIAN_USE_FRAME_POINTER
# define ALIGNMENT_ADJUSTMENT 0
#else
# define ALIGNMENT_ADJUSTMENT 8
#endif
#if defined __MINGW32__ || defined __CYGWIN32__
#define CALLEE_SAVED_REGISTER_FOOTPRINT 64 + ALIGNMENT_ADJUSTMENT
.globl GLOBAL(vmInvoke)
GLOBAL(vmInvoke):
pushq %rbp
movq %rsp,%rbp
// %rcx: thread
// %rdx: function
// %r8 : arguments
// %r9 : argumentsFootprint
// 48(%rbp) : frameSize
// 56(%rbp) : returnType (ignored)
// allocate stack space for callee-saved registers
subq $CALLEE_SAVED_REGISTER_FOOTPRINT,%rsp
// remember this stack position, since we won't be able to rely on
// %rbp being restored when the call returns
movq %rsp,TARGET_THREAD_SCRATCH(%rcx)
// save callee-saved registers
movq %rbx,0(%rsp)
movq %r12,8(%rsp)
movq %r13,16(%rsp)
movq %r14,24(%rsp)
movq %r15,32(%rsp)
movq %rsi,40(%rsp)
movq %rdi,48(%rsp)
// allocate stack space for arguments
movl 48(%rbp),%eax
subq %rax,%rsp
// we use rbx to hold the thread pointer, by convention
mov %rcx,%rbx
// copy arguments into place
movq $0,%r11
jmp LOCAL(vmInvoke_argumentTest)
LOCAL(vmInvoke_argumentLoop):
movq (%r8,%r11,1),%rsi
movq %rsi,(%rsp,%r11,1)
addq $8,%r11
LOCAL(vmInvoke_argumentTest):
cmpq %r9,%r11
jb LOCAL(vmInvoke_argumentLoop)
// call function
call *%rdx
.globl GLOBAL(vmInvoke_returnAddress)
GLOBAL(vmInvoke_returnAddress):
// restore stack pointer
movq TARGET_THREAD_SCRATCH(%rbx),%rsp
// clear MyThread::stack to avoid confusing another thread calling
// java.lang.Thread.getStackTrace on this one. See
// MyProcess::getStackTrace in compile.cpp for details on how we get
// a reliable stack trace from a thread that might be interrupted at
// any point in its execution.
movq $0,TARGET_THREAD_STACK(%rbx)
.globl GLOBAL(vmInvoke_safeStack)
GLOBAL(vmInvoke_safeStack):
#ifdef AVIAN_CONTINUATIONS
# include "continuations-x86.S"
#endif // AVIAN_CONTINUATIONS
// restore callee-saved registers
movq 0(%rsp),%rbx
movq 8(%rsp),%r12
movq 16(%rsp),%r13
movq 24(%rsp),%r14
movq 32(%rsp),%r15
movq 40(%rsp),%rsi
movq 48(%rsp),%rdi
addq $CALLEE_SAVED_REGISTER_FOOTPRINT,%rsp
// return
popq %rbp
ret
.globl GLOBAL(vmJumpAndInvoke)
GLOBAL(vmJumpAndInvoke):
#ifdef AVIAN_CONTINUATIONS
// %rcx: thread
// %rdx: address
// %r8 : stack
// %r9 : argumentFootprint
// 40(%rsp): arguments
// 48(%rsp): frameSize
// allocate new frame, adding room for callee-saved registers
movl 48(%rsp),%eax
subq %rax,%r8
subq $CALLEE_SAVED_REGISTER_FOOTPRINT,%r8
movq %rcx,%rbx
// set return address
leaq GLOBAL(vmInvoke_returnAddress)(%rip),%r10
movq %r10,(%r8)
// copy arguments into place
movq $0,%r11
movl 40(%rsp),%eax
jmp LOCAL(vmJumpAndInvoke_argumentTest)
LOCAL(vmJumpAndInvoke_argumentLoop):
movq (%rax,%r11,1),%r10
movq %r10,8(%r8,%r11,1)
addq $8,%r11
LOCAL(vmJumpAndInvoke_argumentTest):
cmpq %r9,%r11
jb LOCAL(vmJumpAndInvoke_argumentLoop)
// the arguments have been copied, so we can set the real stack
// pointer now
movq %r8,%rsp
jmp *%rdx
#else // not AVIAN_CONTINUATIONS
// vmJumpAndInvoke should only be called when continuations are
// enabled
int3
#endif // not AVIAN_CONTINUATIONS
#else // not __MINGW32__ || __CYGWIN32__
#define CALLEE_SAVED_REGISTER_FOOTPRINT 48 + ALIGNMENT_ADJUSTMENT
.globl GLOBAL(vmInvoke)
GLOBAL(vmInvoke):
pushq %rbp
movq %rsp,%rbp
// %rdi: thread
// %rsi: function
// %rdx: arguments
// %rcx: argumentFootprint
// %r8 : frameSize
// %r9 : returnType (ignored)
// allocate stack space for callee-saved registers
subq $CALLEE_SAVED_REGISTER_FOOTPRINT,%rsp
// remember this stack position, since we won't be able to rely on
// %rbp being restored when the call returns
movq %rsp,TARGET_THREAD_SCRATCH(%rdi)
// save callee-saved registers
movq %rbx,0(%rsp)
movq %r12,8(%rsp)
movq %r13,16(%rsp)
movq %r14,24(%rsp)
movq %r15,32(%rsp)
// allocate stack space for arguments
subq %r8,%rsp
// we use rbx to hold the thread pointer, by convention
mov %rdi,%rbx
// copy arguments into place
movq $0,%r9
jmp LOCAL(vmInvoke_argumentTest)
LOCAL(vmInvoke_argumentLoop):
movq (%rdx,%r9,1),%r8
movq %r8,(%rsp,%r9,1)
addq $8,%r9
LOCAL(vmInvoke_argumentTest):
cmpq %rcx,%r9
jb LOCAL(vmInvoke_argumentLoop)
// call function
call *%rsi
.globl GLOBAL(vmInvoke_returnAddress)
GLOBAL(vmInvoke_returnAddress):
// restore stack pointer
movq TARGET_THREAD_SCRATCH(%rbx),%rsp
// clear MyThread::stack to avoid confusing another thread calling
// java.lang.Thread.getStackTrace on this one. See
// MyProcess::getStackTrace in compile.cpp for details on how we get
// a reliable stack trace from a thread that might be interrupted at
// any point in its execution.
movq $0,TARGET_THREAD_STACK(%rbx)
.globl GLOBAL(vmInvoke_safeStack)
GLOBAL(vmInvoke_safeStack):
#ifdef AVIAN_CONTINUATIONS
# include "continuations-x86.S"
#endif // AVIAN_CONTINUATIONS
// restore callee-saved registers
movq 0(%rsp),%rbx
movq 8(%rsp),%r12
movq 16(%rsp),%r13
movq 24(%rsp),%r14
movq 32(%rsp),%r15
addq $CALLEE_SAVED_REGISTER_FOOTPRINT,%rsp
// return
popq %rbp
ret
.globl GLOBAL(vmJumpAndInvoke)
GLOBAL(vmJumpAndInvoke):
#ifdef AVIAN_CONTINUATIONS
// %rdi: thread
// %rsi: address
// %rdx: stack
// %rcx: argumentFootprint
// %r8 : arguments
// %r9 : frameSize
// allocate new frame, adding room for callee-saved registers
subq %r9,%rdx
subq $CALLEE_SAVED_REGISTER_FOOTPRINT,%rdx
movq %rdi,%rbx
// set return address
movq GLOBAL(vmInvoke_returnAddress)@GOTPCREL(%rip),%r10
movq %r10,(%rdx)
// copy arguments into place
movq $0,%r11
jmp LOCAL(vmJumpAndInvoke_argumentTest)
LOCAL(vmJumpAndInvoke_argumentLoop):
movq (%r8,%r11,1),%r10
movq %r10,8(%rdx,%r11,1)
addq $8,%r11
LOCAL(vmJumpAndInvoke_argumentTest):
cmpq %rcx,%r11
jb LOCAL(vmJumpAndInvoke_argumentLoop)
// the arguments have been copied, so we can set the real stack
// pointer now
movq %rdx,%rsp
jmp *%rsi
#else // not AVIAN_CONTINUATIONS
// vmJumpAndInvoke should only be called when continuations are
// enabled
int3
#endif // not AVIAN_CONTINUATIONS
#endif // not __MINGW32__ || __CYGWIN32__
#elif defined __i386__
#ifdef AVIAN_USE_FRAME_POINTER
# define ALIGNMENT_ADJUSTMENT 0
#else
# define ALIGNMENT_ADJUSTMENT 12
#endif
#define CALLEE_SAVED_REGISTER_FOOTPRINT 16 + ALIGNMENT_ADJUSTMENT
.globl GLOBAL(vmInvoke)
GLOBAL(vmInvoke):
pushl %ebp
movl %esp,%ebp
// 8(%ebp): thread
// 12(%ebp): function
// 16(%ebp): arguments
// 20(%ebp): argumentFootprint
// 24(%ebp): frameSize
// 28(%ebp): returnType
// allocate stack space for callee-saved registers
subl $CALLEE_SAVED_REGISTER_FOOTPRINT,%esp
// remember this stack position, since we won't be able to rely on
// %rbp being restored when the call returns
movl 8(%ebp),%eax
movl %esp,TARGET_THREAD_SCRATCH(%eax)
movl %ebx,0(%esp)
movl %esi,4(%esp)
movl %edi,8(%esp)
// allocate stack space for arguments
subl 24(%ebp),%esp
// we use ebx to hold the thread pointer, by convention
mov %eax,%ebx
// copy arguments into place
movl $0,%ecx
movl 16(%ebp),%edx
jmp LOCAL(vmInvoke_argumentTest)
LOCAL(vmInvoke_argumentLoop):
movl (%edx,%ecx,1),%eax
movl %eax,(%esp,%ecx,1)
addl $4,%ecx
LOCAL(vmInvoke_argumentTest):
cmpl 20(%ebp),%ecx
jb LOCAL(vmInvoke_argumentLoop)
// call function
call *12(%ebp)
.globl GLOBAL(vmInvoke_returnAddress)
GLOBAL(vmInvoke_returnAddress):
// restore stack pointer
movl TARGET_THREAD_SCRATCH(%ebx),%esp
// clear MyThread::stack to avoid confusing another thread calling
// java.lang.Thread.getStackTrace on this one. See
// MyProcess::getStackTrace in compile.cpp for details on how we get
// a reliable stack trace from a thread that might be interrupted at
// any point in its execution.
movl $0,TARGET_THREAD_STACK(%ebx)
.globl GLOBAL(vmInvoke_safeStack)
GLOBAL(vmInvoke_safeStack):
#ifdef AVIAN_CONTINUATIONS
# include "continuations-x86.S"
#endif // AVIAN_CONTINUATIONS
// restore callee-saved registers
movl 0(%esp),%ebx
movl 4(%esp),%esi
movl 8(%esp),%edi
addl $CALLEE_SAVED_REGISTER_FOOTPRINT,%esp
// handle return value based on expected type
movl 28(%esp),%ecx
popl %ebp
ret
LOCAL(getPC):
movl (%esp),%esi
ret
// vmJumpAndInvoke: continuation support. Builds a fresh frame at the
// captured stack position, copies the argument words into it, plants
// vmInvoke_returnAddress as the return address, and tail-jumps into the
// target code. Only compiled in when AVIAN_CONTINUATIONS is enabled.
.globl GLOBAL(vmJumpAndInvoke)
GLOBAL(vmJumpAndInvoke):
#ifdef AVIAN_CONTINUATIONS
// 4(%esp): thread
// 8(%esp): address
// 12(%esp): stack
// 16(%esp): argumentFootprint
// 20(%esp): arguments
// 24(%esp): frameSize
movl 12(%esp),%ecx
// allocate new frame, adding room for callee-saved registers,
// return address, and frame pointer
subl 24(%esp),%ecx
subl $CALLEE_SAVED_REGISTER_FOOTPRINT+8,%ecx
// we use %ebx to hold the thread pointer, by convention
movl 4(%esp),%ebx
// set return address
#if defined __MINGW32__ || defined __CYGWIN32__
movl $GLOBAL(vmInvoke_returnAddress),%esi
#else
// position-independent code: materialize the current PC in %esi via
// getPC, then derive the address of vmInvoke_returnAddress from it
call LOCAL(getPC)
# if defined __APPLE__
LOCAL(vmJumpAndInvoke_offset):
leal GLOBAL(vmInvoke_returnAddress)-LOCAL(vmJumpAndInvoke_offset)(%esi),%esi
# else
// non-Apple ELF: go through the global offset table
addl $_GLOBAL_OFFSET_TABLE_,%esi
movl GLOBAL(vmInvoke_returnAddress)@GOT(%esi),%esi
# endif
#endif
movl %esi,(%ecx)
// copy arguments into place (%esi: byte offset, %edx: footprint limit,
// %eax: source argument buffer)
movl $0,%esi
movl 16(%esp),%edx
movl 20(%esp),%eax
jmp LOCAL(vmJumpAndInvoke_argumentTest)
LOCAL(vmJumpAndInvoke_argumentLoop):
movl (%eax,%esi,1),%edi
// the +4 skips the return-address slot at the base of the new frame
movl %edi,4(%ecx,%esi,1)
addl $4,%esi
LOCAL(vmJumpAndInvoke_argumentTest):
cmpl %edx,%esi
jb LOCAL(vmJumpAndInvoke_argumentLoop)
movl 8(%esp),%esi
// the arguments have been copied, so we can set the real stack
// pointer now
movl %ecx,%esp
jmp *%esi
#else // not AVIAN_CONTINUATIONS
// vmJumpAndInvoke should only be called when continuations are
// enabled
int3
#endif // AVIAN_CONTINUATIONS
#else
#error unsupported architecture
#endif //def __x86_64__

View File

@ -2189,6 +2189,8 @@ GcContinuation* makeCurrentContinuation(MyThread* t,
*targetIp = 0; *targetIp = 0;
while (*targetIp == 0) { while (*targetIp == 0) {
assertT(t, ip);
GcMethod* method = methodForIp(t, ip); GcMethod* method = methodForIp(t, ip);
if (method) { if (method) {
PROTECT(t, method); PROTECT(t, method);
@ -9783,22 +9785,22 @@ void compileCall(MyThread* t, Context* c, ThunkIndex index, bool call = true)
if (processor(t)->bootImage) { if (processor(t)->bootImage) {
lir::Memory table(t->arch->thread(), TARGET_THREAD_THUNKTABLE); lir::Memory table(t->arch->thread(), TARGET_THREAD_THUNKTABLE);
lir::Register scratch(t->arch->scratch()); lir::RegisterPair scratch(t->arch->scratch());
a->apply(lir::Move, a->apply(lir::Move,
OperandInfo(TargetBytesPerWord, lir::MemoryOperand, &table), OperandInfo(TargetBytesPerWord, lir::Operand::Type::Memory, &table),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &scratch)); OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &scratch));
lir::Memory proc(scratch.low, index * TargetBytesPerWord); lir::Memory proc(scratch.low, index * TargetBytesPerWord);
a->apply(lir::Move, a->apply(lir::Move,
OperandInfo(TargetBytesPerWord, lir::MemoryOperand, &proc), OperandInfo(TargetBytesPerWord, lir::Operand::Type::Memory, &proc),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &scratch)); OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &scratch));
a->apply(call ? lir::Call : lir::Jump, a->apply(call ? lir::Call : lir::Jump,
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &scratch)); OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &scratch));
} else { } else {
lir::Constant proc(new (&c->zone) avian::codegen::ResolvedPromise( lir::Constant proc(new (&c->zone) avian::codegen::ResolvedPromise(
reinterpret_cast<intptr_t>(t->thunkTable[index]))); reinterpret_cast<intptr_t>(t->thunkTable[index])));
a->apply(call ? lir::LongCall : lir::LongJump, a->apply(call ? lir::LongCall : lir::LongJump,
OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &proc)); OperandInfo(TargetBytesPerWord, lir::Operand::Type::Constant, &proc));
} }
} }
@ -9814,16 +9816,16 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
p->thunks.default_.frameSavedOffset = a->length(); p->thunks.default_.frameSavedOffset = a->length();
lir::Register thread(t->arch->thread()); lir::RegisterPair thread(t->arch->thread());
a->pushFrame(1, TargetBytesPerWord, lir::RegisterOperand, &thread); a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);
compileCall(t, &context, compileMethodIndex); compileCall(t, &context, compileMethodIndex);
a->popFrame(t->arch->alignFrameSize(1)); a->popFrame(t->arch->alignFrameSize(1));
lir::Register result(t->arch->returnLow()); lir::RegisterPair result(t->arch->returnLow());
a->apply(lir::Jump, a->apply(lir::Jump,
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &result)); OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &result));
p->thunks.default_.length = a->endBlock(false)->resolve(0, 0); p->thunks.default_.length = a->endBlock(false)->resolve(0, 0);
@ -9835,7 +9837,7 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
Context context(t); Context context(t);
avian::codegen::Assembler* a = context.assembler; avian::codegen::Assembler* a = context.assembler;
lir::Register class_(t->arch->virtualCallTarget()); lir::RegisterPair class_(t->arch->virtualCallTarget());
lir::Memory virtualCallTargetSrc( lir::Memory virtualCallTargetSrc(
t->arch->stack(), t->arch->stack(),
(t->arch->frameFooterSize() + t->arch->frameReturnAddressSize()) (t->arch->frameFooterSize() + t->arch->frameReturnAddressSize())
@ -9843,41 +9845,41 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
a->apply(lir::Move, a->apply(lir::Move,
OperandInfo( OperandInfo(
TargetBytesPerWord, lir::MemoryOperand, &virtualCallTargetSrc), TargetBytesPerWord, lir::Operand::Type::Memory, &virtualCallTargetSrc),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &class_)); OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &class_));
lir::Memory virtualCallTargetDst(t->arch->thread(), lir::Memory virtualCallTargetDst(t->arch->thread(),
TARGET_THREAD_VIRTUALCALLTARGET); TARGET_THREAD_VIRTUALCALLTARGET);
a->apply( a->apply(
lir::Move, lir::Move,
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &class_), OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &class_),
OperandInfo( OperandInfo(
TargetBytesPerWord, lir::MemoryOperand, &virtualCallTargetDst)); TargetBytesPerWord, lir::Operand::Type::Memory, &virtualCallTargetDst));
lir::Register index(t->arch->virtualCallIndex()); lir::RegisterPair index(t->arch->virtualCallIndex());
lir::Memory virtualCallIndex(t->arch->thread(), lir::Memory virtualCallIndex(t->arch->thread(),
TARGET_THREAD_VIRTUALCALLINDEX); TARGET_THREAD_VIRTUALCALLINDEX);
a->apply( a->apply(
lir::Move, lir::Move,
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &index), OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &index),
OperandInfo(TargetBytesPerWord, lir::MemoryOperand, &virtualCallIndex)); OperandInfo(TargetBytesPerWord, lir::Operand::Type::Memory, &virtualCallIndex));
a->saveFrame(TARGET_THREAD_STACK, TARGET_THREAD_IP); a->saveFrame(TARGET_THREAD_STACK, TARGET_THREAD_IP);
p->thunks.defaultVirtual.frameSavedOffset = a->length(); p->thunks.defaultVirtual.frameSavedOffset = a->length();
lir::Register thread(t->arch->thread()); lir::RegisterPair thread(t->arch->thread());
a->pushFrame(1, TargetBytesPerWord, lir::RegisterOperand, &thread); a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);
compileCall(t, &context, compileVirtualMethodIndex); compileCall(t, &context, compileVirtualMethodIndex);
a->popFrame(t->arch->alignFrameSize(1)); a->popFrame(t->arch->alignFrameSize(1));
lir::Register result(t->arch->returnLow()); lir::RegisterPair result(t->arch->returnLow());
a->apply(lir::Jump, a->apply(lir::Jump,
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &result)); OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &result));
p->thunks.defaultVirtual.length = a->endBlock(false)->resolve(0, 0); p->thunks.defaultVirtual.length = a->endBlock(false)->resolve(0, 0);
@ -9893,8 +9895,8 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
p->thunks.native.frameSavedOffset = a->length(); p->thunks.native.frameSavedOffset = a->length();
lir::Register thread(t->arch->thread()); lir::RegisterPair thread(t->arch->thread());
a->pushFrame(1, TargetBytesPerWord, lir::RegisterOperand, &thread); a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);
compileCall(t, &context, invokeNativeIndex); compileCall(t, &context, invokeNativeIndex);
@ -9915,8 +9917,8 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
p->thunks.aioob.frameSavedOffset = a->length(); p->thunks.aioob.frameSavedOffset = a->length();
lir::Register thread(t->arch->thread()); lir::RegisterPair thread(t->arch->thread());
a->pushFrame(1, TargetBytesPerWord, lir::RegisterOperand, &thread); a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);
compileCall(t, &context, throwArrayIndexOutOfBoundsIndex); compileCall(t, &context, throwArrayIndexOutOfBoundsIndex);
@ -9934,8 +9936,8 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
p->thunks.stackOverflow.frameSavedOffset = a->length(); p->thunks.stackOverflow.frameSavedOffset = a->length();
lir::Register thread(t->arch->thread()); lir::RegisterPair thread(t->arch->thread());
a->pushFrame(1, TargetBytesPerWord, lir::RegisterOperand, &thread); a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);
compileCall(t, &context, throwStackOverflowIndex); compileCall(t, &context, throwStackOverflowIndex);
@ -10058,17 +10060,17 @@ uintptr_t compileVirtualThunk(MyThread* t, unsigned index, unsigned* size)
avian::codegen::ResolvedPromise indexPromise(index); avian::codegen::ResolvedPromise indexPromise(index);
lir::Constant indexConstant(&indexPromise); lir::Constant indexConstant(&indexPromise);
lir::Register indexRegister(t->arch->virtualCallIndex()); lir::RegisterPair indexRegister(t->arch->virtualCallIndex());
a->apply( a->apply(
lir::Move, lir::Move,
OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &indexConstant), OperandInfo(TargetBytesPerWord, lir::Operand::Type::Constant, &indexConstant),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &indexRegister)); OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &indexRegister));
avian::codegen::ResolvedPromise defaultVirtualThunkPromise( avian::codegen::ResolvedPromise defaultVirtualThunkPromise(
defaultVirtualThunk(t)); defaultVirtualThunk(t));
lir::Constant thunk(&defaultVirtualThunkPromise); lir::Constant thunk(&defaultVirtualThunkPromise);
a->apply(lir::Jump, a->apply(lir::Jump,
OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &thunk)); OperandInfo(TargetBytesPerWord, lir::Operand::Type::Constant, &thunk));
*size = a->endBlock(false)->resolve(0, 0); *size = a->endBlock(false)->resolve(0, 0);

145
src/i386.S Normal file
View File

@ -0,0 +1,145 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "avian/types.h"

// LOCAL: assembler-local label; the .L prefix keeps it out of the
// object file's symbol table
#define LOCAL(x) .L##x

// GLOBAL: exported symbol name. Apple and 32-bit Windows (MinGW/Cygwin)
// toolchains prefix C symbols with an underscore; other targets do not.
#if defined __APPLE__ \
|| ((defined __MINGW32__ || defined __CYGWIN32__) && ! defined __x86_64__)
# define GLOBAL(x) _##x
#else
# define GLOBAL(x) x
#endif

.text

// byte offsets into the checkpoint structure consumed by vmRun below
// NOTE(review): presumably these mirror a C++ struct layout -- confirm
// they stay in sync with its definition
#define CHECKPOINT_THREAD 4
#define CHECKPOINT_STACK 24
#define CHECKPOINT_BASE 28
// vmNativeCall(function, stack, stackSize, returnType): invoke a native
// (cdecl) function whose arguments have already been marshalled into a
// flat buffer, then normalize the result into %eax(/%edx) according to
// the expected return type.
.globl GLOBAL(vmNativeCall)
GLOBAL(vmNativeCall):
pushl %ebp
movl %esp,%ebp
// 8(%ebp): function
// 12(%ebp): stack
// 16(%ebp): stackSize
// 20(%ebp): returnType
// reserve space for arguments
movl 16(%ebp),%ecx
subl %ecx,%esp
//# ifdef __APPLE__
// align to a 16 byte boundary
andl $0xFFFFFFF0,%esp
//# endif
// copy arguments into place; %ecx is the running byte offset into both
// the source buffer (12(%ebp)) and the outgoing call area (%esp)
movl $0,%ecx
jmp LOCAL(test)
LOCAL(loop):
movl %ecx,%eax
movl %ecx,%edx
addl %esp,%edx
addl 12(%ebp),%eax
movl (%eax),%eax
movl %eax,(%edx)
addl $4,%ecx
LOCAL(test):
cmpl 16(%ebp),%ecx
jb LOCAL(loop)
// call function
call *8(%ebp)
// handle return value based on expected type
movl 20(%ebp),%ecx
LOCAL(void):
cmpl $VOID_TYPE,%ecx
jne LOCAL(int64)
jmp LOCAL(exit)
LOCAL(int64):
// 32- and 64-bit integer results are already in %eax(:%edx); nothing
// more to do
cmpl $INT64_TYPE,%ecx
jne LOCAL(float)
jmp LOCAL(exit)
LOCAL(float):
// pop the x87 result, spilling through 8(%ebp) to reload the raw bits
// into %eax
cmpl $FLOAT_TYPE,%ecx
jne LOCAL(double)
fstps 8(%ebp)
movl 8(%ebp),%eax
jmp LOCAL(exit)
LOCAL(double):
// pop the x87 result, spilling through 8(%ebp) to reload the raw bits
// into %eax:%edx
cmpl $DOUBLE_TYPE,%ecx
jne LOCAL(exit)
fstpl 8(%ebp)
movl 8(%ebp),%eax
movl 12(%ebp),%edx
LOCAL(exit):
// discard the argument area and restore the caller's frame
movl %ebp,%esp
popl %ebp
ret
// vmJump: restore a previously-saved execution context and resume at the
// given address. Never returns to its caller.
// NOTE(review): argument meanings below are inferred from the register
// conventions used elsewhere in this file (%ebx = thread pointer,
// %eax:%edx = return value) -- confirm against the vmJump declaration.
// 4(%esp): address to resume at
// 8(%esp): frame pointer, restored into %ebp
// 12(%esp): stack pointer, restored into %esp
// 16(%esp): thread pointer, restored into %ebx
// 20(%esp): return value low word, delivered in %eax
// 24(%esp): return value high word, delivered in %edx
.globl GLOBAL(vmJump)
GLOBAL(vmJump):
movl 4(%esp),%esi
movl 8(%esp),%ebp
movl 16(%esp),%ebx
movl 20(%esp),%eax
movl 24(%esp),%edx
// load %esp last: once it changes, our incoming arguments are gone
movl 12(%esp),%esp
jmp *%esi
#define VMRUN_FRAME_SIZE 24

// vmRun(function, arguments, checkpoint): call function(thread,
// arguments) after saving the callee-saved registers and recording the
// current stack position in the checkpoint, so that vmJump can later
// unwind directly to vmRun_returnAddress.
.globl GLOBAL(vmRun)
GLOBAL(vmRun):
// 8(%ebp): function
// 12(%ebp): arguments
// 16(%ebp): checkpoint
pushl %ebp
movl %esp,%ebp
subl $VMRUN_FRAME_SIZE,%esp
// preserve callee-saved registers in our frame
movl %ebx,8(%esp)
movl %esi,12(%esp)
movl %edi,16(%esp)
// outgoing argument 1: the argument buffer
movl 12(%ebp),%eax
movl %eax,4(%esp)
// outgoing argument 0: the thread, read out of the checkpoint
movl 16(%ebp),%ecx
movl CHECKPOINT_THREAD(%ecx),%eax
movl %eax,0(%esp)
// record our stack position so vmJump can land at vmRun_returnAddress
movl %esp,CHECKPOINT_STACK(%ecx)
call *8(%ebp)
.globl GLOBAL(vmRun_returnAddress)
GLOBAL(vmRun_returnAddress):
// restore callee-saved registers and return; vmJump may also arrive
// here directly with a result already in %eax:%edx
movl 8(%esp),%ebx
movl 12(%esp),%esi
movl 16(%esp),%edi
addl $VMRUN_FRAME_SIZE,%esp
popl %ebp
ret

View File

@ -50,20 +50,11 @@ bool writeObject(uint8_t* data,
OutputStream* out, OutputStream* out,
const char* startName, const char* startName,
const char* endName, const char* endName,
const char* format, Platform* platform,
const char* architecture,
unsigned alignment, unsigned alignment,
bool writable, bool writable,
bool executable) bool executable)
{ {
Platform* platform = Platform::getPlatform(
PlatformInfo(PlatformInfo::formatFromString(format),
PlatformInfo::archFromString(architecture)));
if (!platform) {
fprintf(stderr, "unsupported platform: %s/%s\n", format, architecture);
return false;
}
SymbolInfo symbols[] = {SymbolInfo(0, startName), SymbolInfo(size, endName)}; SymbolInfo symbols[] = {SymbolInfo(0, startName), SymbolInfo(size, endName)};
@ -113,6 +104,19 @@ int main(int argc, const char** argv)
} }
} }
const char* format = argv[5];
const char* architecture = argv[6];
Platform* platform = Platform::getPlatform(
PlatformInfo(PlatformInfo::formatFromString(format),
PlatformInfo::archFromString(architecture)));
if (!platform) {
fprintf(stderr, "unsupported platform: %s/%s\n", format, architecture);
return 1;
}
uint8_t* data = 0; uint8_t* data = 0;
unsigned size; unsigned size;
int fd = open(argv[1], O_RDONLY); int fd = open(argv[1], O_RDONLY);
@ -148,8 +152,7 @@ int main(int argc, const char** argv)
&out, &out,
argv[3], argv[3],
argv[4], argv[4],
argv[5], platform,
argv[6],
alignment, alignment,
writable, writable,
executable); executable);

View File

@ -49,6 +49,7 @@
#define EM_386 3 #define EM_386 3
#define EM_X86_64 62 #define EM_X86_64 62
#define EM_ARM 40 #define EM_ARM 40
#define EM_AARCH64 183
#define SHT_PROGBITS 1 #define SHT_PROGBITS 1
#define SHT_SYMTAB 2 #define SHT_SYMTAB 2
@ -129,6 +130,8 @@ unsigned getElfPlatform(PlatformInfo::Architecture arch)
return EM_386; return EM_386;
case PlatformInfo::Arm: case PlatformInfo::Arm:
return EM_ARM; return EM_ARM;
case PlatformInfo::Arm64:
return EM_AARCH64;
default: default:
return ~0; return ~0;
} }
@ -398,6 +401,7 @@ class ElfPlatform : public Platform {
ElfPlatform<uint32_t> elfX86Platform(PlatformInfo::x86); ElfPlatform<uint32_t> elfX86Platform(PlatformInfo::x86);
ElfPlatform<uint32_t> elfArmPlatform(PlatformInfo::Arm); ElfPlatform<uint32_t> elfArmPlatform(PlatformInfo::Arm);
ElfPlatform<uint64_t> elfArm64Platform(PlatformInfo::Arm64);
ElfPlatform<uint64_t> elfX86_64Platform(PlatformInfo::x86_64); ElfPlatform<uint64_t> elfX86_64Platform(PlatformInfo::x86_64);
} // namespace } // namespace

View File

@ -33,10 +33,12 @@
#define CPU_TYPE_I386 7 #define CPU_TYPE_I386 7
#define CPU_TYPE_X86_64 (CPU_TYPE_I386 | CPU_ARCH_ABI64) #define CPU_TYPE_X86_64 (CPU_TYPE_I386 | CPU_ARCH_ABI64)
#define CPU_TYPE_ARM 12 #define CPU_TYPE_ARM 12
#define CPU_TYPE_ARM64 (CPU_TYPE_ARM | CPU_ARCH_ABI64)
#define CPU_SUBTYPE_I386_ALL 3 #define CPU_SUBTYPE_I386_ALL 3
#define CPU_SUBTYPE_X86_64_ALL CPU_SUBTYPE_I386_ALL #define CPU_SUBTYPE_X86_64_ALL CPU_SUBTYPE_I386_ALL
#define CPU_SUBTYPE_ARM_V7 9 #define CPU_SUBTYPE_ARM_V7 9
#define CPU_SUBTYPE_ARM_V8 13
namespace { namespace {
@ -157,6 +159,10 @@ class MachOPlatform : public Platform {
cpuType = CPU_TYPE_ARM; cpuType = CPU_TYPE_ARM;
cpuSubType = CPU_SUBTYPE_ARM_V7; cpuSubType = CPU_SUBTYPE_ARM_V7;
break; break;
case PlatformInfo::Arm64:
cpuType = CPU_TYPE_ARM64;
cpuSubType = CPU_SUBTYPE_ARM_V8;
break;
default: default:
// should never happen (see MachOPlatform declarations at bottom) // should never happen (see MachOPlatform declarations at bottom)
fprintf(stderr, "unsupported architecture: %d\n", info.arch); fprintf(stderr, "unsupported architecture: %d\n", info.arch);
@ -280,6 +286,7 @@ class MachOPlatform : public Platform {
MachOPlatform<uint32_t> darwinx86Platform(PlatformInfo::x86); MachOPlatform<uint32_t> darwinx86Platform(PlatformInfo::x86);
MachOPlatform<uint32_t> darwinArmPlatform(PlatformInfo::Arm); MachOPlatform<uint32_t> darwinArmPlatform(PlatformInfo::Arm);
MachOPlatform<uint64_t> darwinArm64Platform(PlatformInfo::Arm64);
MachOPlatform<uint64_t> darwinx86_64Platform(PlatformInfo::x86_64); MachOPlatform<uint64_t> darwinx86_64Platform(PlatformInfo::x86_64);
} // namespace } // namespace

View File

@ -115,6 +115,8 @@ PlatformInfo::Architecture PlatformInfo::archFromString(const char* arch)
return Architecture::x86_64; return Architecture::x86_64;
} else if (strcmp(arch, "arm") == 0) { } else if (strcmp(arch, "arm") == 0) {
return Architecture::Arm; return Architecture::Arm;
} else if (strcmp(arch, "arm64") == 0) {
return Architecture::Arm64;
} else { } else {
return Architecture::UnknownArch; return Architecture::UnknownArch;
} }

View File

@ -1408,8 +1408,20 @@ void writeInitializations(Output* out, Module& module)
} }
} }
void writeJavaInitialization(Output* out, Class* cl) void writeJavaInitialization(Output* out,
Class* cl,
std::set<Class*>& alreadyInited)
{ {
if (alreadyInited.find(cl) != alreadyInited.end()) {
return;
}
alreadyInited.insert(cl);
if (cl->super) {
writeJavaInitialization(out, cl->super, alreadyInited);
}
out->write("bootJavaClass(t, Gc::"); out->write("bootJavaClass(t, Gc::");
out->write(capitalize(cl->name)); out->write(capitalize(cl->name));
out->write("Type, "); out->write("Type, ");
@ -1436,10 +1448,11 @@ void writeJavaInitialization(Output* out, Class* cl)
void writeJavaInitializations(Output* out, Module& module) void writeJavaInitializations(Output* out, Module& module)
{ {
std::set<Class*> alreadyInited;
for (const auto p : module.classes) { for (const auto p : module.classes) {
Class* cl = p.second; Class* cl = p.second;
if (cl->javaName.size()) { if (cl->javaName.size()) {
writeJavaInitialization(out, cl); writeJavaInitialization(out, cl, alreadyInited);
} }
} }
} }

View File

@ -21,8 +21,6 @@
.text .text
#ifdef __x86_64__
#define CHECKPOINT_THREAD 8 #define CHECKPOINT_THREAD 8
#define CHECKPOINT_STACK 48 #define CHECKPOINT_STACK 48
@ -340,130 +338,3 @@ GLOBAL(vmRun_returnAddress):
ret ret
#endif // not __MINGW32__ #endif // not __MINGW32__
#elif defined __i386__
#define CHECKPOINT_THREAD 4
#define CHECKPOINT_STACK 24
#define CHECKPOINT_BASE 28
.globl GLOBAL(vmNativeCall)
GLOBAL(vmNativeCall):
pushl %ebp
movl %esp,%ebp
// 8(%ebp): function
// 12(%ebp): stack
// 16(%ebp): stackSize
// 20(%ebp): returnType
// reserve space for arguments
movl 16(%ebp),%ecx
subl %ecx,%esp
//# ifdef __APPLE__
// align to a 16 byte boundary
andl $0xFFFFFFF0,%esp
//# endif
// copy arguments into place
movl $0,%ecx
jmp LOCAL(test)
LOCAL(loop):
movl %ecx,%eax
movl %ecx,%edx
addl %esp,%edx
addl 12(%ebp),%eax
movl (%eax),%eax
movl %eax,(%edx)
addl $4,%ecx
LOCAL(test):
cmpl 16(%ebp),%ecx
jb LOCAL(loop)
// call function
call *8(%ebp)
// handle return value based on expected type
movl 20(%ebp),%ecx
LOCAL(void):
cmpl $VOID_TYPE,%ecx
jne LOCAL(int64)
jmp LOCAL(exit)
LOCAL(int64):
cmpl $INT64_TYPE,%ecx
jne LOCAL(float)
jmp LOCAL(exit)
LOCAL(float):
cmpl $FLOAT_TYPE,%ecx
jne LOCAL(double)
fstps 8(%ebp)
movl 8(%ebp),%eax
jmp LOCAL(exit)
LOCAL(double):
cmpl $DOUBLE_TYPE,%ecx
jne LOCAL(exit)
fstpl 8(%ebp)
movl 8(%ebp),%eax
movl 12(%ebp),%edx
LOCAL(exit):
movl %ebp,%esp
popl %ebp
ret
.globl GLOBAL(vmJump)
GLOBAL(vmJump):
movl 4(%esp),%esi
movl 8(%esp),%ebp
movl 16(%esp),%ebx
movl 20(%esp),%eax
movl 24(%esp),%edx
movl 12(%esp),%esp
jmp *%esi
#define VMRUN_FRAME_SIZE 24
.globl GLOBAL(vmRun)
GLOBAL(vmRun):
// 8(%ebp): function
// 12(%ebp): arguments
// 16(%ebp): checkpoint
pushl %ebp
movl %esp,%ebp
subl $VMRUN_FRAME_SIZE,%esp
movl %ebx,8(%esp)
movl %esi,12(%esp)
movl %edi,16(%esp)
movl 12(%ebp),%eax
movl %eax,4(%esp)
movl 16(%ebp),%ecx
movl CHECKPOINT_THREAD(%ecx),%eax
movl %eax,0(%esp)
movl %esp,CHECKPOINT_STACK(%ecx)
call *8(%ebp)
.globl GLOBAL(vmRun_returnAddress)
GLOBAL(vmRun_returnAddress):
movl 8(%esp),%ebx
movl 12(%esp),%esi
movl 16(%esp),%edi
addl $VMRUN_FRAME_SIZE,%esp
popl %ebp
ret
#endif // __i386__

View File

@ -16,6 +16,12 @@ public class NullPointer {
} }
public static void main(String[] args) { public static void main(String[] args) {
try {
((Object) null).getClass();
} catch (Exception e) {
e.printStackTrace();
}
try { try {
throw_(null); throw_(null);
throw new RuntimeException(); throw new RuntimeException();

View File

@ -1,9 +1,28 @@
#!/bin/sh #!/usr/bin/env bash
set -e set -eo pipefail
root_dir=$(pwd) root_dir=$(pwd)
flags="${@}"
is-mac() {
if [[ $(uname -s) == "Darwin" || ${TRAVIS_OS_NAME} == "osx" ]]; then
return 0
fi
return 1
}
install-deps() {
if is-mac; then
echo "------ Installing dependencies for Mac ------"
else
echo "------ Installing dependencies for Linux ------"
sudo apt-get update -qq
sudo apt-get install -y libc6-dev-i386 mingw-w64 gcc-mingw-w64-x86-64 g++-mingw-w64-i686 binutils-mingw-w64-x86-64 lib32z1-dev zlib1g-dev g++-mingw-w64-x86-64
fi
}
run() { run() {
echo '===============================================' echo '==============================================='
if [ ! $(pwd) = ${root_dir} ]; then if [ ! $(pwd) = ${root_dir} ]; then
@ -23,10 +42,24 @@ run_cmake() {
cd .. cd ..
} }
flags="${@}" publish() {
local platforms="${1}"
local arches="${2}"
local platform
for platform in ${platforms}; do
local arch
for arch in ${arches}; do
echo "------ Publishing ${platform}-${arch} ------"
./gradlew artifactoryPublish -Pplatform=${platform} -Parch=${arch}
done
done
}
has_flag() { has_flag() {
local arg=$1 local arg=${1}
local f
for f in ${flags}; do for f in ${flags}; do
local key=$(echo $f | awk -F '=' '{print $1}') local key=$(echo $f | awk -F '=' '{print $1}')
if [ ${key} = ${arg} ]; then if [ ${key} = ${arg} ]; then
@ -36,9 +69,22 @@ has_flag() {
return 1 return 1
} }
make_target=test ### START ###
test `uname -o` = "Cygwin" || run_cmake -DCMAKE_BUILD_TYPE=Debug install-deps
if [[ "${1}" == "PUBLISH" ]]; then
if is-mac; then
publish "macosx" "i386 x86_64"
elif [[ $(uname -s) == "Linux" ]]; then
publish "linux windows" "i386 x86_64"
fi
else
if [[ $(uname -o) != "Cygwin" ]]; then
run_cmake -DCMAKE_BUILD_TYPE=Debug
fi
make_target=test
run make jdk-test run make jdk-test
run make ${flags} ${make_target} run make ${flags} ${make_target}
@ -54,3 +100,4 @@ run make ${flags} process=interpret ${make_target}
run make ${flags} tails=true continuations=true heapdump=true ${make_target} run make ${flags} tails=true continuations=true heapdump=true ${make_target}
run make ${flags} codegen-targets=all run make ${flags} codegen-targets=all
fi

View File

@ -79,6 +79,6 @@ TEST(ArchitecturePlan)
(lir::UnaryOperation)op, vm::TargetBytesPerWord, mask, &thunk); (lir::UnaryOperation)op, vm::TargetBytesPerWord, mask, &thunk);
assertFalse(thunk); assertFalse(thunk);
assertNotEqual(static_cast<uint8_t>(0), mask.typeMask); assertNotEqual(static_cast<uint8_t>(0), mask.typeMask);
assertNotEqual(static_cast<uint64_t>(0), mask.registerMask); assertNotEqual(static_cast<uint64_t>(0), (uint64_t)mask.lowRegisterMask);
} }
} }

View File

@ -19,18 +19,29 @@ using namespace vm;
TEST(RegisterIterator) TEST(RegisterIterator)
{ {
RegisterMask regs(0x55); BoundedRegisterMask regs(0x55);
assertEqual<unsigned>(0, regs.start); assertEqual<unsigned>(0, regs.start);
assertEqual<unsigned>(7, regs.limit); assertEqual<unsigned>(7, regs.limit);
RegisterIterator it(regs); for(int i = 0; i < 64; i++) {
assertTrue(it.hasNext()); assertEqual<unsigned>(i, BoundedRegisterMask(static_cast<uint64_t>(1) << i).start);
assertEqual<unsigned>(0, it.next()); assertEqual<unsigned>(i + 1, BoundedRegisterMask(static_cast<uint64_t>(1) << i).limit);
assertTrue(it.hasNext()); }
assertEqual<unsigned>(2, it.next());
assertTrue(it.hasNext()); auto it = regs.begin();
assertEqual<unsigned>(4, it.next()); auto end = regs.end();
assertTrue(it.hasNext());
assertEqual<unsigned>(6, it.next()); assertTrue(it != end);
assertFalse(it.hasNext()); assertEqual<unsigned>(6, (*it).index());
++it;
assertTrue(it != end);
assertEqual<unsigned>(4, (*it).index());
++it;
assertTrue(it != end);
assertEqual<unsigned>(2, (*it).index());
++it;
assertTrue(it != end);
assertEqual<unsigned>(0, (*it).index());
++it;
assertFalse(it != end);
} }