Merge branch 'master' of https://github.com/ReadyTalk/avian into avian-pack

Ilya Mizus 2015-01-17 18:34:21 +03:00
commit 89ab73c7e2
80 changed files with 5029 additions and 2006 deletions

View File

@ -1,12 +1,23 @@
language: cpp
cache: apt
os:
- linux
- osx
env:
matrix:
- BUILD_STEP=""
- BUILD_STEP="PUBLISH"
global:
- TERM=dumb
- secure: Pe7OLUvdk3DeTAShyaYsdAXBcnjdNOrtfTPt1mCP5+JKBUw3z0n7BpTgATVRBmkn866pDqxL0N3+jBgjKUpEpqOj23bTB2AeDdIzlYkFGvTd3GQQV1lMFn16I9LOgWJhkfICTJ1rIYiNf8xSkJoeKGB7eGUM0KGsTvelFfsaGVc=
# BINTRAY_USER
- secure: CauPpj2QrbgEaePEPQx+FDeZqc46HmWXHfdAyn+DA9g6np3RMyc+N/ESrJ9efc4E772rnxory2QRyyNffzi29ceNTpIzsIRDuk5WvvC+zePEsQm8kX1afsDK5g16QEvJN24jGSW9ci9uxuknqjeKVOukcFQdxssIyDe11tYWJeI=
# BINTRAY_API_KEY
- secure: Gao6KTkCbqrdCDI2hQswh1v+SpHBeF4hR1WXmCe9gjmPyJb731K2eWigQOrhPOw7Ns3nubo1FwCb5YevYmwPMLryh0f4sKJyqLL1MLbffNl5GttNF2oO3p73cJpgBfHdzabAMwAYGYbneqUZ0Qn4K8RkzXUaoBDv465KmZhqbA0=
script: "./test/ci.sh"
after_success:
- "./gradlew artifactoryPublish"
- secure: rh1utD4shKmYtokItuRYEF9WsfTnvZO5XqnTU4DHTS7quHHgLihtOO2/3+B+2W2hEd5Obr2or8zx+zmzWcNUyLokZ0j/FRLWSScNkLzTtm12pupLrncY+/g1NIdfbhn+OLRIzBz6zB6m6a2qWFEJ+bScUNGD/7wZVtzkujqlDEE=
- secure: j9DOzZMCYk/BzhKK9u4XMKpCzyGOsvP2cLTp6cXE7/tkWDAPVv6BFmeqNbiLTEqk0aGX+HYbY/2YVtpRZmDzfeWtnBFF5mL1Y1tgzx1Kf155C+P6rZgt5PiQTUdXlp2umuRifY1BbXAPc3DZ2UOPUjWKnLHVbZLQRgO1zimmMx8=
matrix:
fast_finish: true
exclude:
- os: osx
env: BUILD_STEP=""
script: ./test/ci.sh ${BUILD_STEP}

README.md
View File

@ -20,7 +20,7 @@ to use forward slashes in the path.
$ export JAVA_HOME=$(/usr/libexec/java_home)
$ make
$ build/macosx-x86_64/avian -cp build/macosx-x86_64/test Hello
#### on Windows (MSYS):
$ git clone git@github.com:ReadyTalk/win64.git ../win64
$ export JAVA_HOME="C:/Program Files/Java/jdk1.7.0_45"
@ -59,10 +59,10 @@ Supported Platforms
Avian can currently target the following platforms:
* Linux (i386, x86_64, and ARM)
* Linux (i386, x86_64, ARM, and ARM64)
* Windows (i386 and x86_64)
* Mac OS X (i386 and x86_64)
* Apple iOS (i386 and ARM)
* Apple iOS (i386, ARM, and ARM64)
* FreeBSD (i386, x86_64)
@ -86,7 +86,7 @@ certain flags described below, all of which are optional.
$ make \
platform={linux,windows,macosx,ios,freebsd} \
arch={i386,x86_64,arm} \
arch={i386,x86_64,arm,arm64} \
process={compile,interpret} \
mode={debug,debug-fast,fast,small} \
lzma=<lzma source directory> \
@ -99,26 +99,26 @@ certain flags described below, all of which are optional.
openjdk-src=<openjdk source directory> \
android=<android source directory>
* `platform` - the target platform
* _default:_ output of $(uname -s | tr [:upper:] [:lower:]),
normalized in some cases (e.g. CYGWIN_NT-5.1 -> windows)
* `arch` - the target architecture
* _default:_ output of $(uname -m), normalized in some cases
(e.g. i686 -> i386)
* `process` - choice between pure interpreter or JIT compiler
* _default:_ compile
* `mode` - which set of compilation flags to use to determine
optimization level, debug symbols, and whether to enable
assertions
* _default:_ fast
* `lzma` - if set, support use of LZMA to compress embedded JARs and
boot images. The value of this option should be a directory
containing a recent LZMA SDK (available [here](http://www.7-zip.org/sdk.html)). Currently, only version 9.20 of
the SDK has been tested, but other versions might work.
* _default:_ not set
* `armv6` - if true, don't use any instructions newer than armv6. By
@ -129,42 +129,42 @@ memory barrier instructions to ensure cache coherency
class library and ahead-of-time compiled methods. This option is
only valid for process=compile builds. Note that you may need to
specify both build-arch=x86_64 and arch=x86_64 on 64-bit systems
where "uname -m" prints "i386".
where "uname -m" prints "i386".
* _default:_ false
* `heapdump` - if true, implement avian.Machine.dumpHeap(String),
which, when called, will generate a snapshot of the heap in a
simple, ad-hoc format for memory profiling purposes. See
heapdump.cpp for details.
* _default:_ false
* `tails` - if true, optimize each tail call by replacing the caller's
stack frame with the callee's. This convention ensures proper
tail recursion, suitable for languages such as Scheme. This
option is only valid for process=compile builds.
* _default:_ false
* `continuations` - if true, support continuations via the
avian.Continuations methods callWithCurrentContinuation and
dynamicWind. See Continuations.java for details. This option is
only valid for process=compile builds.
* _default:_ false
* `use-clang` - if true, use LLVM's clang instead of GCC to build.
Note that this does not currently affect cross compiles, only
native builds.
* _default:_ false
* `openjdk` - if set, use the OpenJDK class library instead of the
default Avian class library. See "Building with the OpenJDK Class
Library" below for details.
Library" below for details.
* _default:_ not set
* `openjdk-src` - if this and the openjdk option above are both set,
build an embeddable VM using the OpenJDK class library. The JNI
components of the OpenJDK class library will be built from the
sources found under the specified directory. See "Building with
the OpenJDK Class Library" below for details.
* _default:_ not set
* `android` - if set, use the Android class library instead of the
@ -184,7 +184,7 @@ Note that not all combinations of these flags are valid. For instance,
non-jailbroken iOS devices do not allow JIT compilation, so only
process=interpret or bootimage=true builds will run on such
devices. See [here](https://github.com/ReadyTalk/hello-ios) for an
example of an Xcode project for iOS which uses Avian.
If you are compiling for Windows, you may either cross-compile using
MinGW or build natively on Windows under MSYS or Cygwin.
@ -366,7 +366,7 @@ the following, starting from the Avian directory:
git clone https://android.googlesource.com/platform/bionic
(cd bionic && \
git checkout 84983592ade3ec7d72d082262fb6646849979bfc)
git clone https://android.googlesource.com/platform/system/core \
system/core
(cd system/core && \
@ -517,37 +517,37 @@ setting the boot classpath to "[bootJar]".
$ cat >embedded-jar-main.cpp <<EOF
#include "stdint.h"
#include "jni.h"
#include "stdlib.h"
#include "stdlib.h"
#if (defined __MINGW32__) || (defined _MSC_VER)
# define EXPORT __declspec(dllexport)
#else
# define EXPORT __attribute__ ((visibility("default"))) \
__attribute__ ((used))
#endif
#if (! defined __x86_64__) && ((defined __MINGW32__) || (defined _MSC_VER))
# define SYMBOL(x) binary_boot_jar_##x
#else
# define SYMBOL(x) _binary_boot_jar_##x
#endif
extern "C" {
extern const uint8_t SYMBOL(start)[];
extern const uint8_t SYMBOL(end)[];
EXPORT const uint8_t*
bootJar(unsigned* size)
{
*size = SYMBOL(end) - SYMBOL(start);
return SYMBOL(start);
}
} // extern "C"
extern "C" void __cxa_pure_virtual(void) { abort(); }
extern "C" void __cxa_pure_virtual(void) { abort(); }
int
main(int ac, const char** av)
{
@ -555,17 +555,17 @@ setting the boot classpath to "[bootJar]".
vmArgs.version = JNI_VERSION_1_2;
vmArgs.nOptions = 1;
vmArgs.ignoreUnrecognized = JNI_TRUE;
JavaVMOption options[vmArgs.nOptions];
vmArgs.options = options;
options[0].optionString = const_cast<char*>("-Xbootclasspath:[bootJar]");
JavaVM* vm;
void* env;
JNI_CreateJavaVM(&vm, &env, &vmArgs);
JNIEnv* e = static_cast<JNIEnv*>(env);
jclass c = e->FindClass("Hello");
if (not e->ExceptionCheck()) {
jmethodID m = e->GetStaticMethodID(c, "main", "([Ljava/lang/String;)V");
@ -577,21 +577,21 @@ setting the boot classpath to "[bootJar]".
for (int i = 1; i < ac; ++i) {
e->SetObjectArrayElement(a, i-1, e->NewStringUTF(av[i]));
}
e->CallStaticVoidMethod(c, m, a);
}
}
}
}
int exitCode = 0;
if (e->ExceptionCheck()) {
exitCode = -1;
e->ExceptionDescribe();
}
vm->DestroyJavaVM();
return exitCode;
}
EOF
@ -745,13 +745,13 @@ containing them. See the previous example for instructions.
$ cat >bootimage-main.cpp <<EOF
#include "stdint.h"
#include "jni.h"
#if (defined __MINGW32__) || (defined _MSC_VER)
# define EXPORT __declspec(dllexport)
#else
# define EXPORT __attribute__ ((visibility("default")))
#endif
#if (! defined __x86_64__) && ((defined __MINGW32__) || (defined _MSC_VER))
# define BOOTIMAGE_BIN(x) binary_bootimage_bin_##x
# define CODEIMAGE_BIN(x) binary_codeimage_bin_##x
@ -759,31 +759,31 @@ containing them. See the previous example for instructions.
# define BOOTIMAGE_BIN(x) _binary_bootimage_bin_##x
# define CODEIMAGE_BIN(x) _binary_codeimage_bin_##x
#endif
extern "C" {
extern const uint8_t BOOTIMAGE_BIN(start)[];
extern const uint8_t BOOTIMAGE_BIN(end)[];
EXPORT const uint8_t*
bootimageBin(unsigned* size)
{
*size = BOOTIMAGE_BIN(end) - BOOTIMAGE_BIN(start);
return BOOTIMAGE_BIN(start);
}
extern const uint8_t CODEIMAGE_BIN(start)[];
extern const uint8_t CODEIMAGE_BIN(end)[];
EXPORT const uint8_t*
codeimageBin(unsigned* size)
{
*size = CODEIMAGE_BIN(end) - CODEIMAGE_BIN(start);
return CODEIMAGE_BIN(start);
}
} // extern "C"
int
main(int ac, const char** av)
{
@ -791,21 +791,21 @@ containing them. See the previous example for instructions.
vmArgs.version = JNI_VERSION_1_2;
vmArgs.nOptions = 2;
vmArgs.ignoreUnrecognized = JNI_TRUE;
JavaVMOption options[vmArgs.nOptions];
vmArgs.options = options;
options[0].optionString
= const_cast<char*>("-Davian.bootimage=bootimageBin");
options[1].optionString
= const_cast<char*>("-Davian.codeimage=codeimageBin");
JavaVM* vm;
void* env;
JNI_CreateJavaVM(&vm, &env, &vmArgs);
JNIEnv* e = static_cast<JNIEnv*>(env);
jclass c = e->FindClass("Hello");
if (not e->ExceptionCheck()) {
jmethodID m = e->GetStaticMethodID(c, "main", "([Ljava/lang/String;)V");
@ -817,25 +817,25 @@ containing them. See the previous example for instructions.
for (int i = 1; i < ac; ++i) {
e->SetObjectArrayElement(a, i-1, e->NewStringUTF(av[i]));
}
e->CallStaticVoidMethod(c, m, a);
}
}
}
}
int exitCode = 0;
if (e->ExceptionCheck()) {
exitCode = -1;
e->ExceptionDescribe();
}
vm->DestroyJavaVM();
return exitCode;
}
EOF
$ g++ -I$JAVA_HOME/include -I$JAVA_HOME/include/linux \
-D_JNI_IMPLEMENTATION_ -c bootimage-main.cpp -o main.o

View File

@ -107,12 +107,6 @@ model {
operatingSystem SupportedOS.valueOf(platform.toUpperCase())
architecture "${arch}"
}
if(platformArch != currentPlatformArch) {
create(currentPlatformArch) {
operatingSystem SupportedOS.CURRENT
architecture "${currentArch}"
}
}
}
tasks {
@ -235,7 +229,7 @@ publishing {
artifact("${nativeBuildDir}/avian${binSuffix}") {
name "avian"
type publishBinSuffix
extension binSuffix
extension publishBinSuffix
}
artifact("${nativeBuildDir}/libavian.a") {
@ -249,6 +243,11 @@ publishing {
}
artifactoryPublish {
onlyIf {
// TRAVIS_BRANCH reports master if it is a master build or a PR going to master
// TRAVIS_PULL_REQUEST reports false if not a pull request, or the PR number if it is
System.env.'TRAVIS_BRANCH' == "master" && System.env.'TRAVIS_PULL_REQUEST' == "false"
}
dependsOn assemble
}
@ -281,4 +280,4 @@ artifactory {
task wrapper(type: Wrapper) {
distributionUrl = 'http://services.gradle.org/distributions/gradle-2.0-bin.zip'
}
}

View File

@ -38,7 +38,7 @@ public class Classes {
public static native VMClass primitiveClass(char name);
public static native void initialize(VMClass vmClass);
public static native boolean isAssignableFrom(VMClass a, VMClass b);
public static native VMClass getVMClass(Object o);
@ -134,7 +134,7 @@ public class Classes {
array[i] = parseAnnotationValue(loader, pool, in);
}
return array;
}
}
default: throw new AssertionError();
}
@ -207,7 +207,7 @@ public class Classes {
while (spec[end] != ';') ++ end;
++ end;
break;
default:
++ end;
}
@ -295,9 +295,9 @@ public class Classes {
}
Class c = loader.loadClass(name);
VMClass vmc = SystemClassLoader.vmClass(c);
Classes.link(vmc, loader);
link(vmc, loader);
if (initialize) {
Classes.initialize(vmc);
initialize(vmc);
}
return c;
}
@ -315,7 +315,7 @@ public class Classes {
} else {
if (name.length() == 1) {
return SystemClassLoader.getClass
(Classes.primitiveClass(name.charAt(0)));
(primitiveClass(name.charAt(0)));
} else {
throw new ClassNotFoundException(name);
}
@ -378,7 +378,7 @@ public class Classes {
public static int findField(VMClass vmClass, String name) {
if (vmClass.fieldTable != null) {
Classes.link(vmClass);
link(vmClass);
for (int i = 0; i < vmClass.fieldTable.length; ++i) {
if (toString(vmClass.fieldTable[i].name).equals(name)) {
@ -426,7 +426,7 @@ public class Classes {
{
VMMethod[] methodTable = vmClass.methodTable;
if (methodTable != null) {
Classes.link(vmClass);
link(vmClass);
if (parameterTypes == null) {
parameterTypes = new Class[0];
@ -464,7 +464,7 @@ public class Classes {
Method[] array = new Method[countMethods(vmClass, publicOnly)];
VMMethod[] methodTable = vmClass.methodTable;
if (methodTable != null) {
Classes.link(vmClass);
link(vmClass);
int ai = 0;
for (int i = 0, j = declaredMethodCount(vmClass); i < j; ++i) {
@ -498,7 +498,7 @@ public class Classes {
public static Field[] getFields(VMClass vmClass, boolean publicOnly) {
Field[] array = new Field[countFields(vmClass, publicOnly)];
if (vmClass.fieldTable != null) {
Classes.link(vmClass);
link(vmClass);
int ai = 0;
for (int i = 0; i < vmClass.fieldTable.length; ++i) {
@ -568,9 +568,9 @@ public class Classes {
return new ProtectionDomain(source, p);
}
public static native Method makeMethod(Class c, int slot);
public static native Field makeField(Class c, int slot);
private static native void acquireClassLock();

View File

@ -18,7 +18,7 @@ package java.security;
*/
public class AccessController {
public static Object doPrivileged (PrivilegedAction action) {
public static <T> T doPrivileged (PrivilegedAction<T> action) {
return action.run();
}
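With the generic signature, callers get a typed result without a cast. A minimal usage sketch (the property lookup is just an illustration):

import java.security.AccessController;
import java.security.PrivilegedAction;

public class PrivilegedExample {
  public static void main(String[] args) {
    // the generified doPrivileged returns T directly, so no cast is needed
    String dir = AccessController.doPrivileged(new PrivilegedAction<String>() {
      public String run() {
        return System.getProperty("user.dir");
      }
    });
    System.out.println(dir);
  }
}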

View File

@ -340,7 +340,101 @@ public class Collections {
}
public static <V> Set<V> synchronizedSet(Set<V> set) {
return new SynchronizedSet<V> (new Object(), set);
return new SynchronizedSet<V> (set, set);
}
static class SynchronizedList<T>
extends SynchronizedCollection<T>
implements List<T>
{
private final List<T> list;
public SynchronizedList(List<T> list) {
super(list, list);
this.list = list;
}
@Override
public T get(int index) {
synchronized (lock) {
return list.get(index);
}
}
@Override
public T set(int index, T value) {
synchronized (lock) {
return list.set(index, value);
}
}
@Override
public T remove(int index) {
synchronized (lock) {
return list.remove(index);
}
}
@Override
public void add(int index, T element) {
synchronized (lock) {
list.add(index, element);
}
}
@Override
public boolean addAll(int startIndex, Collection<? extends T> c) {
synchronized (lock) {
return list.addAll(startIndex, c);
}
}
@Override
public int indexOf(Object value) {
synchronized (lock) {
return list.indexOf(value);
}
}
@Override
public int lastIndexOf(Object value) {
synchronized (lock) {
return list.lastIndexOf(value);
}
}
@Override
public ListIterator<T> listIterator(int index) {
// as described in the javadocs, the caller must synchronize on the list before calling
return list.listIterator(index);
}
@Override
public ListIterator<T> listIterator() {
// as described in the javadocs, the caller must synchronize on the list before calling
return list.listIterator();
}
}
static class RandomAccessSynchronizedList<T>
extends SynchronizedList<T>
implements RandomAccess
{
public RandomAccessSynchronizedList(List<T> list) {
super(list);
}
}
public static <T> List<T> synchronizedList(List<T> list) {
List<T> result;
if (list instanceof RandomAccess) {
result = new RandomAccessSynchronizedList<T>(list);
} else {
result = new SynchronizedList<T>(list);
}
return result;
}
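Note that synchronizedSet and SynchronizedList now lock on the collection itself rather than on a private Object, so client-side locking works as the javadocs describe. A usage sketch of the iteration pattern the comments above call for (the list contents are hypothetical):

import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;

public class SynchronizedListExample {
  public static void main(String[] args) {
    List<String> list = Collections.synchronizedList(new ArrayList<String>());
    list.add("a"); // individual operations are locked internally
    list.add("b");

    // iteration spans several operations, so the caller holds the lock itself
    synchronized (list) {
      for (Iterator<String> it = list.iterator(); it.hasNext();) {
        System.out.println(it.next());
      }
    }
  }
}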
static class SynchronizedIterator<T> implements Iterator<T> {

View File

@ -49,6 +49,14 @@ public class ConcurrentHashMap<K,V>
this();
}
public ConcurrentHashMap(int initialCapacity, float loadFactor) {
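// the sizing hints are accepted for API compatibility but ignored;
// construction always delegates to the default configuration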
this();
}
public ConcurrentHashMap(int initialCapacity, float loadFactor, int concurrencyLevel) {
this();
}
public boolean isEmpty() {
return content.size == 0;
}

View File

@ -10,6 +10,6 @@
package java.util.concurrent;
public interface Delayed {
public interface Delayed extends Comparable<Delayed> {
public long getDelay(TimeUnit unit);
}
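Extending Comparable<Delayed> matches the JDK contract, where delayed elements are ordered by remaining delay (this is what DelayQueue relies on). A minimal sketch of an implementing class (Deadline is hypothetical):

import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;

class Deadline implements Delayed {
  private final long deadlineMillis;

  Deadline(long delayMillis) {
    this.deadlineMillis = System.currentTimeMillis() + delayMillis;
  }

  public long getDelay(TimeUnit unit) {
    // convert the remaining milliseconds into the caller's unit
    return unit.convert(deadlineMillis - System.currentTimeMillis(),
                        TimeUnit.MILLISECONDS);
  }

  public int compareTo(Delayed other) {
    long diff = getDelay(TimeUnit.MILLISECONDS)
        - other.getDelay(TimeUnit.MILLISECONDS);
    return diff < 0 ? -1 : (diff > 0 ? 1 : 0);
  }
}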

View File

@ -14,6 +14,8 @@ import java.util.Collection;
public interface ExecutorService extends Executor {
public void shutdown();
public List<Runnable> shutdownNow();
public boolean isShutdown();

View File

@ -0,0 +1,24 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
package java.util.concurrent;
public class Executors {
public static <T> Callable<T> callable(final Runnable task, final T result) {
return new Callable<T>() {
@Override
public T call() throws Exception {
task.run();
return result;
}
};
}
}
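A usage sketch of the new adapter: call() runs the Runnable and then returns the supplied result.

import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

public class CallableExample {
  public static void main(String[] args) throws Exception {
    Runnable task = new Runnable() {
      public void run() {
        System.out.println("working");
      }
    };
    Callable<String> c = Executors.callable(task, "done");
    System.out.println(c.call()); // prints "working", then "done"
  }
}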

docker/arm64/Dockerfile (new file)
View File

@ -0,0 +1,71 @@
FROM joshuawarner32/avian-build
MAINTAINER Joshua Warner, joshuawarner32@gmail.com
RUN dpkg --add-architecture arm64 && \
apt-get update && \
mkdir -p /opt/arm64 && \
apt-get download libc6-dev:arm64 \
linux-headers-3.16.0-4-all-arm64:arm64 \
linux-libc-dev:arm64 \
libc6:arm64 \
zlib1g-dev:arm64 \
zlib1g:arm64 && \
for x in *.deb; do \
dpkg -x $x /opt/arm64; \
done && \
rm *.deb && \
apt-get install -y \
wget \
libgmp-dev \
libmpfr-dev \
libmpc-dev \
libisl-dev && \
apt-get clean all && \
for x in $(find /opt/arm64 -type l); do \
r=$(readlink "$x" | sed 's,^/,/opt/arm64/,g'); \
rm "$x"; \
ln -s "$r" "$x"; \
done
RUN mkdir -p /var/src
# Build & install binutils
RUN wget ftp://sourceware.org/pub/binutils/snapshots/binutils-2.23.91.tar.bz2 -O /var/src/binutils.tar.bz2 && \
cd /var/src/ && tar -xjf binutils.tar.bz2 && rm binutils.tar.bz2 && \
cd /var/src/binutils* && \
mkdir build && \
cd build && \
../configure \
--target=aarch64-linux-gnu \
--prefix=/opt/arm64 \
--disable-multilib \
--program-prefix=aarch64-linux-gnu- \
--with-sysroot=/opt/arm64 \
--with-headers=/opt/arm64/usr/include \
--disable-werror && \
make && \
make install && \
cd /var/src && \
rm -rf *
# build & install gcc
RUN wget http://www.netgull.com/gcc/releases/gcc-4.8.2/gcc-4.8.2.tar.bz2 -O /var/src/gcc.tar.bz2 && \
cd /var/src/ && tar -xjf gcc.tar.bz2 && rm gcc.tar.bz2 && \
cd /var/src/gcc* && \
mkdir build && \
cd build && \
../configure \
--target=aarch64-linux-gnu \
--enable-languages=c,c++ \
--prefix=/opt/arm64 \
--disable-multilib \
--program-prefix=aarch64-linux-gnu- \
--with-sysroot=/opt/arm64 \
--with-headers=/opt/arm64/usr/include \
--disable-werror && \
make && \
make install && \
cd /var/src && \
rm -rf *
ENV PATH $PATH:/opt/arm64/bin

View File

@ -8,6 +8,8 @@ if test $# -eq 0; then
exit 1
fi
THE_USER="-u $(id -u "${USER}")"
while test $# -gt 1 ; do
key="$1"
case $key in
@ -16,6 +18,10 @@ while test $# -gt 1 ; do
CONTAINER="$1"
shift
;;
-r|--root)
shift
THE_USER=
;;
--)
shift
break
@ -32,4 +38,4 @@ fi
DIR=$(cd $(dirname "$0") && cd .. && pwd)
docker run --rm -i -t -v "${DIR}":/var/avian -u $(id -u "${USER}") "${CONTAINER}" "${@}"
docker run --rm -i -t -v "${DIR}":/var/avian ${THE_USER} "${CONTAINER}" "${@}"

View File

@ -1,4 +1,4 @@
#Thu Aug 28 14:47:06 MDT 2014
#Fri Jan 02 11:31:32 MST 2015
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME

View File

@ -12,6 +12,7 @@
#define AVIAN_CODEGEN_ARCHITECTURE_H
#include "ir.h"
#include "registers.h"
namespace vm {
class Zone;
@ -27,21 +28,29 @@ namespace codegen {
class Assembler;
class RegisterFile;
class OperandMask {
public:
uint8_t typeMask;
uint64_t registerMask;
RegisterMask lowRegisterMask;
RegisterMask highRegisterMask;
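// the two masks constrain register selection for the low and high words
// of a (possibly two-word) operand; previously both were packed into the
// halves of a single 64-bit registerMask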
OperandMask(uint8_t typeMask, uint64_t registerMask)
: typeMask(typeMask), registerMask(registerMask)
OperandMask(uint8_t typeMask,
RegisterMask lowRegisterMask,
RegisterMask highRegisterMask)
: typeMask(typeMask),
lowRegisterMask(lowRegisterMask),
highRegisterMask(highRegisterMask)
{
}
OperandMask() : typeMask(~0), registerMask(~static_cast<uint64_t>(0))
OperandMask() : typeMask(~0), lowRegisterMask(AnyRegisterMask), highRegisterMask(AnyRegisterMask)
{
}
void setLowHighRegisterMasks(RegisterMask lowRegisterMask, RegisterMask highRegisterMask) {
this->lowRegisterMask = lowRegisterMask;
this->highRegisterMask = highRegisterMask;
}
};
class Architecture {
@ -50,13 +59,13 @@ class Architecture {
virtual const RegisterFile* registerFile() = 0;
virtual int scratch() = 0;
virtual int stack() = 0;
virtual int thread() = 0;
virtual int returnLow() = 0;
virtual int returnHigh() = 0;
virtual int virtualCallTarget() = 0;
virtual int virtualCallIndex() = 0;
virtual Register scratch() = 0;
virtual Register stack() = 0;
virtual Register thread() = 0;
virtual Register returnLow() = 0;
virtual Register returnHigh() = 0;
virtual Register virtualCallTarget() = 0;
virtual Register virtualCallIndex() = 0;
virtual ir::TargetInfo targetInfo() = 0;
@ -67,14 +76,14 @@ class Architecture {
virtual bool alwaysCondensed(lir::BinaryOperation op) = 0;
virtual bool alwaysCondensed(lir::TernaryOperation op) = 0;
virtual bool reserved(int register_) = 0;
virtual bool reserved(Register register_) = 0;
virtual unsigned frameFootprint(unsigned footprint) = 0;
virtual unsigned argumentFootprint(unsigned footprint) = 0;
virtual bool argumentAlignment() = 0;
virtual bool argumentRegisterAlignment() = 0;
virtual unsigned argumentRegisterCount() = 0;
virtual int argumentRegister(unsigned index) = 0;
virtual Register argumentRegister(unsigned index) = 0;
virtual bool hasLinkRegister() = 0;

View File

@ -15,6 +15,7 @@
#include "avian/zone.h"
#include <avian/codegen/lir.h>
#include <avian/codegen/registers.h>
#include <avian/codegen/promise.h>
namespace avian {
@ -25,11 +26,11 @@ class Architecture;
class OperandInfo {
public:
const unsigned size;
const lir::OperandType type;
const lir::Operand::Type type;
lir::Operand* const operand;
inline OperandInfo(unsigned size,
lir::OperandType type,
lir::Operand::Type type,
lir::Operand* operand)
: size(size), type(type), operand(operand)
{
@ -52,10 +53,10 @@ class Assembler {
public:
class Client {
public:
virtual int acquireTemporary(uint32_t mask = ~static_cast<uint32_t>(0)) = 0;
virtual void releaseTemporary(int r) = 0;
virtual Register acquireTemporary(RegisterMask mask = AnyRegisterMask) = 0;
virtual void releaseTemporary(Register r) = 0;
virtual void save(int r) = 0;
virtual void save(Register r) = 0;
};
class Block {
@ -76,8 +77,8 @@ class Assembler {
virtual void popFrame(unsigned footprint) = 0;
virtual void popFrameForTailCall(unsigned footprint,
int offset,
int returnAddressSurrogate,
int framePointerSurrogate) = 0;
Register returnAddressSurrogate,
Register framePointerSurrogate) = 0;
virtual void popFrameAndPopArgumentsAndReturn(unsigned frameFootprint,
unsigned argumentFootprint) = 0;
virtual void popFrameAndUpdateStackAndReturn(unsigned frameFootprint,

View File

@ -11,6 +11,8 @@
#ifndef AVIAN_CODEGEN_LIR_H
#define AVIAN_CODEGEN_LIR_H
#include <avian/codegen/registers.h>
namespace avian {
namespace codegen {
class Promise;
@ -79,19 +81,8 @@ const unsigned NonBranchTernaryOperationCount = FloatMin + 1;
const unsigned BranchOperationCount = JumpIfFloatGreaterOrEqualOrUnordered
- FloatMin;
enum OperandType {
ConstantOperand,
AddressOperand,
RegisterOperand,
MemoryOperand
};
enum ValueType { ValueGeneral, ValueFloat };
const unsigned OperandTypeCount = MemoryOperand + 1;
const int NoRegister = -1;
inline bool isBranch(lir::TernaryOperation op)
{
return op > FloatMin;
@ -128,6 +119,21 @@ inline bool isFloatUnaryOp(lir::BinaryOperation op)
}
class Operand {
public:
enum class Type {
Constant,
Address,
RegisterPair,
Memory
};
const static unsigned TypeCount = (unsigned)Type::Memory + 1;
const static unsigned ConstantMask = 1 << (unsigned)Type::Constant;
const static unsigned AddressMask = 1 << (unsigned)Type::Address;
const static unsigned RegisterPairMask = 1 << (unsigned)Type::RegisterPair;
const static unsigned MemoryMask = 1 << (unsigned)Type::Memory;
};
class Constant : public Operand {
@ -148,26 +154,26 @@ class Address : public Operand {
Promise* address;
};
class Register : public Operand {
class RegisterPair : public Operand {
public:
Register(int low, int high = NoRegister) : low(low), high(high)
RegisterPair(Register low, Register high = NoRegister) : low(low), high(high)
{
}
int low;
int high;
Register low;
Register high;
};
class Memory : public Operand {
public:
Memory(int base, int offset, int index = NoRegister, unsigned scale = 1)
Memory(Register base, int offset, Register index = NoRegister, unsigned scale = 1)
: base(base), offset(offset), index(index), scale(scale)
{
}
int base;
Register base;
int offset;
int index;
Register index;
unsigned scale;
};

View File

@ -16,28 +16,189 @@
namespace avian {
namespace codegen {
class RegisterMask {
public:
uint32_t mask;
uint8_t start;
uint8_t limit;
class RegisterMask;
static unsigned maskStart(uint32_t mask);
static unsigned maskLimit(uint32_t mask);
class Register {
private:
int8_t _index;
public:
explicit constexpr Register(int8_t _index) : _index(_index) {}
constexpr Register() : _index(-1) {}
inline RegisterMask(uint32_t mask)
: mask(mask), start(maskStart(mask)), limit(maskLimit(mask))
{
constexpr bool operator == (Register o) const {
return _index == o._index;
}
constexpr bool operator != (Register o) const {
return !(*this == o);
}
constexpr RegisterMask operator | (Register o) const;
constexpr bool operator < (Register o) const {
return _index < o._index;
}
constexpr bool operator > (Register o) const {
return _index > o._index;
}
constexpr bool operator <= (Register o) const {
return _index <= o._index;
}
constexpr bool operator >= (Register o) const {
return _index >= o._index;
}
constexpr int index() const {
return _index;
}
};
constexpr Register NoRegister;
class RegisterMask {
private:
uint64_t mask;
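// index of the lowest set bit (0 for an empty mask): shift left until
// the mask clears, counting down from 64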
static constexpr unsigned maskStart(uint64_t mask, unsigned offset = 64) {
return mask == 0 ? (offset & 63) : maskStart(mask << 1, offset - 1);
}
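// one past the index of the highest set bit (0 for an empty mask):
// shift right until the mask clears, counting up from 0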
static constexpr unsigned maskLimit(uint64_t mask, unsigned offset = 0) {
return mask == 0 ? offset : maskLimit(mask >> 1, offset + 1);
}
public:
constexpr RegisterMask(uint64_t mask) : mask(mask) {}
constexpr RegisterMask() : mask(0) {}
constexpr RegisterMask(Register reg) : mask(static_cast<uint64_t>(1) << reg.index()) {}
constexpr unsigned begin() const {
return maskStart(mask);
}
constexpr unsigned end() const {
return maskLimit(mask);
}
constexpr RegisterMask operator &(RegisterMask o) const {
return RegisterMask(mask & o.mask);
}
RegisterMask operator &=(RegisterMask o) {
mask &= o.mask;
return *this;
}
constexpr RegisterMask operator |(RegisterMask o) const {
return RegisterMask(mask | o.mask);
}
constexpr bool contains(Register reg) const {
return (mask & (static_cast<uint64_t>(1) << reg.index())) != 0;
}
constexpr bool containsExactly(Register reg) const {
return mask == (mask & (static_cast<uint64_t>(1) << reg.index()));
}
constexpr RegisterMask excluding(Register reg) const {
return RegisterMask(mask & ~(static_cast<uint64_t>(1) << reg.index()));
}
constexpr RegisterMask including(Register reg) const {
return RegisterMask(mask | (static_cast<uint64_t>(1) << reg.index()));
}
constexpr explicit operator uint64_t() const {
return mask;
}
constexpr explicit operator bool() const {
return mask != 0;
}
};
constexpr RegisterMask AnyRegisterMask(~static_cast<uint64_t>(0));
constexpr RegisterMask NoneRegisterMask(0);
constexpr RegisterMask Register::operator | (Register o) const {
return RegisterMask(*this) | o;
}
class RegisterIterator;
class BoundedRegisterMask : public RegisterMask {
public:
uint8_t start;
uint8_t limit;
BoundedRegisterMask(RegisterMask mask)
: RegisterMask(mask), start(mask.begin()), limit(mask.end())
{
}
RegisterIterator begin() const;
RegisterIterator end() const;
};
class RegisterIterator {
public:
int index;
int direction;
int limit;
const RegisterMask mask;
RegisterIterator(int index, int direction, int limit, RegisterMask mask)
: index(index), direction(direction), limit(limit), mask(mask)
{
}
bool operator !=(const RegisterIterator& o) const {
return index != o.index;
}
Register operator *() {
return Register(index);
}
void operator ++ () {
if(index != limit) {
index += direction;
}
while(index != limit && !mask.contains(Register(index))) {
index += direction;
}
}
};
inline RegisterIterator BoundedRegisterMask::begin() const {
// We use reverse iteration... for some reason.
return RegisterIterator(limit - 1, -1, start - 1, *this);
}
inline RegisterIterator BoundedRegisterMask::end() const {
// We use reverse iteration... for some reason.
return RegisterIterator(start - 1, -1, start - 1, *this);
}
inline RegisterIterator begin(BoundedRegisterMask mask) {
return mask.begin();
}
inline RegisterIterator end(BoundedRegisterMask mask) {
return mask.end();
}
class RegisterFile {
public:
RegisterMask allRegisters;
RegisterMask generalRegisters;
RegisterMask floatRegisters;
BoundedRegisterMask allRegisters;
BoundedRegisterMask generalRegisters;
BoundedRegisterMask floatRegisters;
inline RegisterFile(uint32_t generalRegisterMask, uint32_t floatRegisterMask)
RegisterFile(RegisterMask generalRegisterMask, RegisterMask floatRegisterMask)
: allRegisters(generalRegisterMask | floatRegisterMask),
generalRegisters(generalRegisterMask),
floatRegisters(floatRegisterMask)
@ -45,31 +206,6 @@ class RegisterFile {
}
};
class RegisterIterator {
public:
int index;
const RegisterMask& mask;
inline RegisterIterator(const RegisterMask& mask)
: index(mask.start), mask(mask)
{
}
inline bool hasNext()
{
return index < mask.limit;
}
inline int next()
{
int r = index;
do {
index++;
} while (index < mask.limit && !(mask.mask & (1 << index)));
return r;
}
};
} // namespace codegen
} // namespace avian

View File

@ -118,6 +118,7 @@ class PlatformInfo {
x86 = AVIAN_ARCH_X86,
x86_64 = AVIAN_ARCH_X86_64,
Arm = AVIAN_ARCH_ARM,
Arm64 = AVIAN_ARCH_ARM64,
UnknownArch = AVIAN_ARCH_UNKNOWN
};

makefile
View File

@ -7,12 +7,13 @@ build-arch := $(shell uname -m \
| sed 's/^i.86$$/i386/' \
| sed 's/^x86pc$$/i386/' \
| sed 's/amd64/x86_64/' \
| sed 's/^arm.*$$/arm/')
| sed 's/^arm.*$$/arm/' \
| sed 's/aarch64/arm64/')
build-platform := \
$(shell uname -s | tr [:upper:] [:lower:] \
| sed \
-e 's/^mingw32.*$$/mingw32/' \
-e 's/^cygwin.*$$/cygwin/' \
-e 's/^darwin.*$$/macosx/')
@ -62,8 +63,8 @@ ifeq ($(filter compile interpret,$(process)),)
x := $(error "'$(process)' is not a valid process (choose one of: compile interpret)")
endif
ifeq ($(filter x86_64 i386 arm,$(arch)),)
x := $(error "'$(arch)' is not a supported architecture (choose one of: x86_64 i386 arm)")
ifeq ($(filter x86_64 i386 arm arm64,$(arch)),)
x := $(error "'$(arch)' is not a supported architecture (choose one of: x86_64 i386 arm arm64)")
endif
ifeq ($(platform),darwin)
@ -79,14 +80,14 @@ ifeq ($(filter linux windows macosx ios freebsd,$(platform)),)
endif
ifeq ($(platform),macosx)
ifeq ($(arch),arm)
x := $(error "please use 'arch=arm' 'platform=ios' to build for ios-arm")
ifneq ($(filter arm arm64,$(arch)),)
x := $(error "please use ('arch=arm' or 'arch=arm64') 'platform=ios' to build for ios-arm")
endif
endif
ifeq ($(platform),ios)
ifeq ($(filter arm i386,$(arch)),)
x := $(error "please specify 'arch=i386' or 'arch=arm' with 'platform=ios'")
ifeq ($(filter i386 arm arm64,$(arch)),)
x := $(error "please specify 'arch=i386', 'arch=arm', or 'arch=arm64' with 'platform=ios'")
endif
endif
@ -476,15 +477,15 @@ cflags = $(build-cflags)
common-lflags = -lm -lz
ifeq ($(use-clang),true)
ifeq ($(build-kernel),darwin)
common-lflags += -Wl,-export_dynamic
else
ifneq ($(platform),windows)
common-lflags += -Wl,-E
else
common-lflags += -Wl,--export-all-symbols
endif
endif
endif
build-lflags = -lz -lpthread -ldl
@ -542,30 +543,39 @@ codeimage-symbols = _binary_codeimage_bin_start:_binary_codeimage_bin_end
developer-dir := $(shell if test -d /Developer/Platforms/$(target).platform/Developer/SDKs; then echo /Developer; \
else echo /Applications/Xcode.app/Contents/Developer; fi)
ifeq ($(arch),i386)
ifneq (,$(filter i386 arm,$(arch)))
pointer-size = 4
endif
ifeq ($(arch),arm)
ifneq (,$(filter arm arm64,$(arch)))
asm = arm
pointer-size = 4
ifneq ($(platform),ios)
ifneq ($(arch),arm64)
no-psabi = -Wno-psabi
cflags += -marm $(no-psabi)
# By default, assume we can't use armv7-specific instructions on
# non-iOS platforms. Ideally, we'd detect this at runtime.
armv6=true
endif
endif
ifneq ($(arch),$(build-arch))
ifneq ($(kernel),darwin)
cxx = arm-linux-gnueabi-g++
cc = arm-linux-gnueabi-gcc
ar = arm-linux-gnueabi-ar
ranlib = arm-linux-gnueabi-ranlib
strip = arm-linux-gnueabi-strip
ifeq ($(arch),arm64)
cxx = aarch64-linux-gnu-g++
cc = aarch64-linux-gnu-gcc
ar = aarch64-linux-gnu-ar
ranlib = aarch64-linux-gnu-ranlib
strip = aarch64-linux-gnu-strip
else
cxx = arm-linux-gnueabi-g++
cc = arm-linux-gnueabi-gcc
ar = arm-linux-gnueabi-ar
ranlib = arm-linux-gnueabi-ranlib
strip = arm-linux-gnueabi-strip
endif
endif
endif
endif
@ -704,7 +714,7 @@ ifeq ($(kernel),darwin)
sdk-dir = $(platform-dir)/Developer/SDKs
mac-version := $(shell \
if test -d $(sdk-dir)/MacOSX10.9.sdk; then echo 10.9; \
elif test -d $(sdk-dir)/MacOSX10.8.sdk; then echo 10.8; \
elif test -d $(sdk-dir)/MacOSX10.7.sdk; then echo 10.7; \
elif test -d $(sdk-dir)/MacOSX10.6.sdk; then echo 10.6; \
@ -743,7 +753,11 @@ ifeq ($(kernel),darwin)
else
target = iPhoneOS
sdk = iphoneos$(ios-version)
arch-flag = -arch armv7
ifeq ($(arch),arm)
arch-flag = -arch armv7
else
arch-flag = -arch arm64
endif
release = Release-iphoneos
endif
@ -751,7 +765,8 @@ ifeq ($(kernel),darwin)
sdk-dir = $(platform-dir)/Developer/SDKs
ios-version := $(shell \
if test -d $(sdk-dir)/$(target)8.0.sdk; then echo 8.0; \
if test -d $(sdk-dir)/$(target)8.1.sdk; then echo 8.1; \
elif test -d $(sdk-dir)/$(target)8.0.sdk; then echo 8.0; \
elif test -d $(sdk-dir)/$(target)7.1.sdk; then echo 7.1; \
elif test -d $(sdk-dir)/$(target)7.0.sdk; then echo 7.0; \
elif test -d $(sdk-dir)/$(target)6.1.sdk; then echo 6.1; \
@ -844,7 +859,7 @@ ifeq ($(platform),windows)
&& echo i686-w64-mingw32- || echo x86_64-w64-mingw32-)
cxx = $(prefix)g++ -m32
cc = $(prefix)gcc -m32
dlltool = $(prefix)dlltool -mi386 --as-flags=--32
ar = $(prefix)ar
ranlib = $(prefix)ranlib
strip = $(prefix)strip --strip-all
@ -1213,7 +1228,7 @@ vm-sources = \
$(src)/jnienv.cpp \
$(src)/process.cpp
vm-asm-sources = $(src)/$(asm).$(asm-format)
vm-asm-sources = $(src)/$(arch).$(asm-format)
target-asm = $(asm)
@ -1230,7 +1245,6 @@ compiler-sources = \
$(src)/codegen/compiler.cpp \
$(wildcard $(src)/codegen/compiler/*.cpp) \
$(src)/debug-util.cpp \
$(src)/codegen/registers.cpp \
$(src)/codegen/runtime.cpp \
$(src)/codegen/targets.cpp \
$(src)/util/fixed-allocator.cpp
@ -1256,7 +1270,7 @@ ifeq ($(process),compile)
vm-sources += $(native-assembler-sources)
endif
ifeq ($(codegen-targets),all)
ifeq ($(arch),arm)
ifneq (,$(filter arm arm64,$(arch)))
# The x86 jit has a dependency on the x86 assembly code,
# and thus can't be successfully built on non-x86 platforms.
vm-sources += $(native-assembler-sources)
@ -1265,7 +1279,7 @@ ifeq ($(process),compile)
endif
endif
vm-asm-sources += $(src)/compile-$(asm).$(asm-format)
vm-asm-sources += $(src)/compile-$(arch).$(asm-format)
endif
cflags += -DAVIAN_PROCESS_$(process)
ifeq ($(aot-only),true)
@ -1277,7 +1291,7 @@ all-codegen-target-objects = $(call cpp-objects,$(all-codegen-target-sources),$(
vm-asm-objects = $(call asm-objects,$(vm-asm-sources),$(src),$(build))
vm-objects = $(vm-cpp-objects) $(vm-asm-objects)
heapwalk-sources = $(src)/heapwalk.cpp
heapwalk-objects = \
$(call cpp-objects,$(heapwalk-sources),$(src),$(build))
@ -1533,6 +1547,10 @@ ifeq ($(target-arch),arm)
cflags += -DAVIAN_TARGET_ARCH=AVIAN_ARCH_ARM
endif
ifeq ($(target-arch),arm64)
cflags += -DAVIAN_TARGET_ARCH=AVIAN_ARCH_ARM64
endif
ifeq ($(target-format),elf)
cflags += -DAVIAN_TARGET_FORMAT=AVIAN_FORMAT_ELF
endif
@ -1659,9 +1677,10 @@ $(classpath-dep): $(classpath-sources) $(classpath-jar-dep)
@echo "compiling classpath classes"
@mkdir -p $(classpath-build)
classes="$(shell $(MAKE) -s --no-print-directory build=$(build) \
$(classpath-classes))"; if [ -n "$${classes}" ]; then \
$(classpath-classes) arch=$(build-arch) platform=$(build-platform))"; \
if [ -n "$${classes}" ]; then \
$(javac) -source 1.6 -target 1.6 \
-d $(classpath-build) -bootclasspath $(boot-classpath) \
$${classes}; fi
@touch $(@)
@ -1726,7 +1745,7 @@ $(build)/android.dep: $(luni-javas) $(dalvik-javas) $(libart-javas) \
$(build)/android/java/security/security.properties
chmod +w $(build)/android/java/security/security.properties
cp -r $(build)/android/* $(classpath-build)
@touch $(@)
$(test-build)/%.class: $(test)/%.java
@echo $(<)
@ -1737,7 +1756,7 @@ $(test-dep): $(test-sources) $(test-library)
files="$(shell $(MAKE) -s --no-print-directory build=$(build) $(test-classes))"; \
if test -n "$${files}"; then \
$(javac) -source 1.6 -target 1.6 \
-classpath $(test-build) -d $(test-build) -bootclasspath $(boot-classpath) $${files}; \
fi
$(javac) -source 1.2 -target 1.1 -XDjsrlimit=0 -d $(test-build) \
-bootclasspath $(boot-classpath) test/Subroutine.java
@ -1749,7 +1768,7 @@ $(test-extra-dep): $(test-extra-sources)
files="$(shell $(MAKE) -s --no-print-directory build=$(build) $(test-extra-classes))"; \
if test -n "$${files}"; then \
$(javac) -source 1.6 -target 1.6 \
-d $(test-build) -bootclasspath $(boot-classpath) $${files}; \
fi
@touch $(@)
@ -1831,7 +1850,7 @@ ifdef mt
endif
else
$(dlltool) -z $(addsuffix .def,$(basename $(@))) $(^)
$(dlltool) -d $(addsuffix .def,$(basename $(@))) -e $(addsuffix .exp,$(basename $(@)))
$(ld) $(addsuffix .exp,$(basename $(@))) $(^) \
$(lflags) $(bootimage-lflags) -o $(@)
endif

View File

@ -10,7 +10,7 @@
details. */
#include "avian/types.h"
.text
#define LOCAL(x) .L##x
@ -18,7 +18,7 @@
#ifdef __APPLE__
# define GLOBAL(x) _##x
#else
# define GLOBAL(x) x
#endif
.globl GLOBAL(vmNativeCall)
@ -83,9 +83,9 @@ LOCAL(loop):
LOCAL(double):
cmp r8,#DOUBLE_TYPE
bne LOCAL(exit)
fmrrd r0,r1,d0
#endif
LOCAL(exit):
ldmfd sp!, {r4-r8, pc} // restore non-volatile regs and return
@ -111,7 +111,7 @@ GLOBAL(vmRun):
stmfd sp!, {r4-r11, lr}
// align stack
sub sp, sp, #12
str sp, [r2, #CHECKPOINT_STACK]
mov r12, r0

src/arm64.S (new file)
View File

@ -0,0 +1,148 @@
/* arm64.S: JNI gluecode for ARM64
Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "avian/types.h"
.text
#define LOCAL(x) .L##x
#ifdef __APPLE__
# define GLOBAL(x) _##x
#else
# define GLOBAL(x) x
#endif
.globl GLOBAL(vmNativeCall)
.align 2
GLOBAL(vmNativeCall):
// arguments:
// x0 -> x19 : function
// w1 -> w20 : stackTotal
// x2 : memoryTable
// w3 : memoryCount
// x4 -> x21 : gprTable
// x5 -> x22 : vfpTable
// w6 -> w23 : returnType
// allocate frame
stp x29, x30, [sp,#-64]!
mov x29, sp
// save callee-saved register values so we can clobber them
stp x19, x20, [sp,#16]
stp x21, x22, [sp,#32]
str x23, [sp,#48]
// move arguments into callee-saved registers
mov x19, x0
mov w20, w1
mov x21, x4
mov x22, x5
mov w23, w6
// setup stack arguments if necessary
sub sp, sp, w20, uxtw // allocate stack
mov x9, sp
LOCAL(loop):
cmp w3, wzr
b.eq LOCAL(populateGPRs)
ldr x0, [x2], #8
str x0, [x9], #8
sub w3, w3, #8
b LOCAL(loop)
LOCAL(populateGPRs):
cmp x21, xzr
b.eq LOCAL(populateVFPs)
ldp x0, x1, [x21]
ldp x2, x3, [x21,#16]
ldp x4, x5, [x21,#32]
ldp x6, x7, [x21,#48]
LOCAL(populateVFPs):
cmp x22, xzr
b.eq LOCAL(doCall)
ldp d0, d1, [x22]
ldp d2, d3, [x22,#16]
ldp d4, d5, [x22,#32]
ldp d6, d7, [x22,#48]
LOCAL(doCall):
blr x19 // call function
add sp, sp, w20, uxtw // deallocate stack
cmp w23,#FLOAT_TYPE
b.ne LOCAL(double)
fmov w0,s0
b LOCAL(exit)
LOCAL(double):
cmp w23,#DOUBLE_TYPE
b.ne LOCAL(exit)
fmov x0,d0
LOCAL(exit):
ldp x19, x20, [sp,#16]
ldp x21, x22, [sp,#32]
ldr x23, [sp,#48]
ldp x29, x30, [sp],#64
ret
.globl GLOBAL(vmJump)
.align 2
GLOBAL(vmJump):
mov x30, x0
mov x0, x4
mov x1, x5
mov sp, x2
mov x19, x3
br x30
#define CHECKPOINT_THREAD 8
#define CHECKPOINT_STACK 48
.globl GLOBAL(vmRun)
.align 2
GLOBAL(vmRun):
// x0: function
// x1: arguments
// x2: checkpoint
// allocate frame
stp x29, x30, [sp,#-96]!
mov x29, sp
// save callee-saved register values
stp x19, x20, [sp,#16]
stp x21, x22, [sp,#32]
stp x23, x24, [sp,#48]
stp x25, x26, [sp,#64]
stp x27, x28, [sp,#80]
mov x19, sp
str x19, [x2, #CHECKPOINT_STACK]
mov x19, x0
ldr x0, [x2, #CHECKPOINT_THREAD]
blr x19
.globl GLOBAL(vmRun_returnAddress)
.align 2
GLOBAL(vmRun_returnAddress):
ldp x19, x20, [sp,#16]
ldp x21, x22, [sp,#32]
ldp x23, x24, [sp,#48]
ldp x25, x26, [sp,#64]
ldp x27, x28, [sp,#80]
ldp x29, x30, [sp],#96
br x30

View File

@ -43,7 +43,7 @@ inline void compileTimeMemoryBarrier()
#if (defined ARCH_x86_32) || (defined ARCH_x86_64)
#include "x86.h"
#elif defined ARCH_arm
#elif (defined ARCH_arm) || (defined ARCH_arm64)
#include "arm.h"
#else
#error unsupported architecture

View File

@ -34,7 +34,11 @@
#define THREAD_STATE_IP(state) ((state).FIELD(pc))
#define THREAD_STATE_STACK(state) ((state).FIELD(sp))
#if (defined __APPLE__) && (defined ARCH_arm64)
#define THREAD_STATE_THREAD(state) ((state).FIELD(x[19]))
#else
#define THREAD_STATE_THREAD(state) ((state).FIELD(r[8]))
#endif
#define THREAD_STATE_LINK(state) ((state).FIELD(lr))
#define IP_REGISTER(context) THREAD_STATE_IP(context->uc_mcontext->FIELD(ss))
@ -53,10 +57,17 @@
#define THREAD_REGISTER(context) (context->uc_mcontext.cpu.gpr[ARM_REG_IP])
#define LINK_REGISTER(context) (context->uc_mcontext.cpu.gpr[ARM_REG_LR])
#else
#ifdef ARCH_arm
#define IP_REGISTER(context) (context->uc_mcontext.arm_pc)
#define STACK_REGISTER(context) (context->uc_mcontext.arm_sp)
#define THREAD_REGISTER(context) (context->uc_mcontext.arm_ip)
#define LINK_REGISTER(context) (context->uc_mcontext.arm_lr)
#else
#define IP_REGISTER(context) (context->uc_mcontext.pc)
#define STACK_REGISTER(context) (context->uc_mcontext.sp)
#define THREAD_REGISTER(context) (context->uc_mcontext.regs[19])
#define LINK_REGISTER(context) (context->uc_mcontext.regs[30])
#endif
#endif
#define VA_LIST(x) (&(x))
@ -76,7 +87,7 @@ inline void trap()
#ifdef _MSC_VER
__debugbreak();
#else
asm("bkpt");
asm("brk 0");
#endif
}
@ -162,6 +173,8 @@ inline bool atomicCompareAndSwap32(uint32_t* p, uint32_t old, uint32_t new_)
old, new_, reinterpret_cast<int32_t*>(p));
#elif(defined __QNX__)
return old == _smp_cmpxchg(p, old, new_);
#elif (defined ARCH_arm64)
return __sync_bool_compare_and_swap(p, old, new_);
#else
int r = __kernel_cmpxchg(
static_cast<int>(old), static_cast<int>(new_), reinterpret_cast<int*>(p));
@ -169,10 +182,22 @@ inline bool atomicCompareAndSwap32(uint32_t* p, uint32_t old, uint32_t new_)
#endif
}
#ifdef ARCH_arm64
inline bool atomicCompareAndSwap64(uint64_t* p, uint64_t old, uint64_t new_)
{
return __sync_bool_compare_and_swap(p, old, new_);
}
inline bool atomicCompareAndSwap(uintptr_t* p, uintptr_t old, uintptr_t new_)
{
return atomicCompareAndSwap64(reinterpret_cast<uint64_t*>(p), old, new_);
}
#else
inline bool atomicCompareAndSwap(uintptr_t* p, uintptr_t old, uintptr_t new_)
{
return atomicCompareAndSwap32(reinterpret_cast<uint32_t*>(p), old, new_);
}
#endif
inline uint64_t dynamicCall(void* function,
uintptr_t* arguments,
@ -181,17 +206,17 @@ inline uint64_t dynamicCall(void* function,
unsigned argumentsSize UNUSED,
unsigned returnType)
{
#ifdef __APPLE__
#if (defined __APPLE__) || (defined ARCH_arm64)
const unsigned Alignment = 1;
#else
const unsigned Alignment = 2;
#endif
const unsigned GprCount = 4;
const unsigned GprCount = BytesPerWord;
uintptr_t gprTable[GprCount];
unsigned gprIndex = 0;
const unsigned VfpCount = 16;
const unsigned VfpCount = BytesPerWord == 8 ? 8 : 16;
uintptr_t vfpTable[VfpCount];
unsigned vfpIndex = 0;
unsigned vfpBackfillIndex UNUSED = 0;
@ -206,7 +231,7 @@ inline uint64_t dynamicCall(void* function,
for (unsigned ati = 0; ati < argumentCount; ++ati) {
switch (argumentTypes[ati]) {
case DOUBLE_TYPE:
#if defined(__ARM_PCS_VFP)
#if (defined __ARM_PCS_VFP) || (defined ARCH_arm64)
{
if (vfpIndex + Alignment <= VfpCount) {
if (vfpIndex % Alignment) {

View File

@ -611,11 +611,11 @@ void intercept(Thread* t,
if (m) {
PROTECT(t, m);
m->flags() |= ACC_NATIVE;
if (updateRuntimeData) {
GcMethod* clone = methodClone(t, m);
m->flags() |= ACC_NATIVE;
// make clone private to prevent vtable updates at compilation
// time. Otherwise, our interception might be bypassed by calls
// through the vtable.
@ -628,6 +628,8 @@ void intercept(Thread* t,
GcMethodRuntimeData* runtimeData = getMethodRuntimeData(t, m);
runtimeData->setNative(t, native->as<GcNative>(t));
} else {
m->flags() |= ACC_NATIVE;
}
} else {
// If we can't find the method, just ignore it, since ProGuard may

View File

@ -116,6 +116,8 @@ typedef intptr_t intptr_alias_t;
#define ARCH_x86_64
#elif defined __arm__
#define ARCH_arm
#elif defined __aarch64__
#define ARCH_arm64
#else
#error "unsupported architecture"
#endif
@ -184,7 +186,7 @@ typedef intptr_t __attribute__((__may_alias__)) intptr_alias_t;
#error "Unsupported architecture"
#endif
#endif
#ifdef PLATFORM_WINDOWS
#define SO_PREFIX ""
#else

View File

@ -28,5 +28,6 @@
#define AVIAN_ARCH_X86 (1 << 8)
#define AVIAN_ARCH_X86_64 (2 << 8)
#define AVIAN_ARCH_ARM (3 << 8)
#define AVIAN_ARCH_ARM64 (4 << 8)
#endif

View File

@ -1576,6 +1576,10 @@ int64_t JNICALL
ZipFile::Entry* find(ZipFile* file, const char* path, unsigned pathLength)
{
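// entry names in the index are stored without a leading slash, so strip
// one before hashing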
if (pathLength > 0 && path[0] == '/') {
++path;
--pathLength;
}
unsigned i = hash(path) & (file->indexSize - 1);
for (ZipFile::Entry* e = file->index[i]; e; e = e->next) {
const uint8_t* p = e->start;
@ -1601,13 +1605,17 @@ int64_t JNICALL
memcpy(RUNTIME_ARRAY_BODY(p), path->body().begin(), path->length());
RUNTIME_ARRAY_BODY(p)[path->length()] = 0;
replace('\\', '/', RUNTIME_ARRAY_BODY(p));
if (addSlash) {
ZipFile::Entry *e = find(file, RUNTIME_ARRAY_BODY(p), path->length());
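// directory entries are stored with a trailing slash, so if the plain
// name misses, retry with a slash appended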
if (e == 0 and addSlash and RUNTIME_ARRAY_BODY(p)[path->length()] != '/') {
RUNTIME_ARRAY_BODY(p)[path->length()] = '/';
RUNTIME_ARRAY_BODY(p)[path->length() + 1] = 0;
e = find(file, RUNTIME_ARRAY_BODY(p), path->length());
}
return reinterpret_cast<int64_t>(
find(file, RUNTIME_ARRAY_BODY(p), path->length()));
return reinterpret_cast<int64_t>(e);
} else {
int64_t entry
= cast<GcLong>(t,
@ -1792,9 +1800,9 @@ void JNICALL freeZipFileEntry(Thread* t, GcMethod* method, uintptr_t* arguments)
0,
file->file,
entry->entry);
}
t->m->heap->free(entry, sizeof(ZipFile::Entry));
}
}
int64_t JNICALL

View File

@ -1,6 +1,5 @@
add_library (avian_codegen
compiler.cpp
registers.cpp
runtime.cpp
targets.cpp

View File

@ -256,10 +256,10 @@ Site* pickTargetSite(Context* c,
expect(c, target.cost < Target::Impossible);
if (target.type == lir::MemoryOperand) {
if (target.type == lir::Operand::Type::Memory) {
return frameSite(c, target.index);
} else {
return registerSite(c, target.index);
return registerSite(c, Register(target.index));
}
}
@ -342,7 +342,7 @@ Site* maybeMove(Context* c,
OperandMask src;
OperandMask tmp;
c->arch->planMove(
size, src, tmp, OperandMask(dstMask.typeMask, dstMask.registerMask));
size, src, tmp, OperandMask(dstMask.typeMask, dstMask.registerMask, 0));
SiteMask srcMask = SiteMask::lowPart(src);
for (SiteIterator it(c, value, true, includeNextWord); it.hasMore();) {
@ -369,7 +369,7 @@ Site* maybeMove(Context* c,
size,
src,
tmp,
OperandMask(1 << dstSite->type(c), dstSite->registerMask(c)));
OperandMask(1 << (unsigned)dstSite->type(c), dstSite->registerMask(c), 0));
SiteMask srcMask = SiteMask::lowPart(src);
unsigned cost = 0xFFFFFFFF;
@ -514,15 +514,15 @@ void steal(Context* c, Resource* r, Value* thief)
SiteMask generalRegisterMask(Context* c)
{
return SiteMask(1 << lir::RegisterOperand,
c->regFile->generalRegisters.mask,
return SiteMask(lir::Operand::RegisterPairMask,
c->regFile->generalRegisters,
NoFrameIndex);
}
SiteMask generalRegisterOrConstantMask(Context* c)
{
return SiteMask((1 << lir::RegisterOperand) | (1 << lir::ConstantOperand),
c->regFile->generalRegisters.mask,
return SiteMask(lir::Operand::RegisterPairMask | lir::Operand::ConstantMask,
c->regFile->generalRegisters,
NoFrameIndex);
}
@ -616,11 +616,11 @@ bool isHome(Value* v, int frameIndex)
bool acceptForResolve(Context* c, Site* s, Read* read, const SiteMask& mask)
{
if (acceptMatch(c, s, read, mask) and (not s->frozen(c))) {
if (s->type(c) == lir::RegisterOperand) {
if (s->type(c) == lir::Operand::Type::RegisterPair) {
return c->availableGeneralRegisterCount > ResolveRegisterReserveCount;
} else {
assertT(c,
s->match(c, SiteMask(1 << lir::MemoryOperand, 0, AnyFrameIndex)));
s->match(c, SiteMask(lir::Operand::MemoryMask, 0, AnyFrameIndex)));
return isHome(read->value,
offsetToFrameIndex(c, static_cast<MemorySite*>(s)->offset));
@ -698,7 +698,7 @@ void apply(Context* c,
{
assertT(c, s1Low->type(c) == s1High->type(c));
lir::OperandType s1Type = s1Low->type(c);
lir::Operand::Type s1Type = s1Low->type(c);
OperandUnion s1Union;
asAssemblerOperand(c, s1Low, s1High, &s1Union);
@ -717,11 +717,11 @@ void apply(Context* c,
assertT(c, s1Low->type(c) == s1High->type(c));
assertT(c, s2Low->type(c) == s2High->type(c));
lir::OperandType s1Type = s1Low->type(c);
lir::Operand::Type s1Type = s1Low->type(c);
OperandUnion s1Union;
asAssemblerOperand(c, s1Low, s1High, &s1Union);
lir::OperandType s2Type = s2Low->type(c);
lir::Operand::Type s2Type = s2Low->type(c);
OperandUnion s2Union;
asAssemblerOperand(c, s2Low, s2High, &s2Union);
@ -746,15 +746,15 @@ void apply(Context* c,
assertT(c, s2Low->type(c) == s2High->type(c));
assertT(c, s3Low->type(c) == s3High->type(c));
lir::OperandType s1Type = s1Low->type(c);
lir::Operand::Type s1Type = s1Low->type(c);
OperandUnion s1Union;
asAssemblerOperand(c, s1Low, s1High, &s1Union);
lir::OperandType s2Type = s2Low->type(c);
lir::Operand::Type s2Type = s2Low->type(c);
OperandUnion s2Union;
asAssemblerOperand(c, s2Low, s2High, &s2Union);
lir::OperandType s3Type = s3Low->type(c);
lir::Operand::Type s3Type = s3Low->type(c);
OperandUnion s3Union;
asAssemblerOperand(c, s3Low, s3High, &s3Union);
@ -782,7 +782,7 @@ void saveLocals(Context* c, Event* e)
e->addRead(
c,
local->value,
SiteMask(1 << lir::MemoryOperand, 0, compiler::frameIndex(c, li)));
SiteMask(lir::Operand::MemoryMask, 0, compiler::frameIndex(c, li)));
}
}
}
@ -815,10 +815,10 @@ void maybeMove(Context* c,
if (cost) {
// todo: let c->arch->planMove decide this:
bool useTemporary = ((target->type(c) == lir::MemoryOperand
and srcValue->source->type(c) == lir::MemoryOperand)
bool useTemporary = ((target->type(c) == lir::Operand::Type::Memory
and srcValue->source->type(c) == lir::Operand::Type::Memory)
or (srcSelectSize < dstSize
and target->type(c) != lir::RegisterOperand));
and target->type(c) != lir::Operand::Type::RegisterPair));
srcValue->source->freeze(c, srcValue);
@ -827,7 +827,7 @@ void maybeMove(Context* c,
srcValue->source->thaw(c, srcValue);
bool addOffset = srcSize != srcSelectSize and c->arch->bigEndian()
and srcValue->source->type(c) == lir::MemoryOperand;
and srcValue->source->type(c) == lir::Operand::Type::Memory;
if (addOffset) {
static_cast<MemorySite*>(srcValue->source)->offset
@ -874,14 +874,14 @@ void maybeMove(Context* c,
c->arch->planSource(op, dstSize, src, dstSize, &thunk);
if (isGeneralValue(srcValue)) {
src.registerMask &= c->regFile->generalRegisters.mask;
src.lowRegisterMask &= c->regFile->generalRegisters;
}
assertT(c, thunk == 0);
assertT(c, dstMask.typeMask & src.typeMask & (1 << lir::RegisterOperand));
assertT(c, dstMask.typeMask & src.typeMask & lir::Operand::RegisterPairMask);
Site* tmpTarget
= freeRegisterSite(c, dstMask.registerMask & src.registerMask);
= freeRegisterSite(c, dstMask.registerMask & src.lowRegisterMask);
srcValue->source->freeze(c, srcValue);
@ -1635,8 +1635,8 @@ bool resolveSourceSites(Context* c,
Read* r = live(c, v);
if (r and sites[el.localIndex] == 0) {
SiteMask mask((1 << lir::RegisterOperand) | (1 << lir::MemoryOperand),
c->regFile->generalRegisters.mask,
SiteMask mask(lir::Operand::RegisterPairMask | lir::Operand::MemoryMask,
c->regFile->generalRegisters,
AnyFrameIndex);
Site* s = pickSourceSite(
@ -1677,8 +1677,8 @@ void resolveTargetSites(Context* c,
Read* r = live(c, v);
if (r and sites[el.localIndex] == 0) {
SiteMask mask((1 << lir::RegisterOperand) | (1 << lir::MemoryOperand),
c->regFile->generalRegisters.mask,
SiteMask mask(lir::Operand::RegisterPairMask | lir::Operand::MemoryMask,
c->regFile->generalRegisters,
AnyFrameIndex);
Site* s = pickSourceSite(
@ -2210,24 +2210,24 @@ class Client : public Assembler::Client {
{
}
virtual int acquireTemporary(uint32_t mask)
virtual Register acquireTemporary(RegisterMask mask)
{
unsigned cost;
int r = pickRegisterTarget(c, 0, mask, &cost);
Register r = pickRegisterTarget(c, 0, mask, &cost);
expect(c, cost < Target::Impossible);
save(r);
c->registerResources[r].increment(c);
c->registerResources[r.index()].increment(c);
return r;
}
virtual void releaseTemporary(int r)
virtual void releaseTemporary(Register r)
{
c->registerResources[r].decrement(c);
c->registerResources[r.index()].decrement(c);
}
virtual void save(int r)
virtual void save(Register r)
{
RegisterResource* reg = c->registerResources + r;
RegisterResource* reg = c->registerResources + r.index();
assertT(c, reg->referenceCount == 0);
assertT(c, reg->freezeCount == 0);

View File

@ -53,19 +53,15 @@ Context::Context(vm::System* system,
- regFile->generalRegisters.start),
targetInfo(arch->targetInfo())
{
for (unsigned i = regFile->generalRegisters.start;
i < regFile->generalRegisters.limit;
++i) {
new (registerResources + i) RegisterResource(arch->reserved(i));
for (Register i : regFile->generalRegisters) {
new (registerResources + i.index()) RegisterResource(arch->reserved(i));
if (registerResources[i].reserved) {
if (registerResources[i.index()].reserved) {
--availableGeneralRegisterCount;
}
}
for (unsigned i = regFile->floatRegisters.start;
i < regFile->floatRegisters.limit;
++i) {
new (registerResources + i) RegisterResource(arch->reserved(i));
for (Register i : regFile->floatRegisters) {
new (registerResources + i.index()) RegisterResource(arch->reserved(i));
}
}

View File

@ -372,7 +372,7 @@ class CallEvent : public Event {
? arguments.count
: 0)
{
uint32_t registerMask = c->regFile->generalRegisters.mask;
RegisterMask registerMask = c->regFile->generalRegisters;
if (callingConvention == ir::CallingConvention::Native) {
assertT(c, (flags & Compiler::TailJump) == 0);
@ -396,14 +396,14 @@ class CallEvent : public Event {
SiteMask targetMask;
if (index + (c->arch->argumentRegisterAlignment() ? footprint : 1)
<= c->arch->argumentRegisterCount()) {
int number = c->arch->argumentRegister(index);
Register number = c->arch->argumentRegister(index);
if (DebugReads) {
fprintf(stderr, "reg %d arg read %p\n", number, v);
fprintf(stderr, "reg %d arg read %p\n", number.index(), v);
}
targetMask = SiteMask::fixedRegisterMask(number);
registerMask &= ~(1 << number);
registerMask = registerMask.excluding(number);
} else {
if (index < c->arch->argumentRegisterCount()) {
index = c->arch->argumentRegisterCount();
@ -415,7 +415,7 @@ class CallEvent : public Event {
fprintf(stderr, "stack %d arg read %p\n", frameIndex, v);
}
targetMask = SiteMask(1 << lir::MemoryOperand, 0, frameIndex);
targetMask = SiteMask(lir::Operand::MemoryMask, 0, frameIndex);
}
this->addRead(c, v, targetMask);
@ -445,7 +445,7 @@ class CallEvent : public Event {
this->addRead(
c,
address,
SiteMask(op.typeMask, registerMask & op.registerMask, AnyFrameIndex));
SiteMask(op.typeMask, registerMask & op.lowRegisterMask, AnyFrameIndex));
}
Stack* stack = stackBefore;
@ -512,7 +512,7 @@ class CallEvent : public Event {
this->addRead(c, v, generalRegisterMask(c));
} else {
this->addRead(
c, v, SiteMask(1 << lir::MemoryOperand, 0, frameIndex));
c, v, SiteMask(lir::Operand::MemoryMask, 0, frameIndex));
}
}
}
@ -544,7 +544,7 @@ class CallEvent : public Event {
this->addRead(c,
stack->value,
SiteMask(1 << lir::MemoryOperand, 0, logicalIndex));
SiteMask(lir::Operand::MemoryMask, 0, logicalIndex));
}
stack = stack->next;
@ -581,29 +581,29 @@ class CallEvent : public Event {
assertT(
c,
returnAddressSurrogate == 0
or returnAddressSurrogate->source->type(c) == lir::RegisterOperand);
or returnAddressSurrogate->source->type(c) == lir::Operand::Type::RegisterPair);
assertT(
c,
framePointerSurrogate == 0
or framePointerSurrogate->source->type(c) == lir::RegisterOperand);
or framePointerSurrogate->source->type(c) == lir::Operand::Type::RegisterPair);
int ras;
Register ras;
if (returnAddressSurrogate) {
returnAddressSurrogate->source->freeze(c, returnAddressSurrogate);
ras = static_cast<RegisterSite*>(returnAddressSurrogate->source)
->number;
} else {
ras = lir::NoRegister;
ras = NoRegister;
}
int fps;
Register fps;
if (framePointerSurrogate) {
framePointerSurrogate->source->freeze(c, framePointerSurrogate);
fps = static_cast<RegisterSite*>(framePointerSurrogate->source)->number;
} else {
fps = lir::NoRegister;
fps = NoRegister;
}
int offset = static_cast<int>(footprint)
@ -783,9 +783,9 @@ class MoveEvent : public Event {
op,
srcSelectSize,
OperandMask(
1 << srcValue->source->type(c),
(static_cast<uint64_t>(srcValue->nextWord->source->registerMask(c))
<< 32) | static_cast<uint64_t>(srcValue->source->registerMask(c))),
1 << (unsigned)srcValue->source->type(c),
srcValue->source->registerMask(c),
srcValue->nextWord->source->registerMask(c)),
dstSize,
dst);
@ -866,7 +866,7 @@ class MoveEvent : public Event {
assertT(c, srcSelectSize == c->targetInfo.pointerSize);
if (dstValue->nextWord->target or live(c, dstValue->nextWord)) {
assertT(c, dstLowMask.typeMask & (1 << lir::RegisterOperand));
assertT(c, dstLowMask.typeMask & lir::Operand::RegisterPairMask);
Site* low = freeRegisterSite(c, dstLowMask.registerMask);
@ -897,7 +897,7 @@ class MoveEvent : public Event {
srcValue->source->thaw(c, srcValue);
assertT(c, dstHighMask.typeMask & (1 << lir::RegisterOperand));
assertT(c, dstHighMask.typeMask & lir::Operand::RegisterPairMask);
Site* high = freeRegisterSite(c, dstHighMask.registerMask);
@ -1126,18 +1126,14 @@ class CombineEvent : public Event {
op,
firstValue->type.size(c->targetInfo),
OperandMask(
1 << firstValue->source->type(c),
(static_cast<uint64_t>(
firstValue->nextWord->source->registerMask(c))
<< 32)
| static_cast<uint64_t>(firstValue->source->registerMask(c))),
1 << (unsigned)firstValue->source->type(c),
firstValue->source->registerMask(c),
firstValue->nextWord->source->registerMask(c)),
secondValue->type.size(c->targetInfo),
OperandMask(
1 << secondValue->source->type(c),
(static_cast<uint64_t>(
secondValue->nextWord->source->registerMask(c))
<< 32)
| static_cast<uint64_t>(secondValue->source->registerMask(c))),
1 << (unsigned)secondValue->source->type(c),
secondValue->source->registerMask(c),
secondValue->nextWord->source->registerMask(c)),
resultValue->type.size(c->targetInfo),
cMask);
@ -1318,11 +1314,9 @@ class TranslateEvent : public Event {
op,
firstValue->type.size(c->targetInfo),
OperandMask(
1 << firstValue->source->type(c),
(static_cast<uint64_t>(
firstValue->nextWord->source->registerMask(c))
<< 32)
| static_cast<uint64_t>(firstValue->source->registerMask(c))),
1 << (unsigned)firstValue->source->type(c),
firstValue->source->registerMask(c),
firstValue->nextWord->source->registerMask(c)),
resultValue->type.size(c->targetInfo),
bMask);
@ -1457,7 +1451,7 @@ ConstantSite* findConstantSite(Context* c, Value* v)
{
for (SiteIterator it(c, v); it.hasMore();) {
Site* s = it.next();
if (s->type(c) == lir::ConstantOperand) {
if (s->type(c) == lir::Operand::Type::Constant) {
return static_cast<ConstantSite*>(s);
}
}
@ -1467,7 +1461,7 @@ ConstantSite* findConstantSite(Context* c, Value* v)
void moveIfConflict(Context* c, Value* v, MemorySite* s)
{
if (v->reads) {
SiteMask mask(1 << lir::RegisterOperand, ~0, AnyFrameIndex);
SiteMask mask(lir::Operand::RegisterPairMask, ~0, AnyFrameIndex);
v->reads->intersect(&mask);
if (s->conflicts(mask)) {
maybeMove(c, v->reads, true, false);
@ -1504,29 +1498,29 @@ class MemoryEvent : public Event {
virtual void compile(Context* c)
{
int indexRegister;
Register indexRegister;
int displacement = this->displacement;
unsigned scale = this->scale;
if (index) {
ConstantSite* constant = findConstantSite(c, index);
if (constant) {
indexRegister = lir::NoRegister;
indexRegister = NoRegister;
displacement += (constant->value->value() * scale);
scale = 1;
} else {
assertT(c, index->source->type(c) == lir::RegisterOperand);
assertT(c, index->source->type(c) == lir::Operand::Type::RegisterPair);
indexRegister = static_cast<RegisterSite*>(index->source)->number;
}
} else {
indexRegister = lir::NoRegister;
indexRegister = NoRegister;
}
assertT(c, base->source->type(c) == lir::RegisterOperand);
int baseRegister = static_cast<RegisterSite*>(base->source)->number;
assertT(c, base->source->type(c) == lir::Operand::Type::RegisterPair);
Register baseRegister = static_cast<RegisterSite*>(base->source)->number;
popRead(c, this, base);
if (index) {
if (c->targetInfo.pointerSize == 8 and indexRegister != lir::NoRegister) {
if (c->targetInfo.pointerSize == 8 and indexRegister != NoRegister) {
apply(c,
lir::Move,
4,
@ -1718,9 +1712,9 @@ class BranchEvent : public Event {
OperandMask dstMask;
c->arch->planDestination(op,
firstValue->type.size(c->targetInfo),
OperandMask(0, 0),
OperandMask(0, 0, 0),
firstValue->type.size(c->targetInfo),
OperandMask(0, 0),
OperandMask(0, 0, 0),
c->targetInfo.pointerSize,
dstMask);
@ -1879,12 +1873,12 @@ void clean(Context* c, Value* v, unsigned popIndex)
{
for (SiteIterator it(c, v); it.hasMore();) {
Site* s = it.next();
if (not(s->match(c, SiteMask(1 << lir::MemoryOperand, 0, AnyFrameIndex))
if (not(s->match(c, SiteMask(lir::Operand::MemoryMask, 0, AnyFrameIndex))
and offsetToFrameIndex(c, static_cast<MemorySite*>(s)->offset)
>= popIndex)) {
if (false
and s->match(c,
SiteMask(1 << lir::MemoryOperand, 0, AnyFrameIndex))) {
SiteMask(lir::Operand::MemoryMask, 0, AnyFrameIndex))) {
char buffer[256];
s->toString(c, buffer, 256);
fprintf(stderr,
@ -2016,7 +2010,7 @@ class BoundsCheckEvent : public Event {
lir::Constant handlerConstant(resolvedPromise(c, handler));
a->apply(lir::Call,
OperandInfo(c->targetInfo.pointerSize,
lir::ConstantOperand,
lir::Operand::Type::Constant,
&handlerConstant));
}
} else {
@ -2038,10 +2032,10 @@ class BoundsCheckEvent : public Event {
}
if (constant == 0 or constant->value->value() >= 0) {
assertT(c, object->source->type(c) == lir::RegisterOperand);
assertT(c, object->source->type(c) == lir::Operand::Type::RegisterPair);
MemorySite length(static_cast<RegisterSite*>(object->source)->number,
lengthOffset,
lir::NoRegister,
NoRegister,
1);
length.acquired = true;
@ -2072,7 +2066,7 @@ class BoundsCheckEvent : public Event {
lir::Constant handlerConstant(resolvedPromise(c, handler));
a->apply(lir::Call,
OperandInfo(c->targetInfo.pointerSize,
lir::ConstantOperand,
lir::Operand::Type::Constant,
&handlerConstant));
nextPromise->offset = a->offset();

View File

@ -205,7 +205,7 @@ Read* StubRead::next(Context*)
SingleRead* read(Context* c, const SiteMask& mask, Value* successor)
{
assertT(c,
(mask.typeMask != 1 << lir::MemoryOperand) or mask.frameIndex >= 0);
(mask.typeMask != lir::Operand::MemoryMask) or mask.frameIndex >= 0);
return new (c->zone) SingleRead(mask, successor);
}

View File

@ -57,24 +57,24 @@ unsigned resourceCost(Context* c,
}
bool pickRegisterTarget(Context* c,
int i,
Register i,
Value* v,
uint32_t mask,
int* target,
RegisterMask mask,
Register* target,
unsigned* cost,
CostCalculator* costCalculator)
{
if ((1 << i) & mask) {
RegisterResource* r = c->registerResources + i;
if (mask.contains(i)) {
RegisterResource* r = c->registerResources + i.index();
unsigned myCost
= resourceCost(
c,
v,
r,
SiteMask(1 << lir::RegisterOperand, 1 << i, NoFrameIndex),
SiteMask(lir::Operand::RegisterPairMask, RegisterMask(i), NoFrameIndex),
costCalculator) + Target::MinimumRegisterCost;
if ((static_cast<uint32_t>(1) << i) == mask) {
if (mask.containsExactly(i)) {
*cost = myCost;
return true;
} else if (myCost < *cost) {
@ -85,29 +85,25 @@ bool pickRegisterTarget(Context* c,
return false;
}
int pickRegisterTarget(Context* c,
Register pickRegisterTarget(Context* c,
Value* v,
uint32_t mask,
RegisterMask mask,
unsigned* cost,
CostCalculator* costCalculator)
{
int target = lir::NoRegister;
Register target = NoRegister;
*cost = Target::Impossible;
if (mask & c->regFile->generalRegisters.mask) {
for (int i = c->regFile->generalRegisters.limit - 1;
i >= c->regFile->generalRegisters.start;
--i) {
if (mask & c->regFile->generalRegisters) {
for (Register i : c->regFile->generalRegisters) {
if (pickRegisterTarget(c, i, v, mask, &target, cost, costCalculator)) {
return i;
}
}
}
if (mask & c->regFile->floatRegisters.mask) {
for (int i = c->regFile->floatRegisters.start;
i < static_cast<int>(c->regFile->floatRegisters.limit);
++i) {
if (mask & c->regFile->floatRegisters) {
for (Register i : c->regFile->floatRegisters) {
if (pickRegisterTarget(c, i, v, mask, &target, cost, costCalculator)) {
return i;
}
@ -119,12 +115,12 @@ int pickRegisterTarget(Context* c,
Target pickRegisterTarget(Context* c,
Value* v,
uint32_t mask,
RegisterMask mask,
CostCalculator* costCalculator)
{
unsigned cost;
int number = pickRegisterTarget(c, v, mask, &cost, costCalculator);
return Target(number, lir::RegisterOperand, cost);
Register number = pickRegisterTarget(c, v, mask, &cost, costCalculator);
return Target(number, cost);
}
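
The three overloads above form one search: the innermost helper costs a single candidate and returns true to end the scan early when the mask names exactly that register (containsExactly), and the driver tries general registers before float registers. A hedged usage sketch, mirroring how Client::acquireTemporary earlier in this diff consumes the result:

    // Sketch only: pick the cheapest usable general-purpose register
    // for a value, as Client::acquireTemporary does for temporaries.
    Register pickScratch(Context* c, Value* v)
    {
      unsigned cost;
      Register r = pickRegisterTarget(
          c, v, (RegisterMask)c->regFile->generalRegisters, &cost);
      expect(c, cost < Target::Impossible);  // no register: caller must spill
      return r;
    }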
unsigned frameCost(Context* c,
@ -135,7 +131,7 @@ unsigned frameCost(Context* c,
return resourceCost(c,
v,
c->frameResources + frameIndex,
SiteMask(1 << lir::MemoryOperand, 0, frameIndex),
SiteMask(lir::Operand::MemoryMask, 0, frameIndex),
costCalculator) + Target::MinimumFrameCost;
}
@ -147,7 +143,7 @@ Target pickFrameTarget(Context* c, Value* v, CostCalculator* costCalculator)
do {
if (p->home >= 0) {
Target mine(p->home,
lir::MemoryOperand,
lir::Operand::Type::Memory,
frameCost(c, v, p->home, costCalculator));
if (mine.cost == Target::MinimumFrameCost) {
@ -168,7 +164,7 @@ Target pickAnyFrameTarget(Context* c, Value* v, CostCalculator* costCalculator)
unsigned count = totalFrameSize(c);
for (unsigned i = 0; i < count; ++i) {
Target mine(i, lir::MemoryOperand, frameCost(c, v, i, costCalculator));
Target mine(i, lir::Operand::Type::Memory, frameCost(c, v, i, costCalculator));
if (mine.cost == Target::MinimumFrameCost) {
return mine;
} else if (mine.cost < best.cost) {
@ -186,7 +182,7 @@ Target pickTarget(Context* c,
Target best,
CostCalculator* costCalculator)
{
if (mask.typeMask & (1 << lir::RegisterOperand)) {
if (mask.typeMask & lir::Operand::RegisterPairMask) {
Target mine
= pickRegisterTarget(c, value, mask.registerMask, costCalculator);
@ -198,10 +194,10 @@ Target pickTarget(Context* c,
}
}
if (mask.typeMask & (1 << lir::MemoryOperand)) {
if (mask.typeMask & lir::Operand::MemoryMask) {
if (mask.frameIndex >= 0) {
Target mine(mask.frameIndex,
lir::MemoryOperand,
lir::Operand::Type::Memory,
frameCost(c, value, mask.frameIndex, costCalculator));
if (mine.cost == Target::MinimumFrameCost) {
return mine;
@ -234,14 +230,14 @@ Target pickTarget(Context* c,
Value* value = read->value;
uint32_t registerMask
= (isFloatValue(value) ? ~0 : c->regFile->generalRegisters.mask);
RegisterMask registerMask
= (isFloatValue(value) ? AnyRegisterMask : (RegisterMask)c->regFile->generalRegisters);
SiteMask mask(~0, registerMask, AnyFrameIndex);
read->intersect(&mask);
if (isFloatValue(value)) {
uint32_t floatMask = mask.registerMask & c->regFile->floatRegisters.mask;
RegisterMask floatMask = mask.registerMask & c->regFile->floatRegisters;
if (floatMask) {
mask.registerMask = floatMask;
}
@ -273,9 +269,9 @@ Target pickTarget(Context* c,
if (intersectRead) {
if (best.cost == Target::Impossible) {
fprintf(stderr,
"mask type %d reg %d frame %d\n",
"mask type %d reg %" LLD " frame %d\n",
mask.typeMask,
mask.registerMask,
(uint64_t)mask.registerMask,
mask.frameIndex);
abort(c);
}

View File

@ -55,13 +55,18 @@ class Target {
{
}
Target(int index, lir::OperandType type, unsigned cost)
Target(int16_t index, lir::Operand::Type type, unsigned cost)
: index(index), type(type), cost(cost)
{
}
Target(Register reg, unsigned cost)
: index(reg.index()), type(lir::Operand::Type::RegisterPair), cost(cost)
{
}
int16_t index;
lir::OperandType type;
lir::Operand::Type type;
uint8_t cost;
};
@ -77,22 +82,22 @@ unsigned resourceCost(Context* c,
CostCalculator* costCalculator);
bool pickRegisterTarget(Context* c,
int i,
Register i,
Value* v,
uint32_t mask,
int* target,
RegisterMask mask,
Register* target,
unsigned* cost,
CostCalculator* costCalculator = 0);
int pickRegisterTarget(Context* c,
Register pickRegisterTarget(Context* c,
Value* v,
uint32_t mask,
RegisterMask mask,
unsigned* cost,
CostCalculator* costCalculator = 0);
Target pickRegisterTarget(Context* c,
Value* v,
uint32_t mask,
RegisterMask mask,
CostCalculator* costCalculator = 0);
unsigned frameCost(Context* c,

View File

@ -88,7 +88,7 @@ void RegisterResource::freeze(Context* c, Value* v)
freezeResource(c, this, v);
if (freezeCount == 1
and ((1 << index(c)) & c->regFile->generalRegisters.mask)) {
and c->regFile->generalRegisters.contains(index(c))) {
decrementAvailableGeneralRegisterCount(c);
}
}
@ -100,7 +100,7 @@ void RegisterResource::thaw(Context* c, Value* v)
thawResource(c, this, v);
if (freezeCount == 0
and ((1 << index(c)) & c->regFile->generalRegisters.mask)) {
and c->regFile->generalRegisters.contains(index(c))) {
incrementAvailableGeneralRegisterCount(c);
}
}
@ -113,9 +113,9 @@ unsigned RegisterResource::toString(Context* c,
return vm::snprintf(buffer, bufferSize, "register %d", index(c));
}
unsigned RegisterResource::index(Context* c)
Register RegisterResource::index(Context* c)
{
return this - c->registerResources;
return Register(this - c->registerResources);
}
void RegisterResource::increment(Context* c)
@ -130,7 +130,7 @@ void RegisterResource::increment(Context* c)
++this->referenceCount;
if (this->referenceCount == 1
and ((1 << this->index(c)) & c->regFile->generalRegisters.mask)) {
and c->regFile->generalRegisters.contains(this->index(c))) {
decrementAvailableGeneralRegisterCount(c);
}
}
@ -150,7 +150,7 @@ void RegisterResource::decrement(Context* c)
--this->referenceCount;
if (this->referenceCount == 0
and ((1 << this->index(c)) & c->regFile->generalRegisters.mask)) {
and c->regFile->generalRegisters.contains(this->index(c))) {
incrementAvailableGeneralRegisterCount(c);
}
}

View File

@ -48,7 +48,7 @@ class RegisterResource : public Resource {
virtual unsigned toString(Context* c, char* buffer, unsigned bufferSize);
virtual unsigned index(Context*);
virtual Register index(Context*);
void increment(Context*);

View File

@ -152,7 +152,7 @@ class AddressSite : public Site {
virtual bool match(Context*, const SiteMask& mask)
{
return mask.typeMask & (1 << lir::AddressOperand);
return mask.typeMask & lir::Operand::AddressMask;
}
virtual bool loneMatch(Context*, const SiteMask&)
@ -165,9 +165,9 @@ class AddressSite : public Site {
abort(c);
}
virtual lir::OperandType type(Context*)
virtual lir::Operand::Type type(Context*)
{
return lir::AddressOperand;
return lir::Operand::Type::Address;
}
virtual void asAssemblerOperand(Context* c UNUSED,
@ -201,7 +201,7 @@ class AddressSite : public Site {
virtual SiteMask mask(Context*)
{
return SiteMask(1 << lir::AddressOperand, 0, NoFrameIndex);
return SiteMask(lir::Operand::AddressMask, 0, NoFrameIndex);
}
virtual SiteMask nextWordMask(Context* c, unsigned)
@ -217,14 +217,14 @@ Site* addressSite(Context* c, Promise* address)
return new (c->zone) AddressSite(address);
}
RegisterSite::RegisterSite(uint32_t mask, int number)
RegisterSite::RegisterSite(RegisterMask mask, Register number)
: mask_(mask), number(number)
{
}
unsigned RegisterSite::toString(Context*, char* buffer, unsigned bufferSize)
{
if (number != lir::NoRegister) {
if (number != NoRegister) {
return vm::snprintf(buffer, bufferSize, "%p register %d", this, number);
} else {
return vm::snprintf(
@ -234,11 +234,11 @@ unsigned RegisterSite::toString(Context*, char* buffer, unsigned bufferSize)
unsigned RegisterSite::copyCost(Context* c, Site* s)
{
assertT(c, number != lir::NoRegister);
assertT(c, number != NoRegister);
if (s and (this == s
or (s->type(c) == lir::RegisterOperand
and (static_cast<RegisterSite*>(s)->mask_ & (1 << number))))) {
or (s->type(c) == lir::Operand::Type::RegisterPair
and (static_cast<RegisterSite*>(s)->mask_.contains(number))))) {
return 0;
} else {
return RegisterCopyCost;
@ -247,10 +247,10 @@ unsigned RegisterSite::copyCost(Context* c, Site* s)
bool RegisterSite::match(Context* c UNUSED, const SiteMask& mask)
{
assertT(c, number != lir::NoRegister);
assertT(c, number != NoRegister);
if ((mask.typeMask & (1 << lir::RegisterOperand))) {
return ((static_cast<uint64_t>(1) << number) & mask.registerMask);
if ((mask.typeMask & lir::Operand::RegisterPairMask)) {
return mask.registerMask.contains(number);
} else {
return false;
}
@ -258,10 +258,10 @@ bool RegisterSite::match(Context* c UNUSED, const SiteMask& mask)
bool RegisterSite::loneMatch(Context* c UNUSED, const SiteMask& mask)
{
assertT(c, number != lir::NoRegister);
assertT(c, number != NoRegister);
if ((mask.typeMask & (1 << lir::RegisterOperand))) {
return ((static_cast<uint64_t>(1) << number) == mask.registerMask);
if ((mask.typeMask & lir::Operand::RegisterPairMask)) {
return mask.registerMask.containsExactly(number);
} else {
return false;
}
@ -269,28 +269,28 @@ bool RegisterSite::loneMatch(Context* c UNUSED, const SiteMask& mask)
bool RegisterSite::matchNextWord(Context* c, Site* s, unsigned)
{
assertT(c, number != lir::NoRegister);
assertT(c, number != NoRegister);
if (s->type(c) != lir::RegisterOperand) {
if (s->type(c) != lir::Operand::Type::RegisterPair) {
return false;
}
RegisterSite* rs = static_cast<RegisterSite*>(s);
unsigned size = rs->registerSize(c);
if (size > c->targetInfo.pointerSize) {
assertT(c, number != lir::NoRegister);
assertT(c, number != NoRegister);
return number == rs->number;
} else {
uint32_t mask = c->regFile->generalRegisters.mask;
return ((1 << number) & mask) and ((1 << rs->number) & mask);
RegisterMask mask = c->regFile->generalRegisters;
return mask.contains(number) and mask.contains(rs->number);
}
}
void RegisterSite::acquire(Context* c, Value* v)
{
Target target;
if (number != lir::NoRegister) {
target = Target(number, lir::RegisterOperand, 0);
if (number != NoRegister) {
target = Target(number, 0);
} else {
target = pickRegisterTarget(c, v, mask_);
expect(c, target.cost < Target::Impossible);
@ -299,65 +299,65 @@ void RegisterSite::acquire(Context* c, Value* v)
RegisterResource* resource = c->registerResources + target.index;
compiler::acquire(c, resource, v, this);
number = target.index;
number = Register(target.index);
}
void RegisterSite::release(Context* c, Value* v)
{
assertT(c, number != lir::NoRegister);
assertT(c, number != NoRegister);
compiler::release(c, c->registerResources + number, v, this);
compiler::release(c, c->registerResources + number.index(), v, this);
}
void RegisterSite::freeze(Context* c, Value* v)
{
assertT(c, number != lir::NoRegister);
assertT(c, number != NoRegister);
c->registerResources[number].freeze(c, v);
c->registerResources[number.index()].freeze(c, v);
}
void RegisterSite::thaw(Context* c, Value* v)
{
assertT(c, number != lir::NoRegister);
assertT(c, number != NoRegister);
c->registerResources[number].thaw(c, v);
c->registerResources[number.index()].thaw(c, v);
}
bool RegisterSite::frozen(Context* c UNUSED)
{
assertT(c, number != lir::NoRegister);
assertT(c, number != NoRegister);
return c->registerResources[number].freezeCount != 0;
return c->registerResources[number.index()].freezeCount != 0;
}
lir::OperandType RegisterSite::type(Context*)
lir::Operand::Type RegisterSite::type(Context*)
{
return lir::RegisterOperand;
return lir::Operand::Type::RegisterPair;
}
void RegisterSite::asAssemblerOperand(Context* c UNUSED,
Site* high,
lir::Operand* result)
{
assertT(c, number != lir::NoRegister);
assertT(c, number != NoRegister);
int highNumber;
Register highNumber;
if (high != this) {
highNumber = static_cast<RegisterSite*>(high)->number;
assertT(c, highNumber != lir::NoRegister);
assertT(c, highNumber != NoRegister);
} else {
highNumber = lir::NoRegister;
highNumber = NoRegister;
}
new (result) lir::Register(number, highNumber);
new (result) lir::RegisterPair(number, highNumber);
}
Site* RegisterSite::copy(Context* c)
{
uint32_t mask;
RegisterMask mask;
if (number != lir::NoRegister) {
mask = 1 << number;
if (number != NoRegister) {
mask = RegisterMask(number);
} else {
mask = mask_;
}
@ -377,64 +377,64 @@ Site* RegisterSite::copyHigh(Context* c)
Site* RegisterSite::makeNextWord(Context* c, unsigned)
{
assertT(c, number != lir::NoRegister);
assertT(c, ((1 << number) & c->regFile->generalRegisters.mask));
assertT(c, number != NoRegister);
assertT(c, c->regFile->generalRegisters.contains(number));
return freeRegisterSite(c, c->regFile->generalRegisters.mask);
return freeRegisterSite(c, c->regFile->generalRegisters);
}
SiteMask RegisterSite::mask(Context* c UNUSED)
{
return SiteMask(1 << lir::RegisterOperand, mask_, NoFrameIndex);
return SiteMask(lir::Operand::RegisterPairMask, mask_, NoFrameIndex);
}
SiteMask RegisterSite::nextWordMask(Context* c, unsigned)
{
assertT(c, number != lir::NoRegister);
assertT(c, number != NoRegister);
if (registerSize(c) > c->targetInfo.pointerSize) {
return SiteMask(1 << lir::RegisterOperand, number, NoFrameIndex);
return SiteMask(lir::Operand::RegisterPairMask, number, NoFrameIndex);
} else {
return SiteMask(1 << lir::RegisterOperand,
c->regFile->generalRegisters.mask,
return SiteMask(lir::Operand::RegisterPairMask,
c->regFile->generalRegisters,
NoFrameIndex);
}
}
unsigned RegisterSite::registerSize(Context* c)
{
assertT(c, number != lir::NoRegister);
assertT(c, number != NoRegister);
if ((1 << number) & c->regFile->floatRegisters.mask) {
if (c->regFile->floatRegisters.contains(number)) {
return c->arch->floatRegisterSize();
} else {
return c->targetInfo.pointerSize;
}
}
unsigned RegisterSite::registerMask(Context* c UNUSED)
RegisterMask RegisterSite::registerMask(Context* c UNUSED)
{
assertT(c, number != lir::NoRegister);
assertT(c, number != NoRegister);
return 1 << number;
return RegisterMask(number);
}
Site* registerSite(Context* c, int number)
Site* registerSite(Context* c, Register number)
{
assertT(c, number >= 0);
assertT(c, number != NoRegister);
assertT(c,
(1 << number) & (c->regFile->generalRegisters.mask
| c->regFile->floatRegisters.mask));
(c->regFile->generalRegisters
| c->regFile->floatRegisters).contains(number));
return new (c->zone) RegisterSite(1 << number, number);
return new (c->zone) RegisterSite(RegisterMask(number), number);
}
Site* freeRegisterSite(Context* c, uint32_t mask)
Site* freeRegisterSite(Context* c, RegisterMask mask)
{
return new (c->zone) RegisterSite(mask, lir::NoRegister);
return new (c->zone) RegisterSite(mask, NoRegister);
}
MemorySite::MemorySite(int base, int offset, int index, unsigned scale)
MemorySite::MemorySite(Register base, int offset, Register index, unsigned scale)
: acquired(false), base(base), offset(offset), index(index), scale(scale)
{
}
@ -453,7 +453,7 @@ unsigned MemorySite::copyCost(Context* c, Site* s)
{
assertT(c, acquired);
if (s and (this == s or (s->type(c) == lir::MemoryOperand
if (s and (this == s or (s->type(c) == lir::Operand::Type::Memory
and static_cast<MemorySite*>(s)->base == base
and static_cast<MemorySite*>(s)->offset == offset
and static_cast<MemorySite*>(s)->index == index
@ -466,20 +466,20 @@ unsigned MemorySite::copyCost(Context* c, Site* s)
bool MemorySite::conflicts(const SiteMask& mask)
{
return (mask.typeMask & (1 << lir::RegisterOperand)) != 0
and (((1 << base) & mask.registerMask) == 0
or (index != lir::NoRegister
and ((1 << index) & mask.registerMask) == 0));
return (mask.typeMask & lir::Operand::RegisterPairMask) != 0
and (!mask.registerMask.contains(base)
or (index != NoRegister
and !mask.registerMask.contains(index)));
}
bool MemorySite::match(Context* c, const SiteMask& mask)
{
assertT(c, acquired);
if (mask.typeMask & (1 << lir::MemoryOperand)) {
if (mask.typeMask & lir::Operand::MemoryMask) {
if (mask.frameIndex >= 0) {
if (base == c->arch->stack()) {
assertT(c, index == lir::NoRegister);
assertT(c, index == NoRegister);
return static_cast<int>(frameIndexToOffset(c, mask.frameIndex))
== offset;
} else {
@ -497,9 +497,9 @@ bool MemorySite::loneMatch(Context* c, const SiteMask& mask)
{
assertT(c, acquired);
if (mask.typeMask & (1 << lir::MemoryOperand)) {
if (mask.typeMask & lir::Operand::MemoryMask) {
if (base == c->arch->stack()) {
assertT(c, index == lir::NoRegister);
assertT(c, index == NoRegister);
if (mask.frameIndex == AnyFrameIndex) {
return false;
@ -513,7 +513,7 @@ bool MemorySite::loneMatch(Context* c, const SiteMask& mask)
bool MemorySite::matchNextWord(Context* c, Site* s, unsigned index)
{
if (s->type(c) == lir::MemoryOperand) {
if (s->type(c) == lir::Operand::Type::Memory) {
MemorySite* ms = static_cast<MemorySite*>(s);
return ms->base == this->base
and ((index == 1
@ -532,13 +532,13 @@ bool MemorySite::matchNextWord(Context* c, Site* s, unsigned index)
void MemorySite::acquire(Context* c, Value* v)
{
c->registerResources[base].increment(c);
if (index != lir::NoRegister) {
c->registerResources[index].increment(c);
c->registerResources[base.index()].increment(c);
if (index != NoRegister) {
c->registerResources[index.index()].increment(c);
}
if (base == c->arch->stack()) {
assertT(c, index == lir::NoRegister);
assertT(c, index == NoRegister);
assertT(c, not c->frameResources[offsetToFrameIndex(c, offset)].reserved);
compiler::acquire(
@ -551,16 +551,16 @@ void MemorySite::acquire(Context* c, Value* v)
void MemorySite::release(Context* c, Value* v)
{
if (base == c->arch->stack()) {
assertT(c, index == lir::NoRegister);
assertT(c, index == NoRegister);
assertT(c, not c->frameResources[offsetToFrameIndex(c, offset)].reserved);
compiler::release(
c, c->frameResources + offsetToFrameIndex(c, offset), v, this);
}
c->registerResources[base].decrement(c);
if (index != lir::NoRegister) {
c->registerResources[index].decrement(c);
c->registerResources[base.index()].decrement(c);
if (index != NoRegister) {
c->registerResources[index.index()].decrement(c);
}
acquired = false;
@ -571,9 +571,9 @@ void MemorySite::freeze(Context* c, Value* v)
if (base == c->arch->stack()) {
c->frameResources[offsetToFrameIndex(c, offset)].freeze(c, v);
} else {
c->registerResources[base].increment(c);
if (index != lir::NoRegister) {
c->registerResources[index].increment(c);
c->registerResources[base.index()].increment(c);
if (index != NoRegister) {
c->registerResources[index.index()].increment(c);
}
}
}
@ -583,9 +583,9 @@ void MemorySite::thaw(Context* c, Value* v)
if (base == c->arch->stack()) {
c->frameResources[offsetToFrameIndex(c, offset)].thaw(c, v);
} else {
c->registerResources[base].decrement(c);
if (index != lir::NoRegister) {
c->registerResources[index].decrement(c);
c->registerResources[base.index()].decrement(c);
if (index != NoRegister) {
c->registerResources[index.index()].decrement(c);
}
}
}
@ -596,9 +596,9 @@ bool MemorySite::frozen(Context* c)
and c->frameResources[offsetToFrameIndex(c, offset)].freezeCount != 0;
}
lir::OperandType MemorySite::type(Context*)
lir::Operand::Type MemorySite::type(Context*)
{
return lir::MemoryOperand;
return lir::Operand::Type::Memory;
}
void MemorySite::asAssemblerOperand(Context* c UNUSED,
@ -657,7 +657,7 @@ Site* MemorySite::makeNextWord(Context* c, unsigned index)
SiteMask MemorySite::mask(Context* c)
{
return SiteMask(1 << lir::MemoryOperand,
return SiteMask(lir::Operand::MemoryMask,
0,
(base == c->arch->stack())
? static_cast<int>(offsetToFrameIndex(c, offset))
@ -668,13 +668,13 @@ SiteMask MemorySite::nextWordMask(Context* c, unsigned index)
{
int frameIndex;
if (base == c->arch->stack()) {
assertT(c, this->index == lir::NoRegister);
assertT(c, this->index == NoRegister);
frameIndex = static_cast<int>(offsetToFrameIndex(c, offset))
+ ((index == 1) xor c->arch->bigEndian() ? 1 : -1);
} else {
frameIndex = NoFrameIndex;
}
return SiteMask(1 << lir::MemoryOperand, 0, frameIndex);
return SiteMask(lir::Operand::MemoryMask, 0, frameIndex);
}
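
The frame-index arithmetic above is easy to misread because of the xor: index 1 asks for the slot of the pair's high word, which sits one slot above the low word on little-endian targets and one below on big-endian ones. As a standalone statement of the rule (illustrative, not part of the source):

    // Which frame slot holds the other word of a two-word value?
    int nextWordFrameIndex(int frameIndex, unsigned index, bool bigEndian)
    {
      // index == 1 requests the high word; big-endian targets store the
      // pair in the opposite order, hence the xor (!= on booleans).
      return frameIndex + (((index == 1) != bigEndian) ? 1 : -1);
    }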
bool MemorySite::isVolatile(Context* c)
@ -683,9 +683,9 @@ bool MemorySite::isVolatile(Context* c)
}
MemorySite* memorySite(Context* c,
int base,
Register base,
int offset,
int index,
Register index,
unsigned scale)
{
return new (c->zone) MemorySite(base, offset, index, scale);
@ -697,7 +697,7 @@ MemorySite* frameSite(Context* c, int frameIndex)
return memorySite(c,
c->arch->stack(),
frameIndexToOffset(c, frameIndex),
lir::NoRegister,
NoRegister,
0);
}

View File

@ -34,30 +34,30 @@ class SiteMask {
{
}
SiteMask(uint8_t typeMask, uint32_t registerMask, int frameIndex)
SiteMask(uint8_t typeMask, RegisterMask registerMask, int frameIndex)
: typeMask(typeMask), registerMask(registerMask), frameIndex(frameIndex)
{
}
SiteMask intersectionWith(const SiteMask& b);
static SiteMask fixedRegisterMask(int number)
static SiteMask fixedRegisterMask(Register number)
{
return SiteMask(1 << lir::RegisterOperand, 1 << number, NoFrameIndex);
return SiteMask(lir::Operand::RegisterPairMask, 1 << number.index(), NoFrameIndex);
}
static SiteMask lowPart(const OperandMask& mask)
{
return SiteMask(mask.typeMask, mask.registerMask, AnyFrameIndex);
return SiteMask(mask.typeMask, mask.lowRegisterMask, AnyFrameIndex);
}
static SiteMask highPart(const OperandMask& mask)
{
return SiteMask(mask.typeMask, mask.registerMask >> 32, AnyFrameIndex);
return SiteMask(mask.typeMask, mask.highRegisterMask, AnyFrameIndex);
}
uint8_t typeMask;
uint32_t registerMask;
RegisterMask registerMask;
int frameIndex;
};
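
lowPart and highPart above show the shape of the reworked OperandMask: the old encoding packed both word masks into a single uint64_t (high << 32 | low), while the new one carries two RegisterMask fields plus a setter, as used throughout the ARM changes below. A sketch reconstructed from those call sites (the real declaration lives in the LIR headers and may differ in detail):

    class OperandMask {
     public:
      uint8_t typeMask;
      RegisterMask lowRegisterMask;   // candidates for the low word
      RegisterMask highRegisterMask;  // candidates for the high word

      OperandMask(uint8_t typeMask,
                  RegisterMask lowRegisterMask,
                  RegisterMask highRegisterMask)
          : typeMask(typeMask),
            lowRegisterMask(lowRegisterMask),
            highRegisterMask(highRegisterMask)
      {
      }

      void setLowHighRegisterMasks(RegisterMask low, RegisterMask high)
      {
        lowRegisterMask = low;
        highRegisterMask = high;
      }
    };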
@ -103,7 +103,7 @@ class Site {
return false;
}
virtual lir::OperandType type(Context*) = 0;
virtual lir::Operand::Type type(Context*) = 0;
virtual void asAssemblerOperand(Context*, Site*, lir::Operand*) = 0;
@ -121,9 +121,9 @@ class Site {
virtual unsigned registerSize(Context*);
virtual unsigned registerMask(Context*)
virtual RegisterMask registerMask(Context*)
{
return 0;
return RegisterMask(0);
}
virtual bool isVolatile(Context*)
@ -187,7 +187,7 @@ class ConstantSite : public Site {
virtual bool match(Context*, const SiteMask& mask)
{
return mask.typeMask & (1 << lir::ConstantOperand);
return mask.typeMask & lir::Operand::ConstantMask;
}
virtual bool loneMatch(Context*, const SiteMask&)
@ -197,12 +197,12 @@ class ConstantSite : public Site {
virtual bool matchNextWord(Context* c, Site* s, unsigned)
{
return s->type(c) == lir::ConstantOperand;
return s->type(c) == lir::Operand::Type::Constant;
}
virtual lir::OperandType type(Context*)
virtual lir::Operand::Type type(Context*)
{
return lir::ConstantOperand;
return lir::Operand::Type::Constant;
}
virtual void asAssemblerOperand(Context* c, Site* high, lir::Operand* result)
@ -236,12 +236,12 @@ class ConstantSite : public Site {
virtual SiteMask mask(Context*)
{
return SiteMask(1 << lir::ConstantOperand, 0, NoFrameIndex);
return SiteMask(lir::Operand::ConstantMask, 0, NoFrameIndex);
}
virtual SiteMask nextWordMask(Context*, unsigned)
{
return SiteMask(1 << lir::ConstantOperand, 0, NoFrameIndex);
return SiteMask(lir::Operand::ConstantMask, 0, NoFrameIndex);
}
Promise* value;
@ -251,7 +251,7 @@ Site* addressSite(Context* c, Promise* address);
class RegisterSite : public Site {
public:
RegisterSite(uint32_t mask, int number);
RegisterSite(RegisterMask mask, Register number);
virtual unsigned toString(Context*, char* buffer, unsigned bufferSize);
@ -273,7 +273,7 @@ class RegisterSite : public Site {
virtual bool frozen(Context* c UNUSED);
virtual lir::OperandType type(Context*);
virtual lir::Operand::Type type(Context*);
virtual void asAssemblerOperand(Context* c UNUSED,
Site* high,
@ -293,18 +293,18 @@ class RegisterSite : public Site {
virtual unsigned registerSize(Context* c);
virtual unsigned registerMask(Context* c UNUSED);
virtual RegisterMask registerMask(Context* c UNUSED);
uint32_t mask_;
int number;
RegisterMask mask_;
Register number;
};
Site* registerSite(Context* c, int number);
Site* freeRegisterSite(Context* c, uint32_t mask);
Site* registerSite(Context* c, Register number);
Site* freeRegisterSite(Context* c, RegisterMask mask);
class MemorySite : public Site {
public:
MemorySite(int base, int offset, int index, unsigned scale);
MemorySite(Register base, int offset, Register index, unsigned scale);
virtual unsigned toString(Context*, char* buffer, unsigned bufferSize);
@ -328,7 +328,7 @@ class MemorySite : public Site {
virtual bool frozen(Context* c);
virtual lir::OperandType type(Context*);
virtual lir::Operand::Type type(Context*);
virtual void asAssemblerOperand(Context* c UNUSED,
Site* high UNUSED,
@ -351,16 +351,16 @@ class MemorySite : public Site {
virtual bool isVolatile(Context* c);
bool acquired;
int base;
Register base;
int offset;
int index;
Register index;
unsigned scale;
};
MemorySite* memorySite(Context* c,
int base,
Register base,
int offset = 0,
int index = lir::NoRegister,
Register index = NoRegister,
unsigned scale = 1);
MemorySite* frameSite(Context* c, int frameIndex);

View File

@ -1,35 +0,0 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include <avian/codegen/registers.h>
namespace avian {
namespace codegen {
unsigned RegisterMask::maskStart(uint32_t mask)
{
for (int i = 0; i <= 31; ++i) {
if (mask & (1 << i))
return i;
}
return 32;
}
unsigned RegisterMask::maskLimit(uint32_t mask)
{
for (int i = 31; i >= 0; --i) {
if (mask & (1 << i))
return i + 1;
}
return 0;
}
} // namespace codegen
} // namespace avian
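
This deletion removes the last raw-bitmask helpers: maskStart and maskLimit scanned a uint32_t for its lowest and highest set bits so callers could loop over register indices. The rest of the diff replaces such loops with typed Register/RegisterMask values. A minimal sketch of the interface the new call sites assume, reconstructed from usage in this commit (contains, containsExactly, excluding, construction from a Register); the actual header may differ:

    #include <stdint.h>

    class Register {
     public:
      explicit constexpr Register(int8_t index) : _index(index) { }
      constexpr int8_t index() const { return _index; }
      constexpr bool operator==(Register o) const { return _index == o._index; }
      constexpr bool operator!=(Register o) const { return _index != o._index; }

     private:
      int8_t _index;
    };

    constexpr Register NoRegister(-1);

    class RegisterMask {
     public:
      constexpr RegisterMask(uint64_t mask = 0) : mask_(mask) { }
      explicit constexpr RegisterMask(Register r) : mask_(1ull << r.index()) { }

      constexpr bool contains(Register r) const
      {
        return (mask_ & (1ull << r.index())) != 0;
      }
      constexpr bool containsExactly(Register r) const
      {
        return mask_ == (1ull << r.index());
      }
      constexpr RegisterMask excluding(Register r) const
      {
        return RegisterMask(mask_ & ~(1ull << r.index()));
      }
      constexpr RegisterMask operator&(RegisterMask o) const
      {
        return RegisterMask(mask_ & o.mask_);
      }
      constexpr explicit operator bool() const { return mask_ != 0; }
      constexpr explicit operator uint64_t() const { return mask_; }

     private:
      uint64_t mask_;
    };

    // The register-file ranges (e.g. regFile->generalRegisters) additionally
    // support range-for iteration yielding Register values, replacing the
    // maskStart/maskLimit index loops deleted above.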

View File

@ -4,5 +4,6 @@ add_library(avian_codegen_arm
context.cpp
fixup.cpp
multimethod.cpp
operations.cpp
operations32.cpp
operations64.cpp
)

View File

@ -39,7 +39,7 @@ namespace isa {
bool vfpSupported()
{
// TODO: Use runtime detection
#if defined(__ARM_PCS_VFP)
#if (defined __ARM_PCS_VFP) || (defined ARCH_arm64)
// armhf
return true;
#else
@ -52,17 +52,12 @@ bool vfpSupported()
}
} // namespace isa
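
The TODO above wants the VFP check done at run time rather than with a preprocessor test. On Linux the usual way is a one-time query of the ELF auxiliary vector; a sketch under that assumption (illustrative only, not what this port currently does):

    #if defined(__linux__) && defined(__arm__)
    #include <sys/auxv.h>   // getauxval, AT_HWCAP
    #include <asm/hwcap.h>  // HWCAP_VFP

    bool vfpSupportedAtRuntime()
    {
      // Query the kernel's hardware-capability bits once; cache the result.
      static const bool supported = (getauxval(AT_HWCAP) & HWCAP_VFP) != 0;
      return supported;
    }
    #endif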
inline unsigned lo8(int64_t i)
{
return (unsigned)(i & MASK_LO8);
}
const RegisterFile MyRegisterFileWithoutFloats(GPR_MASK, 0);
const RegisterFile MyRegisterFileWithFloats(GPR_MASK, FPR_MASK);
const unsigned FrameHeaderSize = 1;
const unsigned FrameHeaderSize = TargetBytesPerWord / 4;
const unsigned StackAlignmentInBytes = 8;
const unsigned StackAlignmentInBytes = TargetBytesPerWord * 2;
const unsigned StackAlignmentInWords = StackAlignmentInBytes
/ TargetBytesPerWord;
@ -94,11 +89,11 @@ void nextFrame(ArchitectureContext* con,
void** stack)
{
assertT(con, *ip >= start);
assertT(con, *ip <= start + (size / TargetBytesPerWord));
assertT(con, *ip <= start + (size / 4));
uint32_t* instruction = static_cast<uint32_t*>(*ip);
if ((*start >> 20) == 0xe59) {
if ((*start >> 20) == (TargetBytesPerWord == 8 ? 0xf94 : 0xe59)) {
// skip stack overflow check
start += 3;
}
@ -116,7 +111,8 @@ void nextFrame(ArchitectureContext* con,
return;
}
if (*instruction == 0xe12fff1e) { // return
if (*instruction == (TargetBytesPerWord == 8 ? 0xd61f03c0 : 0xe12fff1e)) {
// return
*ip = link;
return;
}
@ -129,7 +125,20 @@ void nextFrame(ArchitectureContext* con,
// check for post-non-tail-call stack adjustment of the form "sub
// sp, sp, #offset":
if ((*instruction >> 12) == 0xe24dd) {
if (TargetBytesPerWord == 8 and (*instruction & 0xff0003ff) == 0xd10003ff) {
unsigned value = (*instruction >> 10) & 0xfff;
unsigned shift = (*instruction >> 22) & 1;
switch (shift) {
case 0:
offset -= value / TargetBytesPerWord;
break;
case 1:
offset -= (value << 12) / TargetBytesPerWord;
break;
default:
abort(con);
}
} else if (TargetBytesPerWord == 4 and (*instruction >> 12) == 0xe24dd) {
unsigned value = *instruction & 0xff;
unsigned rotation = (*instruction >> 8) & 0xf;
switch (rotation) {
@ -169,39 +178,39 @@ class MyArchitecture : public Architecture {
: &MyRegisterFileWithoutFloats;
}
virtual int scratch()
virtual Register scratch()
{
return 5;
return Register(5);
}
virtual int stack()
virtual Register stack()
{
return StackRegister;
}
virtual int thread()
virtual Register thread()
{
return ThreadRegister;
}
virtual int returnLow()
virtual Register returnLow()
{
return 0;
return Register(0);
}
virtual int returnHigh()
virtual Register returnHigh()
{
return 1;
return Register(1);
}
virtual int virtualCallTarget()
virtual Register virtualCallTarget()
{
return 4;
return Register(4);
}
virtual int virtualCallIndex()
virtual Register virtualCallIndex()
{
return 3;
return Register(3);
}
virtual ir::TargetInfo targetInfo()
@ -219,13 +228,14 @@ class MyArchitecture : public Architecture {
return 0x1FFFFFF;
}
virtual bool reserved(int register_)
virtual bool reserved(Register register_)
{
switch (register_) {
case LinkRegister:
case StackRegister:
case ThreadRegister:
case ProgramCounter:
switch (register_.index()) {
case LinkRegister.index():
case FrameRegister.index():
case StackRegister.index():
case ThreadRegister.index():
case ProgramCounter.index():
return true;
default:
@ -263,14 +273,14 @@ class MyArchitecture : public Architecture {
virtual unsigned argumentRegisterCount()
{
return 4;
return TargetBytesPerWord;
}
virtual int argumentRegister(unsigned index)
virtual Register argumentRegister(unsigned index)
{
assertT(&con, index < argumentRegisterCount());
return index;
return Register(index);
}
virtual bool hasLinkRegister()
@ -311,8 +321,13 @@ class MyArchitecture : public Architecture {
case lir::AlignedLongCall:
case lir::AlignedLongJump: {
uint32_t* p = static_cast<uint32_t*>(returnAddress) - 2;
*reinterpret_cast<void**>(p + (((*p & PoolOffsetMask) + 8) / 4))
= newTarget;
if (TargetBytesPerWord == 8) {
const int32_t mask = (PoolOffsetMask >> 2) << 5;
*reinterpret_cast<void**>(p + ((*p & mask) >> 5)) = newTarget;
} else {
*reinterpret_cast<void**>(p + (((*p & PoolOffsetMask) + 8) / 4))
= newTarget;
}
} break;
default:
@ -401,8 +416,8 @@ class MyArchitecture : public Architecture {
OperandMask& aMask,
bool* thunk)
{
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
aMask.registerMask = ~static_cast<uint64_t>(0);
aMask.typeMask = lir::Operand::RegisterPairMask | lir::Operand::ConstantMask;
aMask.setLowHighRegisterMasks(AnyRegisterMask, AnyRegisterMask);
*thunk = false;
}
@ -414,12 +429,12 @@ class MyArchitecture : public Architecture {
{
*thunk = false;
aMask.typeMask = ~0;
aMask.registerMask = GPR_MASK64;
aMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
switch (op) {
case lir::Negate:
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = GPR_MASK64;
aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
break;
case lir::Absolute:
@ -431,30 +446,30 @@ class MyArchitecture : public Architecture {
case lir::FloatNegate:
case lir::Float2Float:
if (vfpSupported()) {
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = FPR_MASK64;
aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.setLowHighRegisterMasks(FPR_MASK, FPR_MASK);
} else {
*thunk = true;
}
break;
case lir::Float2Int:
// todo: Java requires different semantics than SSE for
// todo: Java requires different semantics than VFP for
// converting floats to integers, so we need to either use
// thunks or produce inline machine code which handles edge
// cases properly.
if (false && vfpSupported() && bSize == 4) {
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = FPR_MASK64;
if (false && vfpSupported() && bSize <= TargetBytesPerWord) {
aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.setLowHighRegisterMasks(FPR_MASK, FPR_MASK);
} else {
*thunk = true;
}
break;
case lir::Int2Float:
if (vfpSupported() && aSize == 4) {
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = GPR_MASK64;
if (vfpSupported() && aSize <= TargetBytesPerWord) {
aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
} else {
*thunk = true;
}
@ -471,13 +486,13 @@ class MyArchitecture : public Architecture {
unsigned,
OperandMask& bMask)
{
bMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
bMask.registerMask = GPR_MASK64;
bMask.typeMask = lir::Operand::RegisterPairMask | lir::Operand::MemoryMask;
bMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
switch (op) {
case lir::Negate:
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = GPR_MASK64;
bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
break;
case lir::FloatAbsolute:
@ -485,18 +500,18 @@ class MyArchitecture : public Architecture {
case lir::FloatNegate:
case lir::Float2Float:
case lir::Int2Float:
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = FPR_MASK64;
bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.setLowHighRegisterMasks(FPR_MASK, FPR_MASK);
break;
case lir::Float2Int:
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = GPR_MASK64;
bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
break;
case lir::Move:
if (!(aMask.typeMask & 1 << lir::RegisterOperand)) {
bMask.typeMask = 1 << lir::RegisterOperand;
if (!(aMask.typeMask & lir::Operand::RegisterPairMask)) {
bMask.typeMask = lir::Operand::RegisterPairMask;
}
break;
@ -511,21 +526,21 @@ class MyArchitecture : public Architecture {
const OperandMask& dstMask)
{
srcMask.typeMask = ~0;
srcMask.registerMask = ~static_cast<uint64_t>(0);
srcMask.setLowHighRegisterMasks(AnyRegisterMask, AnyRegisterMask);
tmpMask.typeMask = 0;
tmpMask.registerMask = 0;
tmpMask.setLowHighRegisterMasks(0, 0);
if (dstMask.typeMask & (1 << lir::MemoryOperand)) {
if (dstMask.typeMask & lir::Operand::MemoryMask) {
// can't move directly from memory or constant to memory
srcMask.typeMask = 1 << lir::RegisterOperand;
tmpMask.typeMask = 1 << lir::RegisterOperand;
tmpMask.registerMask = GPR_MASK64;
} else if (vfpSupported() && dstMask.typeMask & 1 << lir::RegisterOperand
&& dstMask.registerMask & FPR_MASK) {
srcMask.typeMask = tmpMask.typeMask = 1 << lir::RegisterOperand
| 1 << lir::MemoryOperand;
tmpMask.registerMask = ~static_cast<uint64_t>(0);
srcMask.typeMask = lir::Operand::RegisterPairMask;
tmpMask.typeMask = lir::Operand::RegisterPairMask;
tmpMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
} else if (vfpSupported() && dstMask.typeMask & lir::Operand::RegisterPairMask
&& dstMask.lowRegisterMask & FPR_MASK) {
srcMask.typeMask = tmpMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
tmpMask.setLowHighRegisterMasks(AnyRegisterMask, AnyRegisterMask);
}
}
@ -537,11 +552,11 @@ class MyArchitecture : public Architecture {
unsigned,
bool* thunk)
{
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
aMask.registerMask = GPR_MASK64;
aMask.typeMask = lir::Operand::RegisterPairMask | lir::Operand::ConstantMask;
aMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = GPR_MASK64;
bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
*thunk = false;
@ -549,8 +564,8 @@ class MyArchitecture : public Architecture {
case lir::ShiftLeft:
case lir::ShiftRight:
case lir::UnsignedShiftRight:
if (bSize == 8)
aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
if (bSize > TargetBytesPerWord)
aMask.typeMask = bMask.typeMask = lir::Operand::RegisterPairMask;
break;
case lir::Add:
@ -558,9 +573,14 @@ class MyArchitecture : public Architecture {
case lir::Or:
case lir::Xor:
case lir::Multiply:
aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
aMask.typeMask = bMask.typeMask = lir::Operand::RegisterPairMask;
break;
// todo: Although ARM has instructions for integer division and
// remainder, they don't trap on division by zero, which is why
// we use thunks. Alternatively, we could generate inline code
// with an explicit zero check, which would probably be a bit
// faster.
case lir::Divide:
case lir::Remainder:
case lir::FloatRemainder:
@ -572,8 +592,9 @@ class MyArchitecture : public Architecture {
case lir::FloatMultiply:
case lir::FloatDivide:
if (vfpSupported()) {
aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = bMask.registerMask = FPR_MASK64;
aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.setLowHighRegisterMasks(FPR_MASK, FPR_MASK);
bMask = aMask;
} else {
*thunk = true;
}
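
The comment added above explains why Divide, Remainder, and FloatRemainder stay thunked on ARM: the hardware divide does not trap on a zero divisor, but Java mandates an ArithmeticException (and defines MIN_VALUE / -1 to wrap rather than trap). A sketch of the semantics any such thunk or inline sequence must provide (the Thread and throw helper names are illustrative, not the runtime's actual entry points):

    #include <stdint.h>

    // Illustrative only: Java semantics for 32-bit integer division.
    int32_t divideInt(Thread* t, int32_t a, int32_t b)
    {
      if (b == 0) {
        throwArithmeticException(t);  // hypothetical helper: "/ by zero"
      }
      if (a == INT32_MIN and b == -1) {
        return INT32_MIN;  // overflow case: must wrap, not trap
      }
      return a / b;
    }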
@ -590,8 +611,9 @@ class MyArchitecture : public Architecture {
case lir::JumpIfFloatLessOrEqualOrUnordered:
case lir::JumpIfFloatGreaterOrEqualOrUnordered:
if (vfpSupported()) {
aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = bMask.registerMask = FPR_MASK64;
aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.setLowHighRegisterMasks(FPR_MASK, FPR_MASK);
bMask = aMask;
} else {
*thunk = true;
}
@ -611,11 +633,12 @@ class MyArchitecture : public Architecture {
OperandMask& cMask)
{
if (isBranch(op)) {
cMask.typeMask = (1 << lir::ConstantOperand);
cMask.registerMask = 0;
cMask.typeMask = lir::Operand::ConstantMask;
cMask.setLowHighRegisterMasks(0, 0);
} else {
cMask.typeMask = (1 << lir::RegisterOperand);
cMask.registerMask = bMask.registerMask;
cMask.typeMask = lir::Operand::RegisterPairMask;
cMask.lowRegisterMask = bMask.lowRegisterMask;
cMask.highRegisterMask = bMask.highRegisterMask;
}
}
@ -658,7 +681,7 @@ class MyAssembler : public Assembler {
virtual void checkStackOverflow(uintptr_t handler,
unsigned stackLimitOffsetFromThread)
{
lir::Register stack(StackRegister);
lir::RegisterPair stack(StackRegister);
lir::Memory stackLimit(ThreadRegister, stackLimitOffsetFromThread);
lir::Constant handlerConstant(new (con.zone) ResolvedPromise(handler));
branchRM(&con,
@ -671,11 +694,11 @@ class MyAssembler : public Assembler {
virtual void saveFrame(unsigned stackOffset, unsigned ipOffset)
{
lir::Register link(LinkRegister);
lir::RegisterPair link(LinkRegister);
lir::Memory linkDst(ThreadRegister, ipOffset);
moveRM(&con, TargetBytesPerWord, &link, TargetBytesPerWord, &linkDst);
lir::Register stack(StackRegister);
lir::RegisterPair stack(StackRegister);
lir::Memory stackDst(ThreadRegister, stackOffset);
moveRM(&con, TargetBytesPerWord, &stack, TargetBytesPerWord, &stackDst);
}
@ -684,7 +707,7 @@ class MyAssembler : public Assembler {
{
struct Argument {
unsigned size;
lir::OperandType type;
lir::Operand::Type type;
lir::Operand* operand;
};
RUNTIME_ARRAY(Argument, arguments, argumentCount);
@ -695,7 +718,7 @@ class MyAssembler : public Assembler {
for (unsigned i = 0; i < argumentCount; ++i) {
RUNTIME_ARRAY_BODY(arguments)[i].size = va_arg(a, unsigned);
RUNTIME_ARRAY_BODY(arguments)[i].type
= static_cast<lir::OperandType>(va_arg(a, int));
= static_cast<lir::Operand::Type>(va_arg(a, int));
RUNTIME_ARRAY_BODY(arguments)[i].operand = va_arg(a, lir::Operand*);
footprint += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size,
TargetBytesPerWord);
@ -707,7 +730,7 @@ class MyAssembler : public Assembler {
unsigned offset = 0;
for (unsigned i = 0; i < argumentCount; ++i) {
if (i < arch_->argumentRegisterCount()) {
lir::Register dst(arch_->argumentRegister(i));
lir::RegisterPair dst(arch_->argumentRegister(i));
apply(lir::Move,
OperandInfo(RUNTIME_ARRAY_BODY(arguments)[i].size,
@ -715,7 +738,7 @@ class MyAssembler : public Assembler {
RUNTIME_ARRAY_BODY(arguments)[i].operand),
OperandInfo(pad(RUNTIME_ARRAY_BODY(arguments)[i].size,
TargetBytesPerWord),
lir::RegisterOperand,
lir::Operand::Type::RegisterPair,
&dst));
offset += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size,
@ -729,7 +752,7 @@ class MyAssembler : public Assembler {
RUNTIME_ARRAY_BODY(arguments)[i].operand),
OperandInfo(pad(RUNTIME_ARRAY_BODY(arguments)[i].size,
TargetBytesPerWord),
lir::MemoryOperand,
lir::Operand::Type::Memory,
&dst));
offset += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size,
@ -747,24 +770,50 @@ class MyAssembler : public Assembler {
// how to handle them:
assertT(&con, footprint < 256);
lir::Register stack(StackRegister);
ResolvedPromise footprintPromise(footprint * TargetBytesPerWord);
lir::Constant footprintConstant(&footprintPromise);
subC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack);
// todo: the ARM ABI says the frame preamble should be of the form
//
// stp x29, x30, [sp,#-footprint]!
// mov x29, sp
//
// and the frame should be popped with e.g.
//
// ldp x29, x30, [sp],#footprint
// br x30
//
// However, that will invalidate a lot of assumptions elsewhere
// about the return address being stored at the opposite end of
// the frame, so lots of other code will need to change before we
// can do that. The code below can be enabled as a starting point
// when we're ready to tackle that.
if (false and TargetBytesPerWord == 8) {
// stp x29, x30, [sp,#-footprint]!
con.code.append4(0xa9800000 | ((-footprint & 0x7f) << 15)
| (StackRegister.index() << 5)
| (LinkRegister.index() << 10) | FrameRegister.index());
lir::Register returnAddress(LinkRegister);
lir::Memory returnAddressDst(StackRegister,
(footprint - 1) * TargetBytesPerWord);
moveRM(&con,
TargetBytesPerWord,
&returnAddress,
TargetBytesPerWord,
&returnAddressDst);
lir::RegisterPair stack(StackRegister);
lir::RegisterPair frame(FrameRegister);
moveRR(&con, TargetBytesPerWord, &stack, TargetBytesPerWord, &frame);
} else {
lir::RegisterPair stack(StackRegister);
ResolvedPromise footprintPromise(footprint * TargetBytesPerWord);
lir::Constant footprintConstant(&footprintPromise);
subC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack);
lir::RegisterPair returnAddress(LinkRegister);
lir::Memory returnAddressDst(StackRegister,
(footprint - 1) * TargetBytesPerWord);
moveRM(&con,
TargetBytesPerWord,
&returnAddress,
TargetBytesPerWord,
&returnAddressDst);
}
}
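
The raw word appended in the disabled branch above is the A64 store-pair, pre-indexed encoding the comment spells out: imm7 (scaled by the 8-byte register size) at bits 15-21, Rt2 at bits 10-14, the base Rn at bits 5-9, and Rt at bits 0-4. Splitting the literal into fields makes the correspondence explicit; a sketch assuming StackRegister/LinkRegister/FrameRegister are sp/x30/x29 (indices 31/30/29), as the reserved() switch above suggests:

    // A64 "stp xRt, xRt2, [xRn, #imm7*8]!" (64-bit, pre-indexed).
    uint32_t stpPreIndex(int imm7, int rt, int rt2, int rn)
    {
      return 0xa9800000               // base opcode: STP, 64-bit, pre-index
             | ((imm7 & 0x7f) << 15)  // scaled, two's-complement offset
             | (rt2 << 10) | (rn << 5) | rt;
    }

    // The disabled preamble above is then:
    //   stpPreIndex(-footprint, 29 /*x29*/, 30 /*x30*/, 31 /*sp*/)
    // and the epilogue word 0xa8c00000 | ... in popFrame is the matching
    // post-indexed LDP that pops the same pair.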
virtual void adjustFrame(unsigned difference)
{
lir::Register stack(StackRegister);
lir::RegisterPair stack(StackRegister);
ResolvedPromise differencePromise(difference * TargetBytesPerWord);
lir::Constant differenceConstant(&differencePromise);
subC(&con, TargetBytesPerWord, &differenceConstant, &stack, &stack);
@ -774,33 +823,40 @@ class MyAssembler : public Assembler {
{
footprint += FrameHeaderSize;
lir::Register returnAddress(LinkRegister);
lir::Memory returnAddressSrc(StackRegister,
(footprint - 1) * TargetBytesPerWord);
moveMR(&con,
TargetBytesPerWord,
&returnAddressSrc,
TargetBytesPerWord,
&returnAddress);
// see comment regarding the ARM64 ABI in allocateFrame
if (false and TargetBytesPerWord == 8) {
// ldp x29, x30, [sp],#footprint
con.code.append4(0xa8c00000 | (footprint << 15) | (31 << 5) | (30 << 10)
| 29);
} else {
lir::RegisterPair returnAddress(LinkRegister);
lir::Memory returnAddressSrc(StackRegister,
(footprint - 1) * TargetBytesPerWord);
moveMR(&con,
TargetBytesPerWord,
&returnAddressSrc,
TargetBytesPerWord,
&returnAddress);
lir::Register stack(StackRegister);
ResolvedPromise footprintPromise(footprint * TargetBytesPerWord);
lir::Constant footprintConstant(&footprintPromise);
addC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack);
lir::RegisterPair stack(StackRegister);
ResolvedPromise footprintPromise(footprint * TargetBytesPerWord);
lir::Constant footprintConstant(&footprintPromise);
addC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack);
}
}
virtual void popFrameForTailCall(unsigned footprint,
int offset,
int returnAddressSurrogate,
int framePointerSurrogate UNUSED)
Register returnAddressSurrogate,
Register framePointerSurrogate UNUSED)
{
assertT(&con, framePointerSurrogate == lir::NoRegister);
assertT(&con, framePointerSurrogate == NoRegister);
if (TailCalls) {
if (offset) {
footprint += FrameHeaderSize;
lir::Register link(LinkRegister);
lir::RegisterPair link(LinkRegister);
lir::Memory returnAddressSrc(StackRegister,
(footprint - 1) * TargetBytesPerWord);
moveMR(&con,
@ -809,16 +865,16 @@ class MyAssembler : public Assembler {
TargetBytesPerWord,
&link);
lir::Register stack(StackRegister);
lir::RegisterPair stack(StackRegister);
ResolvedPromise footprintPromise((footprint - offset)
* TargetBytesPerWord);
lir::Constant footprintConstant(&footprintPromise);
addC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack);
if (returnAddressSurrogate != lir::NoRegister) {
if (returnAddressSurrogate != NoRegister) {
assertT(&con, offset > 0);
lir::Register ras(returnAddressSurrogate);
lir::RegisterPair ras(returnAddressSurrogate);
lir::Memory dst(StackRegister, (offset - 1) * TargetBytesPerWord);
moveRM(&con, TargetBytesPerWord, &ras, TargetBytesPerWord, &dst);
}
@ -842,7 +898,7 @@ class MyAssembler : public Assembler {
if (TailCalls and argumentFootprint > StackAlignmentInWords) {
offset = argumentFootprint - StackAlignmentInWords;
lir::Register stack(StackRegister);
lir::RegisterPair stack(StackRegister);
ResolvedPromise adjustmentPromise(offset * TargetBytesPerWord);
lir::Constant adjustment(&adjustmentPromise);
addC(&con, TargetBytesPerWord, &adjustment, &stack, &stack);
@ -853,12 +909,28 @@ class MyAssembler : public Assembler {
return_(&con);
}
virtual void popFrameAndUpdateStackAndReturn(unsigned frameFootprint,
virtual void popFrameAndUpdateStackAndReturn(unsigned footprint,
unsigned stackOffsetFromThread)
{
popFrame(frameFootprint);
footprint += FrameHeaderSize;
lir::Register stack(StackRegister);
// see comment regarding the ARM64 ABI in allocateFrame
if (false and TargetBytesPerWord == 8) {
// ldp x29, x30, [sp],#footprint
con.code.append4(0xa8c00000 | (footprint << 15) | (31 << 5) | (30 << 10)
| 29);
} else {
lir::RegisterPair returnAddress(LinkRegister);
lir::Memory returnAddressSrc(StackRegister,
(footprint - 1) * TargetBytesPerWord);
moveMR(&con,
TargetBytesPerWord,
&returnAddressSrc,
TargetBytesPerWord,
&returnAddress);
}
lir::RegisterPair stack(StackRegister);
lir::Memory newStackSrc(ThreadRegister, stackOffsetFromThread);
moveMR(&con, TargetBytesPerWord, &newStackSrc, TargetBytesPerWord, &stack);
@ -890,14 +962,14 @@ class MyAssembler : public Assembler {
if (isBranch(op)) {
assertT(&con, a.size == b.size);
assertT(&con, c.size == TargetBytesPerWord);
assertT(&con, c.type == lir::ConstantOperand);
assertT(&con, c.type == lir::Operand::Type::Constant);
arch_->con.branchOperations[branchIndex(&(arch_->con), a.type, b.type)](
&con, op, a.size, a.operand, b.operand, c.operand);
} else {
assertT(&con, b.size == c.size);
assertT(&con, b.type == lir::RegisterOperand);
assertT(&con, c.type == lir::RegisterOperand);
assertT(&con, b.type == lir::Operand::Type::RegisterPair);
assertT(&con, c.type == lir::Operand::Type::RegisterPair);
arch_->con.ternaryOperations[index(&(arch_->con), op, a.type)](
&con, b.size, a.operand, b.operand, c.operand);
@ -948,17 +1020,28 @@ class MyAssembler : public Assembler {
unsigned instruction = o->block->start + padding(o->block, o->offset)
+ o->offset;
int32_t v = (entry - 8) - instruction;
expect(&con, v == (v & PoolOffsetMask));
int32_t* p = reinterpret_cast<int32_t*>(dst + instruction);
*p = (v & PoolOffsetMask) | ((~PoolOffsetMask) & *p);
if (TargetBytesPerWord == 8) {
int32_t v = entry - instruction;
expect(&con, v == (v & PoolOffsetMask));
const int32_t mask = (PoolOffsetMask >> 2) << 5;
*p = (((v >> 2) << 5) & mask) | ((~mask) & *p);
} else {
int32_t v = (entry - 8) - instruction;
expect(&con, v == (v & PoolOffsetMask));
*p = (v & PoolOffsetMask) | ((~PoolOffsetMask) & *p);
}
poolSize += TargetBytesPerWord;
}
bool jump = needJump(b);
if (jump) {
expect(&con, TargetBytesPerWord == 4);
write4(dst + dstOffset,
isa::b((poolSize + TargetBytesPerWord - 8) >> 2));
}
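
Two different pool-reference encodings are patched above: on 32-bit ARM the constant is reached via an LDR with a 12-bit byte offset relative to PC+8 (hence the -8 adjustments), while on ARM64 it is an LDR (literal) whose 19-bit word offset occupies bits 5-23, which is what the (v >> 2) << 5 shift builds. A sketch of the 64-bit patch in isolation (mirroring the code above; PoolOffsetMask bounds the reachable distance):

    // Insert the byte distance v (word-aligned) from an A64
    // "ldr Xt, label" instruction to its constant-pool entry into
    // the instruction's imm19 field at *p.
    void patchLdrLiteral(int32_t* p, int32_t v)
    {
      const int32_t mask = (PoolOffsetMask >> 2) << 5;  // imm19, bits 5..23
      *p = (((v >> 2) << 5) & mask) | (*p & ~mask);
    }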

View File

@ -85,15 +85,15 @@ class ArchitectureContext {
vm::System* s;
OperationType operations[lir::OperationCount];
UnaryOperationType
unaryOperations[lir::UnaryOperationCount * lir::OperandTypeCount];
unaryOperations[lir::UnaryOperationCount * lir::Operand::TypeCount];
BinaryOperationType binaryOperations[lir::BinaryOperationCount
* lir::OperandTypeCount
* lir::OperandTypeCount];
* lir::Operand::TypeCount
* lir::Operand::TypeCount];
TernaryOperationType ternaryOperations[lir::NonBranchTernaryOperationCount
* lir::OperandTypeCount];
* lir::Operand::TypeCount];
BranchOperationType branchOperations[lir::BranchOperationCount
* lir::OperandTypeCount
* lir::OperandTypeCount];
* lir::Operand::TypeCount
* lir::Operand::TypeCount];
};
inline avian::util::Aborter* getAborter(Context* c)


@ -46,34 +46,34 @@ enum CONDITION {
enum SHIFTOP { LSL, LSR, ASR, ROR };
// INSTRUCTION FORMATS
inline int
DATA(int cond, int opcode, int S, int Rn, int Rd, int shift, int Sh, int Rm)
DATA(int cond, int opcode, int S, Register Rn, Register Rd, int shift, int Sh, Register Rm)
{
return cond << 28 | opcode << 21 | S << 20 | Rn << 16 | Rd << 12 | shift << 7
| Sh << 5 | Rm;
return cond << 28 | opcode << 21 | S << 20 | Rn.index() << 16 | Rd.index() << 12 | shift << 7
| Sh << 5 | Rm.index();
}
inline int
DATAS(int cond, int opcode, int S, int Rn, int Rd, int Rs, int Sh, int Rm)
DATAS(int cond, int opcode, int S, Register Rn, Register Rd, Register Rs, int Sh, Register Rm)
{
return cond << 28 | opcode << 21 | S << 20 | Rn << 16 | Rd << 12 | Rs << 8
| Sh << 5 | 1 << 4 | Rm;
return cond << 28 | opcode << 21 | S << 20 | Rn.index() << 16 | Rd.index() << 12 | Rs.index() << 8
| Sh << 5 | 1 << 4 | Rm.index();
}
inline int DATAI(int cond, int opcode, int S, int Rn, int Rd, int rot, int imm)
inline int DATAI(int cond, int opcode, int S, Register Rn, Register Rd, int rot, int imm)
{
return cond << 28 | 1 << 25 | opcode << 21 | S << 20 | Rn << 16 | Rd << 12
return cond << 28 | 1 << 25 | opcode << 21 | S << 20 | Rn.index() << 16 | Rd.index() << 12
| rot << 8 | (imm & 0xff);
}
inline int BRANCH(int cond, int L, int offset)
{
return cond << 28 | 5 << 25 | L << 24 | (offset & 0xffffff);
}
inline int BRANCHX(int cond, int L, int Rm)
inline int BRANCHX(int cond, int L, Register Rm)
{
return cond << 28 | 0x4bffc << 6 | L << 5 | 1 << 4 | Rm;
return cond << 28 | 0x4bffc << 6 | L << 5 | 1 << 4 | Rm.index();
}
inline int MULTIPLY(int cond, int mul, int S, int Rd, int Rn, int Rs, int Rm)
inline int MULTIPLY(int cond, int mul, int S, Register Rd, Register Rn, Register Rs, Register Rm)
{
return cond << 28 | mul << 21 | S << 20 | Rd << 16 | Rn << 12 | Rs << 8
| 9 << 4 | Rm;
return cond << 28 | mul << 21 | S << 20 | Rd.index() << 16 | Rn.index() << 12 | Rs.index() << 8
| 9 << 4 | Rm.index();
}
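
These helpers just pack A32 data-processing fields; a quick worked check (mine, not from the patch) against a known encoding:

    #include <cstdint>

    // add r0, r1, r2: DATA(AL, 0x4, 0, Rn=1, Rd=0, shift=0, Sh=0, Rm=2)
    constexpr uint32_t AL = 0xE;
    constexpr uint32_t addR0R1R2
        = (AL << 28) | (0x4u << 21) | (1u << 16) | (0u << 12) | 2u;
    static_assert(addR0R1R2 == 0xE0810002, "standard A32 ADD r0, r1, r2");
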
inline int XFER(int cond,
int P,
@ -81,14 +81,14 @@ inline int XFER(int cond,
int B,
int W,
int L,
int Rn,
int Rd,
Register Rn,
Register Rd,
int shift,
int Sh,
int Rm)
Register Rm)
{
return cond << 28 | 3 << 25 | P << 24 | U << 23 | B << 22 | W << 21 | L << 20
| Rn << 16 | Rd << 12 | shift << 7 | Sh << 5 | Rm;
| Rn.index() << 16 | Rd.index() << 12 | shift << 7 | Sh << 5 | Rm.index();
}
inline int XFERI(int cond,
int P,
@ -96,41 +96,41 @@ inline int XFERI(int cond,
int B,
int W,
int L,
int Rn,
int Rd,
Register Rn,
Register Rd,
int offset)
{
return cond << 28 | 2 << 25 | P << 24 | U << 23 | B << 22 | W << 21 | L << 20
| Rn << 16 | Rd << 12 | (offset & 0xfff);
| Rn.index() << 16 | Rd.index() << 12 | (offset & 0xfff);
}
inline int XFER2(int cond,
int P,
int U,
int W,
int L,
int Rn,
int Rd,
Register Rn,
Register Rd,
int S,
int H,
int Rm)
Register Rm)
{
return cond << 28 | P << 24 | U << 23 | W << 21 | L << 20 | Rn << 16
| Rd << 12 | 1 << 7 | S << 6 | H << 5 | 1 << 4 | Rm;
return cond << 28 | P << 24 | U << 23 | W << 21 | L << 20 | Rn.index() << 16
| Rd.index() << 12 | 1 << 7 | S << 6 | H << 5 | 1 << 4 | Rm.index();
}
inline int XFER2I(int cond,
int P,
int U,
int W,
int L,
int Rn,
int Rd,
Register Rn,
Register Rd,
int offsetH,
int S,
int H,
int offsetL)
{
return cond << 28 | P << 24 | U << 23 | 1 << 22 | W << 21 | L << 20 | Rn << 16
| Rd << 12 | offsetH << 8 | 1 << 7 | S << 6 | H << 5 | 1 << 4
return cond << 28 | P << 24 | U << 23 | 1 << 22 | W << 21 | L << 20 | Rn.index() << 16
| Rd.index() << 12 | offsetH << 8 | 1 << 7 | S << 6 | H << 5 | 1 << 4
| (offsetL & 0xf);
}
inline int COOP(int cond,
@ -150,30 +150,30 @@ inline int COXFER(int cond,
int N,
int W,
int L,
int Rn,
Register Rn,
int CRd,
int cp_num,
int offset) // offset is in words, not bytes
{
return cond << 28 | 0x6 << 25 | P << 24 | U << 23 | N << 22 | W << 21
| L << 20 | Rn << 16 | CRd << 12 | cp_num << 8 | (offset & 0xff) >> 2;
| L << 20 | Rn.index() << 16 | CRd << 12 | cp_num << 8 | (offset & 0xff) >> 2;
}
inline int COREG(int cond,
int opcode_1,
int L,
int CRn,
int Rd,
Register Rd,
int cp_num,
int opcode_2,
int CRm)
{
return cond << 28 | 0xe << 24 | opcode_1 << 21 | L << 20 | CRn << 16
| Rd << 12 | cp_num << 8 | opcode_2 << 5 | 1 << 4 | CRm;
| Rd.index() << 12 | cp_num << 8 | opcode_2 << 5 | 1 << 4 | CRm;
}
inline int
COREG2(int cond, int L, int Rn, int Rd, int cp_num, int opcode, int CRm)
COREG2(int cond, int L, Register Rn, Register Rd, int cp_num, int opcode, int CRm)
{
return cond << 28 | 0xc4 << 20 | L << 20 | Rn << 16 | Rd << 12 | cp_num << 8
return cond << 28 | 0xc4 << 20 | L << 20 | Rn.index() << 16 | Rd.index() << 12 | cp_num << 8
| opcode << 4 | CRm;
}
// FIELD CALCULATORS
@ -191,143 +191,143 @@ inline int bl(int offset)
{
return BRANCH(AL, 1, offset);
}
inline int bx(int Rm)
inline int bx(Register Rm)
{
return BRANCHX(AL, 0, Rm);
}
inline int blx(int Rm)
inline int blx(Register Rm)
{
return BRANCHX(AL, 1, Rm);
}
inline int and_(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0)
inline int and_(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
{
return DATA(AL, 0x0, 0, Rn, Rd, shift, Sh, Rm);
}
inline int eor(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0)
inline int eor(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
{
return DATA(AL, 0x1, 0, Rn, Rd, shift, Sh, Rm);
}
inline int rsb(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0)
inline int rsb(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
{
return DATA(AL, 0x3, 0, Rn, Rd, shift, Sh, Rm);
}
inline int add(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0)
inline int add(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
{
return DATA(AL, 0x4, 0, Rn, Rd, shift, Sh, Rm);
}
inline int adc(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0)
inline int adc(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
{
return DATA(AL, 0x5, 0, Rn, Rd, shift, Sh, Rm);
}
inline int rsc(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0)
inline int rsc(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
{
return DATA(AL, 0x7, 0, Rn, Rd, shift, Sh, Rm);
}
inline int cmp(int Rn, int Rm, int Sh = 0, int shift = 0)
inline int cmp(Register Rn, Register Rm, int Sh = 0, int shift = 0)
{
return DATA(AL, 0xa, 1, Rn, 0, shift, Sh, Rm);
return DATA(AL, 0xa, 1, Rn, Register(0), shift, Sh, Rm);
}
inline int orr(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0)
inline int orr(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
{
return DATA(AL, 0xc, 0, Rn, Rd, shift, Sh, Rm);
}
inline int mov(int Rd, int Rm, int Sh = 0, int shift = 0)
inline int mov(Register Rd, Register Rm, int Sh = 0, int shift = 0)
{
return DATA(AL, 0xd, 0, 0, Rd, shift, Sh, Rm);
return DATA(AL, 0xd, 0, Register(0), Rd, shift, Sh, Rm);
}
inline int mvn(int Rd, int Rm, int Sh = 0, int shift = 0)
inline int mvn(Register Rd, Register Rm, int Sh = 0, int shift = 0)
{
return DATA(AL, 0xf, 0, 0, Rd, shift, Sh, Rm);
return DATA(AL, 0xf, 0, Register(0), Rd, shift, Sh, Rm);
}
inline int andi(int Rd, int Rn, int imm, int rot = 0)
inline int andi(Register Rd, Register Rn, int imm, int rot = 0)
{
return DATAI(AL, 0x0, 0, Rn, Rd, rot, imm);
}
inline int subi(int Rd, int Rn, int imm, int rot = 0)
inline int subi(Register Rd, Register Rn, int imm, int rot = 0)
{
return DATAI(AL, 0x2, 0, Rn, Rd, rot, imm);
}
inline int rsbi(int Rd, int Rn, int imm, int rot = 0)
inline int rsbi(Register Rd, Register Rn, int imm, int rot = 0)
{
return DATAI(AL, 0x3, 0, Rn, Rd, rot, imm);
}
inline int addi(int Rd, int Rn, int imm, int rot = 0)
inline int addi(Register Rd, Register Rn, int imm, int rot = 0)
{
return DATAI(AL, 0x4, 0, Rn, Rd, rot, imm);
}
inline int adci(int Rd, int Rn, int imm, int rot = 0)
inline int adci(Register Rd, Register Rn, int imm, int rot = 0)
{
return DATAI(AL, 0x5, 0, Rn, Rd, rot, imm);
}
inline int bici(int Rd, int Rn, int imm, int rot = 0)
inline int bici(Register Rd, Register Rn, int imm, int rot = 0)
{
return DATAI(AL, 0xe, 0, Rn, Rd, rot, imm);
}
inline int cmpi(int Rn, int imm, int rot = 0)
inline int cmpi(Register Rn, int imm, int rot = 0)
{
return DATAI(AL, 0xa, 1, Rn, 0, rot, imm);
return DATAI(AL, 0xa, 1, Rn, Register(0), rot, imm);
}
inline int movi(int Rd, int imm, int rot = 0)
inline int movi(Register Rd, int imm, int rot = 0)
{
return DATAI(AL, 0xd, 0, 0, Rd, rot, imm);
return DATAI(AL, 0xd, 0, Register(0), Rd, rot, imm);
}
inline int orrsh(int Rd, int Rn, int Rm, int Rs, int Sh)
inline int orrsh(Register Rd, Register Rn, Register Rm, Register Rs, int Sh)
{
return DATAS(AL, 0xc, 0, Rn, Rd, Rs, Sh, Rm);
}
inline int movsh(int Rd, int Rm, int Rs, int Sh)
inline int movsh(Register Rd, Register Rm, Register Rs, int Sh)
{
return DATAS(AL, 0xd, 0, 0, Rd, Rs, Sh, Rm);
return DATAS(AL, 0xd, 0, Register(0), Rd, Rs, Sh, Rm);
}
inline int mul(int Rd, int Rm, int Rs)
inline int mul(Register Rd, Register Rm, Register Rs)
{
return MULTIPLY(AL, 0, 0, Rd, 0, Rs, Rm);
return MULTIPLY(AL, 0, 0, Rd, Register(0), Rs, Rm);
}
inline int mla(int Rd, int Rm, int Rs, int Rn)
inline int mla(Register Rd, Register Rm, Register Rs, Register Rn)
{
return MULTIPLY(AL, 1, 0, Rd, Rn, Rs, Rm);
}
inline int umull(int RdLo, int RdHi, int Rm, int Rs)
inline int umull(Register RdLo, Register RdHi, Register Rm, Register Rs)
{
return MULTIPLY(AL, 4, 0, RdHi, RdLo, Rs, Rm);
}
inline int ldr(int Rd, int Rn, int Rm, int W = 0)
inline int ldr(Register Rd, Register Rn, Register Rm, int W = 0)
{
return XFER(AL, 1, 1, 0, W, 1, Rn, Rd, 0, 0, Rm);
}
inline int ldri(int Rd, int Rn, int imm, int W = 0)
inline int ldri(Register Rd, Register Rn, int imm, int W = 0)
{
return XFERI(AL, 1, calcU(imm), 0, W, 1, Rn, Rd, abs(imm));
}
inline int ldrb(int Rd, int Rn, int Rm)
inline int ldrb(Register Rd, Register Rn, Register Rm)
{
return XFER(AL, 1, 1, 1, 0, 1, Rn, Rd, 0, 0, Rm);
}
inline int ldrbi(int Rd, int Rn, int imm)
inline int ldrbi(Register Rd, Register Rn, int imm)
{
return XFERI(AL, 1, calcU(imm), 1, 0, 1, Rn, Rd, abs(imm));
}
inline int str(int Rd, int Rn, int Rm, int W = 0)
inline int str(Register Rd, Register Rn, Register Rm, int W = 0)
{
return XFER(AL, 1, 1, 0, W, 0, Rn, Rd, 0, 0, Rm);
}
inline int stri(int Rd, int Rn, int imm, int W = 0)
inline int stri(Register Rd, Register Rn, int imm, int W = 0)
{
return XFERI(AL, 1, calcU(imm), 0, W, 0, Rn, Rd, abs(imm));
}
inline int strb(int Rd, int Rn, int Rm)
inline int strb(Register Rd, Register Rn, Register Rm)
{
return XFER(AL, 1, 1, 1, 0, 0, Rn, Rd, 0, 0, Rm);
}
inline int strbi(int Rd, int Rn, int imm)
inline int strbi(Register Rd, Register Rn, int imm)
{
return XFERI(AL, 1, calcU(imm), 1, 0, 0, Rn, Rd, abs(imm));
}
inline int ldrh(int Rd, int Rn, int Rm)
inline int ldrh(Register Rd, Register Rn, Register Rm)
{
return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 0, 1, Rm);
}
inline int ldrhi(int Rd, int Rn, int imm)
inline int ldrhi(Register Rd, Register Rn, int imm)
{
return XFER2I(AL,
1,
@ -341,11 +341,11 @@ inline int ldrhi(int Rd, int Rn, int imm)
1,
abs(imm) & 0xf);
}
inline int strh(int Rd, int Rn, int Rm)
inline int strh(Register Rd, Register Rn, Register Rm)
{
return XFER2(AL, 1, 1, 0, 0, Rn, Rd, 0, 1, Rm);
}
inline int strhi(int Rd, int Rn, int imm)
inline int strhi(Register Rd, Register Rn, int imm)
{
return XFER2I(AL,
1,
@ -359,11 +359,11 @@ inline int strhi(int Rd, int Rn, int imm)
1,
abs(imm) & 0xf);
}
inline int ldrsh(int Rd, int Rn, int Rm)
inline int ldrsh(Register Rd, Register Rn, Register Rm)
{
return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 1, 1, Rm);
}
inline int ldrshi(int Rd, int Rn, int imm)
inline int ldrshi(Register Rd, Register Rn, int imm)
{
return XFER2I(AL,
1,
@ -377,11 +377,11 @@ inline int ldrshi(int Rd, int Rn, int imm)
1,
abs(imm) & 0xf);
}
inline int ldrsb(int Rd, int Rn, int Rm)
inline int ldrsb(Register Rd, Register Rn, Register Rm)
{
return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 1, 0, Rm);
}
inline int ldrsbi(int Rd, int Rn, int imm)
inline int ldrsbi(Register Rd, Register Rn, int imm)
{
return XFER2I(AL,
1,
@ -403,27 +403,27 @@ inline int bkpt(int16_t immed)
// COPROCESSOR INSTRUCTIONS
inline int mcr(int coproc,
int opcode_1,
int Rd,
Register Rd,
int CRn,
int CRm,
int opcode_2 = 0)
{
return COREG(AL, opcode_1, 0, CRn, Rd, coproc, opcode_2, CRm);
}
inline int mcrr(int coproc, int opcode, int Rd, int Rn, int CRm)
inline int mcrr(int coproc, int opcode, Register Rd, Register Rn, int CRm)
{
return COREG2(AL, 0, Rn, Rd, coproc, opcode, CRm);
}
inline int mrc(int coproc,
int opcode_1,
int Rd,
Register Rd,
int CRn,
int CRm,
int opcode_2 = 0)
{
return COREG(AL, opcode_1, 1, CRn, Rd, coproc, opcode_2, CRm);
}
inline int mrrc(int coproc, int opcode, int Rd, int Rn, int CRm)
inline int mrrc(int coproc, int opcode, Register Rd, Register Rn, int CRm)
{
return COREG2(AL, 1, Rn, Rd, coproc, opcode, CRm);
}
@ -551,42 +551,42 @@ inline int ftosizd(int Sd, int Dm)
return COOP(AL, 0xb | (Sd & 1) << 2, 0xd, Sd >> 1, 11, 6, Dm);
}
// single load/store instructions for both precision types
inline int flds(int Sd, int Rn, int offset = 0)
inline int flds(int Sd, Register Rn, int offset = 0)
{
return COXFER(AL, 1, 1, Sd & 1, 0, 1, Rn, Sd >> 1, 10, offset);
};
inline int fldd(int Dd, int Rn, int offset = 0)
inline int fldd(int Dd, Register Rn, int offset = 0)
{
return COXFER(AL, 1, 1, 0, 0, 1, Rn, Dd, 11, offset);
};
inline int fsts(int Sd, int Rn, int offset = 0)
inline int fsts(int Sd, Register Rn, int offset = 0)
{
return COXFER(AL, 1, 1, Sd & 1, 0, 0, Rn, Sd >> 1, 10, offset);
};
inline int fstd(int Dd, int Rn, int offset = 0)
inline int fstd(int Dd, Register Rn, int offset = 0)
{
return COXFER(AL, 1, 1, 0, 0, 0, Rn, Dd, 11, offset);
};
// move between GPRs and FPRs
inline int fmsr(int Sn, int Rd)
inline int fmsr(int Sn, Register Rd)
{
return mcr(10, 0, Rd, Sn >> 1, 0, (Sn & 1) << 2);
}
inline int fmrs(int Rd, int Sn)
inline int fmrs(Register Rd, int Sn)
{
return mrc(10, 0, Rd, Sn >> 1, 0, (Sn & 1) << 2);
}
// move to/from VFP system registers
inline int fmrx(int Rd, int reg)
inline int fmrx(Register Rd, int reg)
{
return mrc(10, 7, Rd, reg, 0);
}
// these move around pairs of single-precision registers
inline int fmdrr(int Dm, int Rd, int Rn)
inline int fmdrr(int Dm, Register Rd, Register Rn)
{
return mcrr(11, 1, Rd, Rn, Dm);
}
inline int fmrrd(int Rd, int Rn, int Dm)
inline int fmrrd(Register Rd, Register Rn, int Dm)
{
return mrrc(11, 1, Rd, Rn, Dm);
}
@ -600,27 +600,27 @@ inline int SETS(int ins)
return ins | 1 << 20;
}
// PSEUDO-INSTRUCTIONS
inline int lsl(int Rd, int Rm, int Rs)
inline int lsl(Register Rd, Register Rm, Register Rs)
{
return movsh(Rd, Rm, Rs, LSL);
}
inline int lsli(int Rd, int Rm, int imm)
inline int lsli(Register Rd, Register Rm, int imm)
{
return mov(Rd, Rm, LSL, imm);
}
inline int lsr(int Rd, int Rm, int Rs)
inline int lsr(Register Rd, Register Rm, Register Rs)
{
return movsh(Rd, Rm, Rs, LSR);
}
inline int lsri(int Rd, int Rm, int imm)
inline int lsri(Register Rd, Register Rm, int imm)
{
return mov(Rd, Rm, LSR, imm);
}
inline int asr(int Rd, int Rm, int Rs)
inline int asr(Register Rd, Register Rm, Register Rs)
{
return movsh(Rd, Rm, Rs, ASR);
}
inline int asri(int Rd, int Rm, int imm)
inline int asri(Register Rd, Register Rm, int imm)
{
return mov(Rd, Rm, ASR, imm);
}
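
The shift pseudo-instructions are plain MOVs with a shift applied to Rm; for example (my check, not from the patch):

    // lsri(r0, r1, 2) == mov r0, r1, LSR #2
    //   DATA(AL, 0xd, 0, Rn=0, Rd=0, shift=2, Sh=LSR, Rm=1)
    constexpr uint32_t movLsr2 = (0xEu << 28) | (0xDu << 21) | (2u << 7)
                                 | (1u << 5) | 1u;   // LSR == 1
    static_assert(movLsr2 == 0xE1A00121, "A32 MOV r0, r1, LSR #2");
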
@ -670,7 +670,7 @@ inline int bpl(int offset)
}
inline int fmstat()
{
return fmrx(15, FPSCR);
return fmrx(Register(15), FPSCR);
}
// todo: make this pretty:
inline int dmb()


@ -12,6 +12,12 @@
#include "fixup.h"
#include "block.h"
namespace {
const unsigned InstructionSize = 4;
} // namespace
namespace avian {
namespace codegen {
namespace arm {
@ -38,8 +44,7 @@ int64_t OffsetPromise::value()
assertT(con, resolved());
unsigned o = offset - block->offset;
return block->start
+ padding(block, forTrace ? o - vm::TargetBytesPerWord : o) + o;
return block->start + padding(block, forTrace ? o - InstructionSize : o) + o;
}
Promise* offsetPromise(Context* con, bool forTrace)
@ -92,17 +97,30 @@ bool bounded(int right, int left, int32_t v)
void* updateOffset(vm::System* s, uint8_t* instruction, int64_t value)
{
// ARM's PC is two words ahead, and branches drop the bottom 2 bits.
int32_t v = (reinterpret_cast<uint8_t*>(value) - (instruction + 8)) >> 2;
int32_t mask;
expect(s, bounded(0, 8, v));
mask = 0xFFFFFF;
int32_t* p = reinterpret_cast<int32_t*>(instruction);
int32_t v;
int32_t mask;
if (vm::TargetBytesPerWord == 8) {
if ((*p >> 24) == 0x54) {
// conditional branch
v = ((reinterpret_cast<uint8_t*>(value) - instruction) >> 2) << 5;
mask = 0xFFFFE0;
} else {
// unconditional branch
v = (reinterpret_cast<uint8_t*>(value) - instruction) >> 2;
mask = 0x3FFFFFF;
}
} else {
v = (reinterpret_cast<uint8_t*>(value) - (instruction + 8)) >> 2;
mask = 0xFFFFFF;
}
expect(s, bounded(0, 8, v));
*p = (v & mask) | ((~mask) & *p);
return instruction + 4;
return instruction + InstructionSize;
}
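
updateOffset therefore distinguishes three branch shapes; restated as a sketch (assuming B/BL on A32 and B.cond vs. B/BL on A64, as the 0x54 opcode test suggests):

    #include <cstdint>

    uint32_t patchBranch(uint32_t insn, int32_t target, int32_t site, bool a64)
    {
      if (a64) {
        bool conditional = (insn >> 24) == 0x54;       // B.cond
        uint32_t mask = conditional ? (0x7FFFFu << 5)  // imm19
                                    : 0x3FFFFFFu;      // imm26
        uint32_t v = uint32_t((target - site) >> 2);
        if (conditional) v <<= 5;
        return (insn & ~mask) | (v & mask);
      }
      uint32_t v = uint32_t((target - (site + 8)) >> 2);  // A32 PC bias
      return (insn & ~0xFFFFFFu) | (v & 0xFFFFFF);
    }
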
ConstantPoolEntry::ConstantPoolEntry(Context* con,
@ -214,6 +232,101 @@ void appendPoolEvent(Context* con,
b->poolEventTail = e;
}
bool needJump(MyBlock* b)
{
return b->next or b->size != (b->size & PoolOffsetMask);
}
unsigned padding(MyBlock* b, unsigned offset)
{
unsigned total = 0;
for (PoolEvent* e = b->poolEventHead; e; e = e->next) {
if (e->offset <= offset) {
if (needJump(b)) {
total += vm::TargetBytesPerWord;
}
for (PoolOffset* o = e->poolOffsetHead; o; o = o->next) {
total += vm::TargetBytesPerWord;
}
} else {
break;
}
}
return total;
}
void resolve(MyBlock* b)
{
Context* con = b->context;
if (b->poolOffsetHead) {
if (con->poolOffsetTail) {
con->poolOffsetTail->next = b->poolOffsetHead;
} else {
con->poolOffsetHead = b->poolOffsetHead;
}
con->poolOffsetTail = b->poolOffsetTail;
}
if (con->poolOffsetHead) {
bool append;
if (b->next == 0 or b->next->poolEventHead) {
append = true;
} else {
int32_t v
= (b->start + b->size + b->next->size + vm::TargetBytesPerWord - 8)
- (con->poolOffsetHead->offset + con->poolOffsetHead->block->start);
append = (v != (v & PoolOffsetMask));
if (DebugPool) {
fprintf(stderr,
"current %p %d %d next %p %d %d\n",
b,
b->start,
b->size,
b->next,
b->start + b->size,
b->next->size);
fprintf(stderr,
"offset %p %d is of distance %d to next block; append? %d\n",
con->poolOffsetHead,
con->poolOffsetHead->offset,
v,
append);
}
}
if (append) {
#ifndef NDEBUG
int32_t v
= (b->start + b->size - 8)
- (con->poolOffsetHead->offset + con->poolOffsetHead->block->start);
expect(con, v == (v & PoolOffsetMask));
#endif // not NDEBUG
appendPoolEvent(
con, b, b->size, con->poolOffsetHead, con->poolOffsetTail);
if (DebugPool) {
for (PoolOffset* o = con->poolOffsetHead; o; o = o->next) {
fprintf(stderr,
"include %p %d in pool event %p at offset %d in block %p\n",
o,
o->offset,
b->poolEventTail,
b->size,
b);
}
}
con->poolOffsetHead = 0;
con->poolOffsetTail = 0;
}
}
}
} // namespace arm
} // namespace codegen
} // namespace avian
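
This is the literal-pool bookkeeping: constants referenced by in-flight code are queued as PoolOffsets, and resolve() decides, block by block, whether the pending pool must be dumped after the current block or can still be reached from the next one. The reach test, restated on its own (my sketch; the `- 8` is the A32 PC bias, and the extra word leaves room for the jump over the pool):

    #include <cstdint>

    // true -> emit the pool after the current block
    bool mustEmitPool(int32_t blockStart, int32_t blockSize, int32_t nextSize,
                      int32_t loadSite, int32_t poolOffsetMask, int wordSize)
    {
      int32_t v = (blockStart + blockSize + nextSize + wordSize - 8) - loadSite;
      return v != (v & poolOffsetMask);
    }
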


@ -27,7 +27,7 @@ namespace arm {
const bool DebugPool = false;
const int32_t PoolOffsetMask = 0xFFF;
const int32_t PoolOffsetMask = vm::TargetBytesPerWord == 8 ? 0x1FFFFF : 0xFFF;
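
The two mask widths follow from the load-literal forms: A32 LDR takes a 12-bit byte offset, while A64 LDR (literal) takes a 19-bit word offset, i.e. 21 bits of bytes; a one-line check (mine):

    static_assert(((0x7FFFF << 2) | 3) == 0x1FFFFF,
                  "imm19 words span 21 bits of bytes");
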
class Task {
public:


@ -22,35 +22,35 @@ using namespace util;
unsigned index(ArchitectureContext*,
lir::BinaryOperation operation,
lir::OperandType operand1,
lir::OperandType operand2)
lir::Operand::Type operand1,
lir::Operand::Type operand2)
{
return operation + (lir::BinaryOperationCount * operand1)
+ (lir::BinaryOperationCount * lir::OperandTypeCount * operand2);
return operation + (lir::BinaryOperationCount * (unsigned)operand1)
+ (lir::BinaryOperationCount * lir::Operand::TypeCount * (unsigned)operand2);
}
unsigned index(ArchitectureContext* con UNUSED,
lir::TernaryOperation operation,
lir::OperandType operand1)
lir::Operand::Type operand1)
{
assertT(con, not isBranch(operation));
return operation + (lir::NonBranchTernaryOperationCount * operand1);
return operation + (lir::NonBranchTernaryOperationCount * (unsigned)operand1);
}
unsigned branchIndex(ArchitectureContext* con UNUSED,
lir::OperandType operand1,
lir::OperandType operand2)
lir::Operand::Type operand1,
lir::Operand::Type operand2)
{
return operand1 + (lir::OperandTypeCount * operand2);
return (unsigned)operand1 + (lir::Operand::TypeCount * (unsigned)operand2);
}
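
All three index functions flatten (operation, operand type...) tuples into the row-major tables declared in ArchitectureContext; the cast to unsigned is what the switch to the scoped Operand::Type enum forces. A sketch of the layout with made-up sizes:

    // slot(op, t1, t2) = op + N*t1 + N*T*t2 for an N-by-T-by-T table
    constexpr unsigned slot(unsigned op, unsigned t1, unsigned t2,
                            unsigned N, unsigned T)
    {
      return op + N * t1 + N * T * t2;
    }
    static_assert(slot(0, 0, 1, 5, 4) == 20, "each t2 plane is N*T slots wide");
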
void populateTables(ArchitectureContext* con)
{
const lir::OperandType C = lir::ConstantOperand;
const lir::OperandType A = lir::AddressOperand;
const lir::OperandType R = lir::RegisterOperand;
const lir::OperandType M = lir::MemoryOperand;
const lir::Operand::Type C = lir::Operand::Type::Constant;
const lir::Operand::Type A = lir::Operand::Type::Address;
const lir::Operand::Type R = lir::Operand::Type::RegisterPair;
const lir::Operand::Type M = lir::Operand::Type::Memory;
OperationType* zo = con->operations;
UnaryOperationType* uo = con->unaryOperations;


@ -25,16 +25,16 @@ namespace arm {
unsigned index(ArchitectureContext*,
lir::BinaryOperation operation,
lir::OperandType operand1,
lir::OperandType operand2);
lir::Operand::Type operand1,
lir::Operand::Type operand2);
unsigned index(ArchitectureContext* con UNUSED,
lir::TernaryOperation operation,
lir::OperandType operand1);
lir::Operand::Type operand1);
unsigned branchIndex(ArchitectureContext* con UNUSED,
lir::OperandType operand1,
lir::OperandType operand2);
lir::Operand::Type operand1,
lir::Operand::Type operand2);
void populateTables(ArchitectureContext* con);


@ -25,17 +25,17 @@ class Context;
// shortcut functions
inline int newTemp(Context* con)
inline Register newTemp(Context* con)
{
return con->client->acquireTemporary(GPR_MASK);
}
inline int newTemp(Context* con, unsigned mask)
inline Register newTemp(Context* con, RegisterMask mask)
{
return con->client->acquireTemporary(mask);
}
inline void freeTemp(Context* con, int r)
inline void freeTemp(Context* con, Register r)
{
con->client->releaseTemporary(r);
}
@ -45,67 +45,67 @@ inline int64_t getValue(lir::Constant* con)
return con->value->value();
}
inline lir::Register makeTemp(Context* con)
inline lir::RegisterPair makeTemp(Context* con)
{
lir::Register tmp(newTemp(con));
lir::RegisterPair tmp(newTemp(con));
return tmp;
}
inline lir::Register makeTemp64(Context* con)
inline lir::RegisterPair makeTemp64(Context* con)
{
lir::Register tmp(newTemp(con), newTemp(con));
lir::RegisterPair tmp(newTemp(con), newTemp(con));
return tmp;
}
inline void freeTemp(Context* con, const lir::Register& tmp)
inline void freeTemp(Context* con, const lir::RegisterPair& tmp)
{
if (tmp.low != lir::NoRegister)
if (tmp.low != NoRegister)
freeTemp(con, tmp.low);
if (tmp.high != lir::NoRegister)
if (tmp.high != NoRegister)
freeTemp(con, tmp.high);
}
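
makeTemp/makeTemp64 pair one or two scratch GPRs into a lir::RegisterPair, and freeTemp returns whichever halves are real. A toy model (mine, not the avian API) of the acquire/release discipline these wrap:

    #include <bitset>
    #include <cassert>

    struct Pool {
      std::bitset<16> used;
      int acquire()
      {
        for (int i = 0; i < 16; ++i)
          if (not used[i]) { used[i] = true; return i; }
        return -1;
      }
      void release(int r) { used[r] = false; }
    };

    int main()
    {
      Pool p;
      int low = p.acquire(), high = p.acquire();  // makeTemp64 analogue
      assert(low != high);
      p.release(low); p.release(high);            // freeTemp analogue
    }
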
void shiftLeftR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t);
void moveRR(Context* con,
unsigned srcSize,
lir::Register* src,
lir::RegisterPair* src,
unsigned dstSize,
lir::Register* dst);
lir::RegisterPair* dst);
void shiftLeftC(Context* con,
unsigned size UNUSED,
lir::Constant* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* b,
lir::RegisterPair* t);
void shiftRightR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t);
void shiftRightC(Context* con,
unsigned size UNUSED,
lir::Constant* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* b,
lir::RegisterPair* t);
void unsignedShiftRightR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t);
void unsignedShiftRightC(Context* con,
unsigned size UNUSED,
lir::Constant* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* b,
lir::RegisterPair* t);
bool needJump(MyBlock* b);
@ -113,133 +113,133 @@ unsigned padding(MyBlock* b, unsigned offset);
void resolve(MyBlock* b);
void jumpR(Context* con, unsigned size UNUSED, lir::Register* target);
void jumpR(Context* con, unsigned size UNUSED, lir::RegisterPair* target);
void swapRR(Context* con,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void moveRR(Context* con,
unsigned srcSize,
lir::Register* src,
lir::RegisterPair* src,
unsigned dstSize,
lir::Register* dst);
lir::RegisterPair* dst);
void moveZRR(Context* con,
unsigned srcSize,
lir::Register* src,
lir::RegisterPair* src,
unsigned,
lir::Register* dst);
lir::RegisterPair* dst);
void moveCR(Context* con,
unsigned size,
lir::Constant* src,
unsigned,
lir::Register* dst);
lir::RegisterPair* dst);
void moveCR2(Context* con,
unsigned size,
lir::Constant* src,
lir::Register* dst,
lir::RegisterPair* dst,
Promise* callOffset);
void moveCR(Context* con,
unsigned size,
lir::Constant* src,
unsigned,
lir::Register* dst);
lir::RegisterPair* dst);
void addR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t);
void subR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t);
void addC(Context* con,
unsigned size,
lir::Constant* a,
lir::Register* b,
lir::Register* dst);
lir::RegisterPair* b,
lir::RegisterPair* dst);
void subC(Context* con,
unsigned size,
lir::Constant* a,
lir::Register* b,
lir::Register* dst);
lir::RegisterPair* b,
lir::RegisterPair* dst);
void multiplyR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t);
void floatAbsoluteRR(Context* con,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
unsigned,
lir::Register* b);
lir::RegisterPair* b);
void floatNegateRR(Context* con,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
unsigned,
lir::Register* b);
lir::RegisterPair* b);
void float2FloatRR(Context* con,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
unsigned,
lir::Register* b);
lir::RegisterPair* b);
void float2IntRR(Context* con,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
unsigned,
lir::Register* b);
lir::RegisterPair* b);
void int2FloatRR(Context* con,
unsigned,
lir::Register* a,
lir::RegisterPair* a,
unsigned size,
lir::Register* b);
lir::RegisterPair* b);
void floatSqrtRR(Context* con,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
unsigned,
lir::Register* b);
lir::RegisterPair* b);
void floatAddR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t);
void floatSubtractR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t);
void floatMultiplyR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t);
void floatDivideR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t);
int normalize(Context* con,
int offset,
@ -250,7 +250,7 @@ int normalize(Context* con,
void store(Context* con,
unsigned size,
lir::Register* src,
lir::RegisterPair* src,
int base,
int offset,
int index,
@ -259,7 +259,7 @@ void store(Context* con,
void moveRM(Context* con,
unsigned srcSize,
lir::Register* src,
lir::RegisterPair* src,
unsigned dstSize UNUSED,
lir::Memory* dst);
@ -270,7 +270,7 @@ void load(Context* con,
int index,
unsigned scale,
unsigned dstSize,
lir::Register* dst,
lir::RegisterPair* dst,
bool preserveIndex,
bool signExtend);
@ -278,61 +278,61 @@ void moveMR(Context* con,
unsigned srcSize,
lir::Memory* src,
unsigned dstSize,
lir::Register* dst);
lir::RegisterPair* dst);
void moveZMR(Context* con,
unsigned srcSize,
lir::Memory* src,
unsigned dstSize,
lir::Register* dst);
lir::RegisterPair* dst);
void andR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* dst);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* dst);
void andC(Context* con,
unsigned size,
lir::Constant* a,
lir::Register* b,
lir::Register* dst);
lir::RegisterPair* b,
lir::RegisterPair* dst);
void orR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* dst);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* dst);
void xorR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* dst);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* dst);
void moveAR2(Context* con,
unsigned srcSize,
lir::Address* src,
unsigned dstSize,
lir::Register* dst);
lir::RegisterPair* dst);
void moveAR(Context* con,
unsigned srcSize,
lir::Address* src,
unsigned dstSize,
lir::Register* dst);
lir::RegisterPair* dst);
void compareRR(Context* con,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void compareCR(Context* con,
unsigned aSize,
lir::Constant* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void compareCM(Context* con,
unsigned aSize,
@ -342,7 +342,7 @@ void compareCM(Context* con,
void compareRM(Context* con,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize,
lir::Memory* b);
@ -365,21 +365,21 @@ void branchLong(Context* con,
void branchRR(Context* con,
lir::TernaryOperation op,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::Constant* target);
void branchCR(Context* con,
lir::TernaryOperation op,
unsigned size,
lir::Constant* a,
lir::Register* b,
lir::RegisterPair* b,
lir::Constant* target);
void branchRM(Context* con,
lir::TernaryOperation op,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
lir::Memory* b,
lir::Constant* target);
@ -403,11 +403,11 @@ void moveCM(Context* con,
void negateRR(Context* con,
unsigned srcSize,
lir::Register* src,
lir::RegisterPair* src,
unsigned dstSize UNUSED,
lir::Register* dst);
lir::RegisterPair* dst);
void callR(Context* con, unsigned size UNUSED, lir::Register* target);
void callR(Context* con, unsigned size UNUSED, lir::RegisterPair* target);
void callC(Context* con, unsigned size UNUSED, lir::Constant* target);


@ -15,6 +15,8 @@
#include "fixup.h"
#include "multimethod.h"
#if TARGET_BYTES_PER_WORD == 4
namespace avian {
namespace codegen {
namespace arm {
@ -35,20 +37,20 @@ inline unsigned lo8(int64_t i)
void andC(Context* con,
unsigned size,
lir::Constant* a,
lir::Register* b,
lir::Register* dst);
lir::RegisterPair* b,
lir::RegisterPair* dst);
void shiftLeftR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t)
{
if (size == 8) {
int tmp1 = newTemp(con), tmp2 = newTemp(con), tmp3 = newTemp(con);
Register tmp1 = newTemp(con), tmp2 = newTemp(con), tmp3 = newTemp(con);
ResolvedPromise maskPromise(0x3F);
lir::Constant mask(&maskPromise);
lir::Register dst(tmp3);
lir::RegisterPair dst(tmp3);
andC(con, 4, &mask, a, &dst);
emit(con, lsl(tmp1, b->high, tmp3));
emit(con, rsbi(tmp2, tmp3, 32));
@ -61,10 +63,10 @@ void shiftLeftR(Context* con,
freeTemp(con, tmp2);
freeTemp(con, tmp3);
} else {
int tmp = newTemp(con);
Register tmp = newTemp(con);
ResolvedPromise maskPromise(0x1F);
lir::Constant mask(&maskPromise);
lir::Register dst(tmp);
lir::RegisterPair dst(tmp);
andC(con, size, &mask, a, &dst);
emit(con, lsl(t->low, b->low, tmp));
freeTemp(con, tmp);
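
The 8-byte case builds a 64-bit shift out of 32-bit register ops: shift the high word, OR in the bits that cross over from the low word, and special-case amounts of 32 or more. What the sequence computes, as plain C++ (my restatement):

    #include <cstdint>
    #include <cassert>

    uint64_t shl64(uint32_t lo, uint32_t hi, unsigned n)
    {
      uint64_t v = (uint64_t(hi) << 32) | lo;
      return v << (n & 0x3F);  // the emitted code masks the amount with 0x3F too
    }

    int main()
    {
      assert(shl64(0x80000000u, 0, 1) == 0x100000000ull);  // bit crosses halves
      assert(shl64(1, 0, 33) == 0x200000000ull);           // amount >= 32
    }
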
@ -73,15 +75,15 @@ void shiftLeftR(Context* con,
void moveRR(Context* con,
unsigned srcSize,
lir::Register* src,
lir::RegisterPair* src,
unsigned dstSize,
lir::Register* dst);
lir::RegisterPair* dst);
void shiftLeftC(Context* con,
unsigned size UNUSED,
lir::Constant* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* b,
lir::RegisterPair* t)
{
assertT(con, size == vm::TargetBytesPerWord);
if (getValue(a) & 0x1F) {
@ -93,15 +95,15 @@ void shiftLeftC(Context* con,
void shiftRightR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t)
{
if (size == 8) {
int tmp1 = newTemp(con), tmp2 = newTemp(con), tmp3 = newTemp(con);
Register tmp1 = newTemp(con), tmp2 = newTemp(con), tmp3 = newTemp(con);
ResolvedPromise maskPromise(0x3F);
lir::Constant mask(&maskPromise);
lir::Register dst(tmp3);
lir::RegisterPair dst(tmp3);
andC(con, 4, &mask, a, &dst);
emit(con, lsr(tmp1, b->low, tmp3));
emit(con, rsbi(tmp2, tmp3, 32));
@ -114,10 +116,10 @@ void shiftRightR(Context* con,
freeTemp(con, tmp2);
freeTemp(con, tmp3);
} else {
int tmp = newTemp(con);
Register tmp = newTemp(con);
ResolvedPromise maskPromise(0x1F);
lir::Constant mask(&maskPromise);
lir::Register dst(tmp);
lir::RegisterPair dst(tmp);
andC(con, size, &mask, a, &dst);
emit(con, asr(t->low, b->low, tmp));
freeTemp(con, tmp);
@ -127,8 +129,8 @@ void shiftRightR(Context* con,
void shiftRightC(Context* con,
unsigned size UNUSED,
lir::Constant* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* b,
lir::RegisterPair* t)
{
assertT(con, size == vm::TargetBytesPerWord);
if (getValue(a) & 0x1F) {
@ -140,18 +142,18 @@ void shiftRightC(Context* con,
void unsignedShiftRightR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t)
{
int tmpShift = newTemp(con);
Register tmpShift = newTemp(con);
ResolvedPromise maskPromise(size == 8 ? 0x3F : 0x1F);
lir::Constant mask(&maskPromise);
lir::Register dst(tmpShift);
lir::RegisterPair dst(tmpShift);
andC(con, 4, &mask, a, &dst);
emit(con, lsr(t->low, b->low, tmpShift));
if (size == 8) {
int tmpHi = newTemp(con), tmpLo = newTemp(con);
Register tmpHi = newTemp(con), tmpLo = newTemp(con);
emit(con, SETS(rsbi(tmpHi, tmpShift, 32)));
emit(con, lsl(tmpLo, b->high, tmpHi));
emit(con, orr(t->low, t->low, tmpLo));
@ -168,8 +170,8 @@ void unsignedShiftRightR(Context* con,
void unsignedShiftRightC(Context* con,
unsigned size UNUSED,
lir::Constant* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* b,
lir::RegisterPair* t)
{
assertT(con, size == vm::TargetBytesPerWord);
if (getValue(a) & 0x1F) {
@ -179,102 +181,7 @@ void unsignedShiftRightC(Context* con,
}
}
bool needJump(MyBlock* b)
{
return b->next or b->size != (b->size & PoolOffsetMask);
}
unsigned padding(MyBlock* b, unsigned offset)
{
unsigned total = 0;
for (PoolEvent* e = b->poolEventHead; e; e = e->next) {
if (e->offset <= offset) {
if (needJump(b)) {
total += vm::TargetBytesPerWord;
}
for (PoolOffset* o = e->poolOffsetHead; o; o = o->next) {
total += vm::TargetBytesPerWord;
}
} else {
break;
}
}
return total;
}
void resolve(MyBlock* b)
{
Context* con = b->context;
if (b->poolOffsetHead) {
if (con->poolOffsetTail) {
con->poolOffsetTail->next = b->poolOffsetHead;
} else {
con->poolOffsetHead = b->poolOffsetHead;
}
con->poolOffsetTail = b->poolOffsetTail;
}
if (con->poolOffsetHead) {
bool append;
if (b->next == 0 or b->next->poolEventHead) {
append = true;
} else {
int32_t v
= (b->start + b->size + b->next->size + vm::TargetBytesPerWord - 8)
- (con->poolOffsetHead->offset + con->poolOffsetHead->block->start);
append = (v != (v & PoolOffsetMask));
if (DebugPool) {
fprintf(stderr,
"current %p %d %d next %p %d %d\n",
b,
b->start,
b->size,
b->next,
b->start + b->size,
b->next->size);
fprintf(stderr,
"offset %p %d is of distance %d to next block; append? %d\n",
con->poolOffsetHead,
con->poolOffsetHead->offset,
v,
append);
}
}
if (append) {
#ifndef NDEBUG
int32_t v
= (b->start + b->size - 8)
- (con->poolOffsetHead->offset + con->poolOffsetHead->block->start);
expect(con, v == (v & PoolOffsetMask));
#endif // not NDEBUG
appendPoolEvent(
con, b, b->size, con->poolOffsetHead, con->poolOffsetTail);
if (DebugPool) {
for (PoolOffset* o = con->poolOffsetHead; o; o = o->next) {
fprintf(stderr,
"include %p %d in pool event %p at offset %d in block %p\n",
o,
o->offset,
b->poolEventTail,
b->size,
b);
}
}
con->poolOffsetHead = 0;
con->poolOffsetTail = 0;
}
}
}
void jumpR(Context* con, unsigned size UNUSED, lir::Register* target)
void jumpR(Context* con, unsigned size UNUSED, lir::RegisterPair* target)
{
assertT(con, size == vm::TargetBytesPerWord);
emit(con, bx(target->low));
@ -282,14 +189,14 @@ void jumpR(Context* con, unsigned size UNUSED, lir::Register* target)
void swapRR(Context* con,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize,
lir::Register* b)
lir::RegisterPair* b)
{
assertT(con, aSize == vm::TargetBytesPerWord);
assertT(con, bSize == vm::TargetBytesPerWord);
lir::Register tmp(con->client->acquireTemporary(GPR_MASK));
lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
moveRR(con, aSize, a, bSize, &tmp);
moveRR(con, bSize, b, aSize, a);
moveRR(con, bSize, &tmp, bSize, b);
@ -298,9 +205,9 @@ void swapRR(Context* con,
void moveRR(Context* con,
unsigned srcSize,
lir::Register* src,
lir::RegisterPair* src,
unsigned dstSize,
lir::Register* dst)
lir::RegisterPair* dst)
{
bool srcIsFpr = isFpr(src);
bool dstIsFpr = isFpr(dst);
@ -343,8 +250,8 @@ void moveRR(Context* con,
moveRR(con, 4, src, 4, dst);
emit(con, asri(dst->high, src->low, 31));
} else if (srcSize == 8 and dstSize == 8) {
lir::Register srcHigh(src->high);
lir::Register dstHigh(dst->high);
lir::RegisterPair srcHigh(src->high);
lir::RegisterPair dstHigh(dst->high);
if (src->high == dst->low) {
if (src->low == dst->high) {
@ -369,9 +276,9 @@ void moveRR(Context* con,
void moveZRR(Context* con,
unsigned srcSize,
lir::Register* src,
lir::RegisterPair* src,
unsigned,
lir::Register* dst)
lir::RegisterPair* dst)
{
switch (srcSize) {
case 2:
@ -388,16 +295,16 @@ void moveCR(Context* con,
unsigned size,
lir::Constant* src,
unsigned,
lir::Register* dst);
lir::RegisterPair* dst);
void moveCR2(Context* con,
unsigned size,
lir::Constant* src,
lir::Register* dst,
lir::RegisterPair* dst,
Promise* callOffset)
{
if (isFpr(dst)) { // floating-point
lir::Register tmp = size > 4 ? makeTemp64(con) : makeTemp(con);
lir::RegisterPair tmp = size > 4 ? makeTemp64(con) : makeTemp(con);
moveCR(con, size, src, size, &tmp);
moveRR(con, size, &tmp, size, dst);
freeTemp(con, tmp);
@ -407,10 +314,11 @@ void moveCR2(Context* con,
lir::Constant srcLo(&loBits);
ResolvedPromise hiBits(value >> 32);
lir::Constant srcHi(&hiBits);
lir::Register dstHi(dst->high);
lir::RegisterPair dstHi(dst->high);
moveCR(con, 4, &srcLo, 4, dst);
moveCR(con, 4, &srcHi, 4, &dstHi);
} else if (src->value->resolved() and isOfWidth(getValue(src), 8)) {
} else if (callOffset == 0 and src->value->resolved()
and isOfWidth(getValue(src), 8)) {
emit(con, movi(dst->low, lo8(getValue(src)))); // fits in immediate
} else {
appendConstantPoolEntry(con, src->value, callOffset);
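
The new callOffset == 0 guard matters for patchable constants: a call target recorded with a promise must stay in the constant pool so it can be rewritten later, even if its current value would fit movi's 8-bit immediate. A small illustration (mine; isOfWidth semantics approximated) of the immediate test:

    #include <cstdint>
    #include <cassert>

    bool fitsMovi(int64_t v) { return (v & ~0xFFll) == 0; }  // isOfWidth(v, 8) analogue

    int main()
    {
      assert(fitsMovi(0x42));        // a plain constant can be emitted inline
      assert(not fitsMovi(0x1234));  // wide values always go via the pool
    }
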
@ -422,16 +330,16 @@ void moveCR(Context* con,
unsigned size,
lir::Constant* src,
unsigned,
lir::Register* dst)
lir::RegisterPair* dst)
{
moveCR2(con, size, src, dst, 0);
}
void addR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t)
{
if (size == 8) {
emit(con, SETS(add(t->low, a->low, b->low)));
@ -443,9 +351,9 @@ void addR(Context* con,
void subR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t)
{
if (size == 8) {
emit(con, SETS(rsb(t->low, a->low, b->low)));
@ -458,8 +366,8 @@ void subR(Context* con,
void addC(Context* con,
unsigned size,
lir::Constant* a,
lir::Register* b,
lir::Register* dst)
lir::RegisterPair* b,
lir::RegisterPair* dst)
{
assertT(con, size == vm::TargetBytesPerWord);
@ -481,8 +389,8 @@ void addC(Context* con,
void subC(Context* con,
unsigned size,
lir::Constant* a,
lir::Register* b,
lir::Register* dst)
lir::RegisterPair* b,
lir::RegisterPair* dst)
{
assertT(con, size == vm::TargetBytesPerWord);
@ -503,16 +411,16 @@ void subC(Context* con,
void multiplyR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t)
{
if (size == 8) {
bool useTemporaries = b->low == t->low;
int tmpLow = useTemporaries ? con->client->acquireTemporary(GPR_MASK)
: t->low;
int tmpHigh = useTemporaries ? con->client->acquireTemporary(GPR_MASK)
: t->high;
Register tmpLow = useTemporaries ? con->client->acquireTemporary(GPR_MASK)
: t->low;
Register tmpHigh = useTemporaries ? con->client->acquireTemporary(GPR_MASK)
: t->high;
emit(con, umull(tmpLow, tmpHigh, a->low, b->low));
emit(con, mla(tmpHigh, a->low, b->high, tmpHigh));
@ -531,9 +439,9 @@ void multiplyR(Context* con,
void floatAbsoluteRR(Context* con,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
unsigned,
lir::Register* b)
lir::RegisterPair* b)
{
if (size == 8) {
emit(con, fabsd(fpr64(b), fpr64(a)));
@ -544,9 +452,9 @@ void floatAbsoluteRR(Context* con,
void floatNegateRR(Context* con,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
unsigned,
lir::Register* b)
lir::RegisterPair* b)
{
if (size == 8) {
emit(con, fnegd(fpr64(b), fpr64(a)));
@ -557,9 +465,9 @@ void floatNegateRR(Context* con,
void float2FloatRR(Context* con,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
unsigned,
lir::Register* b)
lir::RegisterPair* b)
{
if (size == 8) {
emit(con, fcvtsd(fpr32(b), fpr64(a)));
@ -570,11 +478,11 @@ void float2FloatRR(Context* con,
void float2IntRR(Context* con,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
unsigned,
lir::Register* b)
lir::RegisterPair* b)
{
int tmp = newTemp(con, FPR_MASK);
Register tmp = newTemp(con, FPR_MASK);
int ftmp = fpr32(tmp);
if (size == 8) { // double to int
emit(con, ftosizd(ftmp, fpr64(a)));
@ -587,9 +495,9 @@ void float2IntRR(Context* con,
void int2FloatRR(Context* con,
unsigned,
lir::Register* a,
lir::RegisterPair* a,
unsigned size,
lir::Register* b)
lir::RegisterPair* b)
{
emit(con, fmsr(fpr32(b), a->low));
if (size == 8) { // int to double
@ -601,9 +509,9 @@ void int2FloatRR(Context* con,
void floatSqrtRR(Context* con,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
unsigned,
lir::Register* b)
lir::RegisterPair* b)
{
if (size == 8) {
emit(con, fsqrtd(fpr64(b), fpr64(a)));
@ -614,9 +522,9 @@ void floatSqrtRR(Context* con,
void floatAddR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t)
{
if (size == 8) {
emit(con, faddd(fpr64(t), fpr64(a), fpr64(b)));
@ -627,9 +535,9 @@ void floatAddR(Context* con,
void floatSubtractR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t)
{
if (size == 8) {
emit(con, fsubd(fpr64(t), fpr64(b), fpr64(a)));
@ -640,9 +548,9 @@ void floatSubtractR(Context* con,
void floatMultiplyR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t)
{
if (size == 8) {
emit(con, fmuld(fpr64(t), fpr64(a), fpr64(b)));
@ -653,9 +561,9 @@ void floatMultiplyR(Context* con,
void floatDivideR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t)
{
if (size == 8) {
emit(con, fdivd(fpr64(t), fpr64(b), fpr64(a)));
@ -664,15 +572,15 @@ void floatDivideR(Context* con,
}
}
int normalize(Context* con,
int offset,
int index,
unsigned scale,
bool* preserveIndex,
bool* release)
Register normalize(Context* con,
int offset,
Register index,
unsigned scale,
bool* preserveIndex,
bool* release)
{
if (offset != 0 or scale != 1) {
lir::Register normalizedIndex(
lir::RegisterPair normalizedIndex(
*preserveIndex ? con->client->acquireTemporary(GPR_MASK) : index);
if (*preserveIndex) {
@ -682,10 +590,10 @@ int normalize(Context* con,
*release = false;
}
int scaled;
Register scaled;
if (scale != 1) {
lir::Register unscaledIndex(index);
lir::RegisterPair unscaledIndex(index);
ResolvedPromise scalePromise(log(scale));
lir::Constant scaleConstant(&scalePromise);
@ -702,12 +610,12 @@ int normalize(Context* con,
}
if (offset != 0) {
lir::Register untranslatedIndex(scaled);
lir::RegisterPair untranslatedIndex(scaled);
ResolvedPromise offsetPromise(offset);
lir::Constant offsetConstant(&offsetPromise);
lir::Register tmp(con->client->acquireTemporary(GPR_MASK));
lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
moveCR(con,
vm::TargetBytesPerWord,
&offsetConstant,
@ -730,16 +638,16 @@ int normalize(Context* con,
void store(Context* con,
unsigned size,
lir::Register* src,
int base,
lir::RegisterPair* src,
Register base,
int offset,
int index,
Register index,
unsigned scale,
bool preserveIndex)
{
if (index != lir::NoRegister) {
if (index != NoRegister) {
bool release;
int normalized
Register normalized
= normalize(con, offset, index, scale, &preserveIndex, &release);
if (!isFpr(src)) { // GPR store
@ -757,7 +665,7 @@ void store(Context* con,
break;
case 8: { // split into 2 32-bit stores
lir::Register srcHigh(src->high);
lir::RegisterPair srcHigh(src->high);
store(con, 4, &srcHigh, base, 0, normalized, 1, preserveIndex);
store(con, 4, src, base, 4, normalized, 1, preserveIndex);
} break;
@ -766,7 +674,7 @@ void store(Context* con,
abort(con);
}
} else { // FPR store
lir::Register base_(base), normalized_(normalized),
lir::RegisterPair base_(base), normalized_(normalized),
absAddr = makeTemp(con);
// FPR stores have only bases, so we must add the index
addR(con, vm::TargetBytesPerWord, &base_, &normalized_, &absAddr);
@ -798,9 +706,9 @@ void store(Context* con,
break;
case 8: { // split into 2 32-bit stores
lir::Register srcHigh(src->high);
store(con, 4, &srcHigh, base, offset, lir::NoRegister, 1, false);
store(con, 4, src, base, offset + 4, lir::NoRegister, 1, false);
lir::RegisterPair srcHigh(src->high);
store(con, 4, &srcHigh, base, offset, NoRegister, 1, false);
store(con, 4, src, base, offset + 4, NoRegister, 1, false);
} break;
default:
@ -815,7 +723,7 @@ void store(Context* con,
emit(con, fsts(fpr32(src), base, offset));
}
} else {
lir::Register tmp(con->client->acquireTemporary(GPR_MASK));
lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
ResolvedPromise offsetPromise(offset);
lir::Constant offsetConstant(&offsetPromise);
moveCR(con,
@ -832,7 +740,7 @@ void store(Context* con,
void moveRM(Context* con,
unsigned srcSize,
lir::Register* src,
lir::RegisterPair* src,
unsigned dstSize UNUSED,
lir::Memory* dst)
{
@ -844,18 +752,18 @@ void moveRM(Context* con,
void load(Context* con,
unsigned srcSize,
int base,
Register base,
int offset,
int index,
Register index,
unsigned scale,
unsigned dstSize,
lir::Register* dst,
lir::RegisterPair* dst,
bool preserveIndex,
bool signExtend)
{
if (index != lir::NoRegister) {
if (index != NoRegister) {
bool release;
int normalized
Register normalized
= normalize(con, offset, index, scale, &preserveIndex, &release);
if (!isFpr(dst)) { // GPR load
@ -882,7 +790,7 @@ void load(Context* con,
load(con, 4, base, 0, normalized, 1, 4, dst, preserveIndex, false);
moveRR(con, 4, dst, 8, dst);
} else if (srcSize == 8 and dstSize == 8) {
lir::Register dstHigh(dst->high);
lir::RegisterPair dstHigh(dst->high);
load(con,
4,
base,
@ -903,7 +811,7 @@ void load(Context* con,
abort(con);
}
} else { // FPR load
lir::Register base_(base), normalized_(normalized),
lir::RegisterPair base_(base), normalized_(normalized),
absAddr = makeTemp(con);
// VFP loads only have bases, so we must add the index
addR(con, vm::TargetBytesPerWord, &base_, &normalized_, &absAddr);
@ -946,27 +854,9 @@ void load(Context* con,
case 8: {
if (dstSize == 8) {
lir::Register dstHigh(dst->high);
load(con,
4,
base,
offset,
lir::NoRegister,
1,
4,
&dstHigh,
false,
false);
load(con,
4,
base,
offset + 4,
lir::NoRegister,
1,
4,
dst,
false,
false);
lir::RegisterPair dstHigh(dst->high);
load(con, 4, base, offset, NoRegister, 1, 4, &dstHigh, false, false);
load(con, 4, base, offset + 4, NoRegister, 1, 4, dst, false, false);
} else {
emit(con, ldri(dst->low, base, offset));
}
@ -984,7 +874,7 @@ void load(Context* con,
emit(con, flds(fpr32(dst), base, offset));
}
} else {
lir::Register tmp(con->client->acquireTemporary(GPR_MASK));
lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
ResolvedPromise offsetPromise(offset);
lir::Constant offsetConstant(&offsetPromise);
moveCR(con,
@ -1003,7 +893,7 @@ void moveMR(Context* con,
unsigned srcSize,
lir::Memory* src,
unsigned dstSize,
lir::Register* dst)
lir::RegisterPair* dst)
{
load(con,
srcSize,
@ -1021,7 +911,7 @@ void moveZMR(Context* con,
unsigned srcSize,
lir::Memory* src,
unsigned dstSize,
lir::Register* dst)
lir::RegisterPair* dst)
{
load(con,
srcSize,
@ -1037,9 +927,9 @@ void moveZMR(Context* con,
void andR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* dst)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* dst)
{
if (size == 8)
emit(con, and_(dst->high, a->high, b->high));
@ -1049,8 +939,8 @@ void andR(Context* con,
void andC(Context* con,
unsigned size,
lir::Constant* a,
lir::Register* b,
lir::Register* dst)
lir::RegisterPair* b,
lir::RegisterPair* dst)
{
int64_t v = a->value->value();
@ -1061,8 +951,8 @@ void andC(Context* con,
ResolvedPromise low(v & 0xFFFFFFFF);
lir::Constant al(&low);
lir::Register bh(b->high);
lir::Register dh(dst->high);
lir::RegisterPair bh(b->high);
lir::RegisterPair dh(dst->high);
andC(con, 4, &al, b, dst);
andC(con, 4, &ah, &bh, &dh);
@ -1078,7 +968,7 @@ void andC(Context* con,
// instruction
bool useTemporary = b->low == dst->low;
lir::Register tmp(dst->low);
lir::RegisterPair tmp(dst->low);
if (useTemporary) {
tmp.low = con->client->acquireTemporary(GPR_MASK);
}
@ -1098,9 +988,9 @@ void andC(Context* con,
void orR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* dst)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* dst)
{
if (size == 8)
emit(con, orr(dst->high, a->high, b->high));
@ -1109,9 +999,9 @@ void orR(Context* con,
void xorR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* dst)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* dst)
{
if (size == 8)
emit(con, eor(dst->high, a->high, b->high));
@ -1122,14 +1012,14 @@ void moveAR2(Context* con,
unsigned srcSize,
lir::Address* src,
unsigned dstSize,
lir::Register* dst)
lir::RegisterPair* dst)
{
assertT(con, srcSize == 4 and dstSize == 4);
lir::Constant constant(src->address);
moveCR(con, srcSize, &constant, dstSize, dst);
lir::Memory memory(dst->low, 0, -1, 0);
lir::Memory memory(dst->low, 0, NoRegister, 0);
moveMR(con, dstSize, &memory, dstSize, dst);
}
@ -1137,16 +1027,16 @@ void moveAR(Context* con,
unsigned srcSize,
lir::Address* src,
unsigned dstSize,
lir::Register* dst)
lir::RegisterPair* dst)
{
moveAR2(con, srcSize, src, dstSize, dst);
}
void compareRR(Context* con,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b)
lir::RegisterPair* b)
{
assertT(con, !(isFpr(a) ^ isFpr(b))); // regs must be of the same type
@ -1168,14 +1058,14 @@ void compareCR(Context* con,
unsigned aSize,
lir::Constant* a,
unsigned bSize,
lir::Register* b)
lir::RegisterPair* b)
{
assertT(con, aSize == 4 and bSize == 4);
if (!isFpr(b) && a->value->resolved() && isOfWidth(a->value->value(), 8)) {
emit(con, cmpi(b->low, a->value->value()));
} else {
lir::Register tmp(con->client->acquireTemporary(GPR_MASK));
lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
moveCR(con, aSize, a, bSize, &tmp);
compareRR(con, bSize, &tmp, bSize, b);
con->client->releaseTemporary(tmp.low);
@ -1190,7 +1080,7 @@ void compareCM(Context* con,
{
assertT(con, aSize == 4 and bSize == 4);
lir::Register tmp(con->client->acquireTemporary(GPR_MASK));
lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
moveMR(con, bSize, b, bSize, &tmp);
compareCR(con, aSize, a, bSize, &tmp);
con->client->releaseTemporary(tmp.low);
@ -1198,13 +1088,13 @@ void compareCM(Context* con,
void compareRM(Context* con,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize,
lir::Memory* b)
{
assertT(con, aSize == 4 and bSize == 4);
lir::Register tmp(con->client->acquireTemporary(GPR_MASK));
lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
moveMR(con, bSize, b, bSize, &tmp);
compareRR(con, aSize, a, bSize, &tmp);
con->client->releaseTemporary(tmp.low);
@ -1352,13 +1242,13 @@ void branchLong(Context* con,
void branchRR(Context* con,
lir::TernaryOperation op,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::Constant* target)
{
if (!isFpr(a) && size > vm::TargetBytesPerWord) {
lir::Register ah(a->high);
lir::Register bh(b->high);
lir::RegisterPair ah(a->high);
lir::RegisterPair bh(b->high);
branchLong(
con, op, a, &ah, b, &bh, target, CAST2(compareRR), CAST2(compareRR));
@ -1372,7 +1262,7 @@ void branchCR(Context* con,
lir::TernaryOperation op,
unsigned size,
lir::Constant* a,
lir::Register* b,
lir::RegisterPair* b,
lir::Constant* target)
{
assertT(con, !isFloatBranch(op));
@ -1386,7 +1276,7 @@ void branchCR(Context* con,
ResolvedPromise high((v >> 32) & ~static_cast<vm::target_uintptr_t>(0));
lir::Constant ah(&high);
lir::Register bh(b->high);
lir::RegisterPair bh(b->high);
branchLong(
con, op, &al, &ah, b, &bh, target, CAST2(compareCR), CAST2(compareCR));
@ -1399,7 +1289,7 @@ void branchCR(Context* con,
void branchRM(Context* con,
lir::TernaryOperation op,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
lir::Memory* b,
lir::Constant* target)
{
@ -1450,7 +1340,7 @@ void moveCM(Context* con,
} break;
default:
lir::Register tmp(con->client->acquireTemporary(GPR_MASK));
lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
moveCR(con, srcSize, src, dstSize, &tmp);
moveRM(con, dstSize, &tmp, dstSize, dst);
con->client->releaseTemporary(tmp.low);
@ -1459,9 +1349,9 @@ void moveCM(Context* con,
void negateRR(Context* con,
unsigned srcSize,
lir::Register* src,
lir::RegisterPair* src,
unsigned dstSize UNUSED,
lir::Register* dst)
lir::RegisterPair* dst)
{
assertT(con, srcSize == dstSize);
@ -1473,7 +1363,7 @@ void negateRR(Context* con,
}
}
void callR(Context* con, unsigned size UNUSED, lir::Register* target)
void callR(Context* con, unsigned size UNUSED, lir::RegisterPair* target)
{
assertT(con, size == vm::TargetBytesPerWord);
emit(con, blx(target->low));
@ -1491,20 +1381,31 @@ void longCallC(Context* con, unsigned size UNUSED, lir::Constant* target)
{
assertT(con, size == vm::TargetBytesPerWord);
lir::Register tmp(4);
lir::RegisterPair tmp(Register(4));
moveCR2(con, vm::TargetBytesPerWord, target, &tmp, offsetPromise(con));
callR(con, vm::TargetBytesPerWord, &tmp);
}
void alignedLongCallC(Context* con, unsigned size, lir::Constant* target)
{
longCallC(con, size, target);
}
void longJumpC(Context* con, unsigned size UNUSED, lir::Constant* target)
{
assertT(con, size == vm::TargetBytesPerWord);
lir::Register tmp(4); // a non-arg reg that we don't mind clobbering
lir::RegisterPair tmp(
Register(4)); // a non-arg reg that we don't mind clobbering
moveCR2(con, vm::TargetBytesPerWord, target, &tmp, offsetPromise(con));
jumpR(con, vm::TargetBytesPerWord, &tmp);
}
void alignedLongJumpC(Context* con, unsigned size, lir::Constant* target)
{
longJumpC(con, size, target);
}
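
Both long forms materialize the target into r4, which under the 32-bit AAPCS is neither an argument register nor otherwise special here, then branch through it; the aligned variants can simply delegate because A32 instructions are always 4-byte aligned. The emitted shape, sketched as comments:

    // longCallC:                           longJumpC:
    //   ldr r4, [pc, #pool]  ; patchable     ldr r4, [pc, #pool]
    //   blx r4                               bx  r4
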
void jumpC(Context* con, unsigned size UNUSED, lir::Constant* target)
{
assertT(con, size == vm::TargetBytesPerWord);
@ -1554,3 +1455,5 @@ void storeLoadBarrier(Context* con)
} // namespace arm
} // namespace codegen
} // namespace avian
#endif // TARGET_BYTES_PER_WORD == 4

(File diff suppressed because it is too large.)


@ -14,48 +14,61 @@
#include <avian/codegen/lir.h>
#include <avian/codegen/assembler.h>
#include "avian/environment.h"
namespace avian {
namespace codegen {
namespace arm {
const uint64_t MASK_LO32 = 0xffffffff;
const unsigned MASK_LO16 = 0xffff;
const unsigned MASK_LO8 = 0xff;
#if TARGET_BYTES_PER_WORD == 8
constexpr Register ThreadRegister(19);
constexpr Register StackRegister(31);
constexpr Register LinkRegister(30);
constexpr Register FrameRegister(29);
constexpr Register ProgramCounter(0xFE); // i.e. unaddressable
const int N_GPRS = 32;
const int N_FPRS = 32;
const RegisterMask GPR_MASK = 0xffffffff;
const RegisterMask FPR_MASK = 0xffffffff00000000;
#else
constexpr Register ThreadRegister(8);
constexpr Register StackRegister(13);
constexpr Register LinkRegister(14);
constexpr Register FrameRegister(0xFE); // i.e. there is none
constexpr Register ProgramCounter(15);
const int N_GPRS = 16;
const int N_FPRS = 16;
const uint32_t GPR_MASK = 0xffff;
const uint32_t FPR_MASK = 0xffff0000;
const RegisterMask GPR_MASK = 0xffff;
const RegisterMask FPR_MASK = 0xffff0000;
const uint64_t GPR_MASK64 = GPR_MASK | (uint64_t)GPR_MASK << 32;
const uint64_t FPR_MASK64 = FPR_MASK | (uint64_t)FPR_MASK << 32;
inline bool isFpr(lir::Register* reg)
inline int fpr64(Register reg)
{
return reg->low >= N_GPRS;
return reg.index() - N_GPRS;
}
inline int fpr64(int reg)
{
return reg - N_GPRS;
}
inline int fpr64(lir::Register* reg)
inline int fpr64(lir::RegisterPair* reg)
{
return fpr64(reg->low);
}
inline int fpr32(int reg)
inline int fpr32(Register reg)
{
return fpr64(reg) << 1;
}
inline int fpr32(lir::Register* reg)
inline int fpr32(lir::RegisterPair* reg)
{
return fpr64(reg) << 1;
}
#endif
const int ThreadRegister = 8;
const int StackRegister = 13;
const int LinkRegister = 14;
const int ProgramCounter = 15;
inline bool isFpr(lir::RegisterPair* reg)
{
return reg->low.index() >= N_GPRS;
}
} // namespace arm
} // namespace codegen
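A worked example of the FPR index mapping above, assuming the 32-bit layout in which GPRs occupy indices 0 through N_GPRS - 1 and the VFP bank starts at N_GPRS; each 64-bit d-register overlays two 32-bit s-registers, hence the doubling in fpr32:

    // Standalone sketch of the mapping, not the project's header.
    const int N_GPRS = 16;

    inline int fpr64(int index) { return index - N_GPRS; }     // d-register number
    inline int fpr32(int index) { return fpr64(index) << 1; }  // first s-register half

    // fpr64(18) == 2 and fpr32(18) == 4: register index 18 is d2, i.e. s4/s5.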


@ -17,9 +17,9 @@ namespace codegen {
class Multimethod {
public:
inline static unsigned index(lir::UnaryOperation operation,
lir::OperandType operand)
lir::Operand::Type operand)
{
return operation + (lir::UnaryOperationCount * operand);
return operation + (lir::UnaryOperationCount * (unsigned)operand);
}
};
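The new cast is forced by the type change: lir::Operand::Type is a scoped enumeration, which, unlike the old plain-enum lir::OperandType, does not convert to unsigned implicitly. A minimal illustration (the enumerator names here are assumptions):

    enum OldType { Constant, Address };        // plain enum: converts implicitly
    enum class NewType { Constant, Address };  // scoped enum: conversion is explicit

    unsigned a = Constant;                     // OK
    unsigned b = (unsigned)NewType::Constant;  // cast required; bare use won't compile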


@ -183,37 +183,37 @@ class MyArchitecture : public Architecture {
return &myRegisterFile;
}
virtual int scratch()
virtual Register scratch()
{
return rax;
}
virtual int stack()
virtual Register stack()
{
return rsp;
}
virtual int thread()
virtual Register thread()
{
return rbx;
}
virtual int returnLow()
virtual Register returnLow()
{
return rax;
}
virtual int returnHigh()
virtual Register returnHigh()
{
return (TargetBytesPerWord == 4 ? rdx : lir::NoRegister);
return (TargetBytesPerWord == 4 ? rdx : NoRegister);
}
virtual int virtualCallTarget()
virtual Register virtualCallTarget()
{
return rax;
}
virtual int virtualCallIndex()
virtual Register virtualCallIndex()
{
return rdx;
}
@ -233,14 +233,14 @@ class MyArchitecture : public Architecture {
return 0x7FFFFFFF;
}
virtual bool reserved(int register_)
virtual bool reserved(Register register_)
{
switch (register_) {
case rbp:
switch (register_.index()) {
case rbp.index():
return UseFramePointer;
case rsp:
case rbx:
case rsp.index():
case rbx.index():
return true;
default:
@ -289,7 +289,7 @@ class MyArchitecture : public Architecture {
return 0;
}
virtual int argumentRegister(unsigned index)
virtual Register argumentRegister(unsigned index)
{
assertT(&c, TargetBytesPerWord == 8);
switch (index) {
@ -501,8 +501,8 @@ class MyArchitecture : public Architecture {
OperandMask& aMask,
bool* thunk)
{
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand)
| (1 << lir::ConstantOperand);
aMask.typeMask = lir::Operand::RegisterPairMask | lir::Operand::MemoryMask
| lir::Operand::ConstantMask;
*thunk = false;
}
@ -512,22 +512,20 @@ class MyArchitecture : public Architecture {
unsigned bSize,
bool* thunk)
{
aMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
aMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
*thunk = false;
switch (op) {
case lir::Negate:
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = (static_cast<uint64_t>(1) << (rdx + 32))
| (static_cast<uint64_t>(1) << rax);
aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.setLowHighRegisterMasks(rax, rdx);
break;
case lir::Absolute:
if (aSize <= TargetBytesPerWord) {
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = (static_cast<uint64_t>(1) << rax);
aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.setLowHighRegisterMasks(rax, 0);
} else {
*thunk = true;
}
@ -535,9 +533,8 @@ class MyArchitecture : public Architecture {
case lir::FloatAbsolute:
if (useSSE(&c)) {
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask;
aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
} else {
*thunk = true;
}
@ -546,8 +543,8 @@ class MyArchitecture : public Architecture {
case lir::FloatNegate:
// floatNegateRR does not support doubles
if (useSSE(&c) and aSize == 4 and bSize == 4) {
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = FloatRegisterMask;
aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.setLowHighRegisterMasks(FloatRegisterMask, 0);
} else {
*thunk = true;
}
@ -555,10 +552,9 @@ class MyArchitecture : public Architecture {
case lir::FloatSquareRoot:
if (useSSE(&c)) {
aMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::MemoryOperand);
aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask;
aMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
} else {
*thunk = true;
}
@ -566,10 +562,9 @@ class MyArchitecture : public Architecture {
case lir::Float2Float:
if (useSSE(&c)) {
aMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::MemoryOperand);
aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask;
aMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
} else {
*thunk = true;
}
@ -581,10 +576,9 @@ class MyArchitecture : public Architecture {
// thunks or produce inline machine code which handles edge
// cases properly.
if (false and useSSE(&c) and bSize <= TargetBytesPerWord) {
aMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::MemoryOperand);
aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask;
aMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
} else {
*thunk = true;
}
@ -592,11 +586,9 @@ class MyArchitecture : public Architecture {
case lir::Int2Float:
if (useSSE(&c) and aSize <= TargetBytesPerWord) {
aMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::MemoryOperand);
aMask.registerMask
= GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
aMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
aMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
} else {
*thunk = true;
}
@ -604,21 +596,20 @@ class MyArchitecture : public Architecture {
case lir::Move:
aMask.typeMask = ~0;
aMask.registerMask = ~static_cast<uint64_t>(0);
aMask.setLowHighRegisterMasks(AnyRegisterMask, AnyRegisterMask);
if (TargetBytesPerWord == 4) {
if (aSize == 4 and bSize == 8) {
aMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::MemoryOperand);
const uint32_t mask = GeneralRegisterMask
& ~((1 << rax) | (1 << rdx));
aMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
aMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
const RegisterMask mask = GeneralRegisterMask
.excluding(rax).excluding(rdx);
aMask.setLowHighRegisterMasks(mask, mask);
} else if (aSize == 1 or bSize == 1) {
aMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::MemoryOperand);
const uint32_t mask = (1 << rax) | (1 << rcx) | (1 << rdx)
| (1 << rbx);
aMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
aMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
const RegisterMask mask = rax | rcx | rdx | rbx;
aMask.setLowHighRegisterMasks(mask, mask);
}
}
break;
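The rewritten masks lean on a small RegisterMask value type in place of hand-assembled 64-bit shifts. A rough reconstruction of the helpers this code appears to rely on, assuming a mask is a 64-bit bit set (low word in bits 0-31, high word in bits 32-63); the real definitions live in the codegen headers:

    #include <cstdint>

    // Hypothetical reconstruction for illustration only.
    struct Register {
      int8_t idx;
      constexpr explicit Register(int8_t i) : idx(i) {}
      constexpr int index() const { return idx; }
    };

    struct RegisterMask {
      uint64_t mask;
      constexpr RegisterMask(uint64_t m) : mask(m) {}
      constexpr RegisterMask(Register r) : mask(1ull << r.index()) {}
      constexpr RegisterMask excluding(Register r) const
      {
        return RegisterMask(mask & ~(1ull << r.index()));
      }
    };

    constexpr RegisterMask operator|(Register a, Register b)
    {
      return RegisterMask((1ull << a.index()) | (1ull << b.index()));
    }

    constexpr RegisterMask operator|(RegisterMask a, Register b)
    {
      return RegisterMask(a.mask | (1ull << b.index()));
    }

Under that reading, rax | rcx | rdx | rbx builds the byte-addressable-register mask used above, GeneralRegisterMask.excluding(rax).excluding(rdx) carves out the pair a widening multiply clobbers, and setLowHighRegisterMasks presumably just stores its two arguments into lowRegisterMask and highRegisterMask, which is how later code reads them back.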
@ -635,68 +626,62 @@ class MyArchitecture : public Architecture {
OperandMask& bMask)
{
bMask.typeMask = ~0;
bMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
bMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
switch (op) {
case lir::Absolute:
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = (static_cast<uint64_t>(1) << rax);
bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.setLowHighRegisterMasks(rax, 0);
break;
case lir::FloatAbsolute:
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = aMask.registerMask;
bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.lowRegisterMask = aMask.lowRegisterMask;
bMask.highRegisterMask = aMask.highRegisterMask;
break;
case lir::Negate:
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = aMask.registerMask;
bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.lowRegisterMask = aMask.lowRegisterMask;
bMask.highRegisterMask = aMask.highRegisterMask;
break;
case lir::FloatNegate:
case lir::FloatSquareRoot:
case lir::Float2Float:
case lir::Int2Float:
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask;
bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
break;
case lir::Float2Int:
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.typeMask = lir::Operand::RegisterPairMask;
break;
case lir::Move:
if (aMask.typeMask
& ((1 << lir::MemoryOperand) | 1 << lir::AddressOperand)) {
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask)
<< 32) | FloatRegisterMask;
} else if (aMask.typeMask & (1 << lir::RegisterOperand)) {
bMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::MemoryOperand);
if (aMask.registerMask & FloatRegisterMask) {
bMask.registerMask = FloatRegisterMask;
& (lir::Operand::MemoryMask | lir::Operand::AddressMask)) {
bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.setLowHighRegisterMasks(GeneralRegisterMask | FloatRegisterMask, GeneralRegisterMask);
} else if (aMask.typeMask & lir::Operand::RegisterPairMask) {
bMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
if (aMask.lowRegisterMask & FloatRegisterMask) {
bMask.setLowHighRegisterMasks(FloatRegisterMask, 0);
} else {
bMask.registerMask
= GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
bMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
}
} else {
bMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::MemoryOperand);
bMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
}
if (TargetBytesPerWord == 4) {
if (aSize == 4 and bSize == 8) {
bMask.registerMask = (static_cast<uint64_t>(1) << (rdx + 32))
| (static_cast<uint64_t>(1) << rax);
bMask.setLowHighRegisterMasks(rax, rdx);
} else if (aSize == 1 or bSize == 1) {
const uint32_t mask = (1 << rax) | (1 << rcx) | (1 << rdx)
| (1 << rbx);
bMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
const RegisterMask mask = rax | rcx | rdx | rbx;
bMask.setLowHighRegisterMasks(mask, mask);
}
}
break;
@ -712,46 +697,38 @@ class MyArchitecture : public Architecture {
const OperandMask& dstMask)
{
srcMask.typeMask = ~0;
srcMask.registerMask = ~static_cast<uint64_t>(0);
srcMask.setLowHighRegisterMasks(AnyRegisterMask, AnyRegisterMask);
tmpMask.typeMask = 0;
tmpMask.registerMask = 0;
tmpMask.setLowHighRegisterMasks(0, 0);
if (dstMask.typeMask & (1 << lir::MemoryOperand)) {
if (dstMask.typeMask & lir::Operand::MemoryMask) {
// can't move directly from memory to memory
srcMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::ConstantOperand);
tmpMask.typeMask = 1 << lir::RegisterOperand;
tmpMask.registerMask
= GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
} else if (dstMask.typeMask & (1 << lir::RegisterOperand)) {
srcMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::ConstantMask;
tmpMask.typeMask = lir::Operand::RegisterPairMask;
tmpMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
} else if (dstMask.typeMask & lir::Operand::RegisterPairMask) {
if (size > TargetBytesPerWord) {
// can't move directly from FPR to GPR or vice-versa for
// values larger than the GPR size
if (dstMask.registerMask & FloatRegisterMask) {
srcMask.registerMask
= FloatRegisterMask
| (static_cast<uint64_t>(FloatRegisterMask) << 32);
tmpMask.typeMask = 1 << lir::MemoryOperand;
} else if (dstMask.registerMask & GeneralRegisterMask) {
srcMask.registerMask
= GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
tmpMask.typeMask = 1 << lir::MemoryOperand;
if (dstMask.lowRegisterMask & FloatRegisterMask) {
srcMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
tmpMask.typeMask = lir::Operand::MemoryMask;
} else if (dstMask.lowRegisterMask & GeneralRegisterMask) {
srcMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
tmpMask.typeMask = lir::Operand::MemoryMask;
}
}
if (dstMask.registerMask & FloatRegisterMask) {
if (dstMask.lowRegisterMask & FloatRegisterMask) {
// can't move directly from constant to FPR
srcMask.typeMask &= ~(1 << lir::ConstantOperand);
srcMask.typeMask &= ~lir::Operand::ConstantMask;
if (size > TargetBytesPerWord) {
tmpMask.typeMask = 1 << lir::MemoryOperand;
tmpMask.typeMask = lir::Operand::MemoryMask;
} else {
tmpMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::MemoryOperand);
tmpMask.registerMask
= GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
tmpMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
tmpMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
}
}
}
@ -765,13 +742,11 @@ class MyArchitecture : public Architecture {
unsigned,
bool* thunk)
{
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
aMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
aMask.typeMask = lir::Operand::RegisterPairMask | lir::Operand::ConstantMask;
aMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
*thunk = false;
@ -781,14 +756,12 @@ class MyArchitecture : public Architecture {
case lir::FloatMultiply:
case lir::FloatDivide:
if (useSSE(&c)) {
aMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::MemoryOperand);
bMask.typeMask = (1 << lir::RegisterOperand);
aMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
bMask.typeMask = lir::Operand::RegisterPairMask;
const uint64_t mask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask;
aMask.registerMask = mask;
bMask.registerMask = mask;
aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
bMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
} else {
*thunk = true;
}
@ -800,12 +773,12 @@ class MyArchitecture : public Architecture {
case lir::Multiply:
if (TargetBytesPerWord == 4 and aSize == 8) {
const uint32_t mask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx));
aMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
bMask.registerMask = (static_cast<uint64_t>(1) << (rdx + 32)) | mask;
const RegisterMask mask = GeneralRegisterMask.excluding(rax).excluding(rdx);
aMask.setLowHighRegisterMasks(mask, mask);
bMask.setLowHighRegisterMasks(mask, rdx);
} else {
aMask.registerMask = GeneralRegisterMask;
bMask.registerMask = GeneralRegisterMask;
aMask.setLowHighRegisterMasks(GeneralRegisterMask, 0);
bMask.setLowHighRegisterMasks(GeneralRegisterMask, 0);
}
break;
@ -813,9 +786,9 @@ class MyArchitecture : public Architecture {
if (TargetBytesPerWord == 4 and aSize == 8) {
*thunk = true;
} else {
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx));
bMask.registerMask = 1 << rax;
aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.setLowHighRegisterMasks(GeneralRegisterMask.excluding(rax).excluding(rdx), 0);
bMask.setLowHighRegisterMasks(rax, 0);
}
break;
@ -823,9 +796,9 @@ class MyArchitecture : public Architecture {
if (TargetBytesPerWord == 4 and aSize == 8) {
*thunk = true;
} else {
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx));
bMask.registerMask = 1 << rax;
aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.setLowHighRegisterMasks(GeneralRegisterMask.excluding(rax).excluding(rdx), 0);
bMask.setLowHighRegisterMasks(rax, 0);
}
break;
@ -833,14 +806,13 @@ class MyArchitecture : public Architecture {
case lir::ShiftRight:
case lir::UnsignedShiftRight: {
if (TargetBytesPerWord == 4 and bSize == 8) {
const uint32_t mask = GeneralRegisterMask & ~(1 << rcx);
aMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
bMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
const RegisterMask mask = GeneralRegisterMask.excluding(rcx);
aMask.setLowHighRegisterMasks(mask, mask);
bMask.setLowHighRegisterMasks(mask, mask);
} else {
aMask.registerMask = (static_cast<uint64_t>(GeneralRegisterMask) << 32)
| (static_cast<uint64_t>(1) << rcx);
const uint32_t mask = GeneralRegisterMask & ~(1 << rcx);
bMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
aMask.setLowHighRegisterMasks(rcx, GeneralRegisterMask);
const RegisterMask mask = GeneralRegisterMask.excluding(rcx);
bMask.setLowHighRegisterMasks(mask, mask);
}
} break;
@ -855,11 +827,11 @@ class MyArchitecture : public Architecture {
case lir::JumpIfFloatLessOrEqualOrUnordered:
case lir::JumpIfFloatGreaterOrEqualOrUnordered:
if (useSSE(&c)) {
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask;
aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
bMask.typeMask = aMask.typeMask;
bMask.registerMask = aMask.registerMask;
bMask.lowRegisterMask = aMask.lowRegisterMask;
bMask.highRegisterMask = aMask.highRegisterMask;
} else {
*thunk = true;
}
@ -879,11 +851,12 @@ class MyArchitecture : public Architecture {
OperandMask& cMask)
{
if (isBranch(op)) {
cMask.typeMask = (1 << lir::ConstantOperand);
cMask.registerMask = 0;
cMask.typeMask = lir::Operand::ConstantMask;
cMask.setLowHighRegisterMasks(0, 0);
} else {
cMask.typeMask = (1 << lir::RegisterOperand);
cMask.registerMask = bMask.registerMask;
cMask.typeMask = lir::Operand::RegisterPairMask;
cMask.lowRegisterMask = bMask.lowRegisterMask;
cMask.highRegisterMask = bMask.highRegisterMask;
}
}
@ -927,7 +900,7 @@ class MyAssembler : public Assembler {
virtual void checkStackOverflow(uintptr_t handler,
unsigned stackLimitOffsetFromThread)
{
lir::Register stack(rsp);
lir::RegisterPair stack(rsp);
lir::Memory stackLimit(rbx, stackLimitOffsetFromThread);
lir::Constant handlerConstant(resolvedPromise(&c, handler));
branchRM(&c,
@ -940,11 +913,11 @@ class MyAssembler : public Assembler {
virtual void saveFrame(unsigned stackOffset, unsigned)
{
lir::Register stack(rsp);
lir::RegisterPair stack(rsp);
lir::Memory stackDst(rbx, stackOffset);
apply(lir::Move,
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack),
OperandInfo(TargetBytesPerWord, lir::MemoryOperand, &stackDst));
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack),
OperandInfo(TargetBytesPerWord, lir::Operand::Type::Memory, &stackDst));
}
virtual void pushFrame(unsigned argumentCount, ...)
@ -952,7 +925,7 @@ class MyAssembler : public Assembler {
// TODO: Argument should be replaced by OperandInfo...
struct Argument {
unsigned size;
lir::OperandType type;
lir::Operand::Type type;
lir::Operand* operand;
};
RUNTIME_ARRAY(Argument, arguments, argumentCount);
@ -962,7 +935,7 @@ class MyAssembler : public Assembler {
for (unsigned i = 0; i < argumentCount; ++i) {
RUNTIME_ARRAY_BODY(arguments)[i].size = va_arg(a, unsigned);
RUNTIME_ARRAY_BODY(arguments)[i].type
= static_cast<lir::OperandType>(va_arg(a, int));
= static_cast<lir::Operand::Type>(va_arg(a, int));
RUNTIME_ARRAY_BODY(arguments)[i].operand = va_arg(a, lir::Operand*);
footprint += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size,
TargetBytesPerWord);
@ -974,14 +947,14 @@ class MyAssembler : public Assembler {
unsigned offset = 0;
for (unsigned i = 0; i < argumentCount; ++i) {
if (i < arch_->argumentRegisterCount()) {
lir::Register dst(arch_->argumentRegister(i));
lir::RegisterPair dst(arch_->argumentRegister(i));
apply(lir::Move,
OperandInfo(RUNTIME_ARRAY_BODY(arguments)[i].size,
RUNTIME_ARRAY_BODY(arguments)[i].type,
RUNTIME_ARRAY_BODY(arguments)[i].operand),
OperandInfo(pad(RUNTIME_ARRAY_BODY(arguments)[i].size,
TargetBytesPerWord),
lir::RegisterOperand,
lir::Operand::Type::RegisterPair,
&dst));
} else {
lir::Memory dst(rsp, offset * TargetBytesPerWord);
@ -991,7 +964,7 @@ class MyAssembler : public Assembler {
RUNTIME_ARRAY_BODY(arguments)[i].operand),
OperandInfo(pad(RUNTIME_ARRAY_BODY(arguments)[i].size,
TargetBytesPerWord),
lir::MemoryOperand,
lir::Operand::Type::Memory,
&dst));
offset += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size,
TargetBytesPerWord);
@ -1001,67 +974,67 @@ class MyAssembler : public Assembler {
virtual void allocateFrame(unsigned footprint)
{
lir::Register stack(rsp);
lir::RegisterPair stack(rsp);
if (UseFramePointer) {
lir::Register base(rbp);
lir::RegisterPair base(rbp);
pushR(&c, TargetBytesPerWord, &base);
apply(lir::Move,
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &base));
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack),
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &base));
}
lir::Constant footprintConstant(
resolvedPromise(&c, footprint * TargetBytesPerWord));
apply(lir::Subtract,
OperandInfo(
TargetBytesPerWord, lir::ConstantOperand, &footprintConstant),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack));
TargetBytesPerWord, lir::Operand::Type::Constant, &footprintConstant),
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack),
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack));
}
virtual void adjustFrame(unsigned difference)
{
lir::Register stack(rsp);
lir::RegisterPair stack(rsp);
lir::Constant differenceConstant(
resolvedPromise(&c, difference * TargetBytesPerWord));
apply(lir::Subtract,
OperandInfo(
TargetBytesPerWord, lir::ConstantOperand, &differenceConstant),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack));
TargetBytesPerWord, lir::Operand::Type::Constant, &differenceConstant),
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack),
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack));
}
virtual void popFrame(unsigned frameFootprint)
{
if (UseFramePointer) {
lir::Register base(rbp);
lir::Register stack(rsp);
lir::RegisterPair base(rbp);
lir::RegisterPair stack(rsp);
apply(lir::Move,
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &base),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack));
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &base),
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack));
popR(&c, TargetBytesPerWord, &base);
} else {
lir::Register stack(rsp);
lir::RegisterPair stack(rsp);
lir::Constant footprint(
resolvedPromise(&c, frameFootprint * TargetBytesPerWord));
apply(lir::Add,
OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &footprint),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack));
OperandInfo(TargetBytesPerWord, lir::Operand::Type::Constant, &footprint),
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack),
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack));
}
}
virtual void popFrameForTailCall(unsigned frameFootprint,
int offset,
int returnAddressSurrogate,
int framePointerSurrogate)
Register returnAddressSurrogate,
Register framePointerSurrogate)
{
if (TailCalls) {
if (offset) {
lir::Register tmp(c.client->acquireTemporary());
lir::RegisterPair tmp(c.client->acquireTemporary());
unsigned baseSize = UseFramePointer ? 1 : 0;
@ -1085,28 +1058,28 @@ class MyAssembler : public Assembler {
if (UseFramePointer) {
lir::Memory baseSrc(rsp, frameFootprint * TargetBytesPerWord);
lir::Register base(rbp);
lir::RegisterPair base(rbp);
moveMR(&c, TargetBytesPerWord, &baseSrc, TargetBytesPerWord, &base);
}
lir::Register stack(rsp);
lir::RegisterPair stack(rsp);
lir::Constant footprint(resolvedPromise(
&c, (frameFootprint - offset + baseSize) * TargetBytesPerWord));
addCR(&c, TargetBytesPerWord, &footprint, TargetBytesPerWord, &stack);
if (returnAddressSurrogate != lir::NoRegister) {
if (returnAddressSurrogate != NoRegister) {
assertT(&c, offset > 0);
lir::Register ras(returnAddressSurrogate);
lir::RegisterPair ras(returnAddressSurrogate);
lir::Memory dst(rsp, offset * TargetBytesPerWord);
moveRM(&c, TargetBytesPerWord, &ras, TargetBytesPerWord, &dst);
}
if (framePointerSurrogate != lir::NoRegister) {
if (framePointerSurrogate != NoRegister) {
assertT(&c, offset > 0);
lir::Register fps(framePointerSurrogate);
lir::RegisterPair fps(framePointerSurrogate);
lir::Memory dst(rsp, (offset - 1) * TargetBytesPerWord);
moveRM(&c, TargetBytesPerWord, &fps, TargetBytesPerWord, &dst);
}
@ -1127,10 +1100,10 @@ class MyAssembler : public Assembler {
assertT(&c, (argumentFootprint % StackAlignmentInWords) == 0);
if (TailCalls and argumentFootprint > StackAlignmentInWords) {
lir::Register returnAddress(rcx);
lir::RegisterPair returnAddress(rcx);
popR(&c, TargetBytesPerWord, &returnAddress);
lir::Register stack(rsp);
lir::RegisterPair stack(rsp);
lir::Constant adjustment(resolvedPromise(
&c,
(argumentFootprint - StackAlignmentInWords) * TargetBytesPerWord));
@ -1147,10 +1120,10 @@ class MyAssembler : public Assembler {
{
popFrame(frameFootprint);
lir::Register returnAddress(rcx);
lir::RegisterPair returnAddress(rcx);
popR(&c, TargetBytesPerWord, &returnAddress);
lir::Register stack(rsp);
lir::RegisterPair stack(rsp);
lir::Memory stackSrc(rbx, stackOffsetFromThread);
moveMR(&c, TargetBytesPerWord, &stackSrc, TargetBytesPerWord, &stack);
@ -1182,7 +1155,7 @@ class MyAssembler : public Assembler {
if (isBranch(op)) {
assertT(&this->c, a.size == b.size);
assertT(&this->c, c.size == TargetBytesPerWord);
assertT(&this->c, c.type == lir::ConstantOperand);
assertT(&this->c, c.type == lir::Operand::Type::Constant);
arch_->c.branchOperations[branchIndex(&(arch_->c), a.type, b.type)](
&this->c, op, a.size, a.operand, b.operand, c.operand);


@ -68,13 +68,13 @@ class ArchitectureContext {
bool useNativeFeatures;
OperationType operations[lir::OperationCount];
UnaryOperationType
unaryOperations[lir::UnaryOperationCount * lir::OperandTypeCount];
unaryOperations[lir::UnaryOperationCount * lir::Operand::TypeCount];
BinaryOperationType binaryOperations
[(lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
* lir::OperandTypeCount * lir::OperandTypeCount];
* lir::Operand::TypeCount * lir::Operand::TypeCount];
BranchOperationType branchOperations[lir::BranchOperationCount
* lir::OperandTypeCount
* lir::OperandTypeCount];
* lir::Operand::TypeCount
* lir::Operand::TypeCount];
};
class Context {


@ -53,9 +53,9 @@ namespace x86 {
void maybeRex(Context* c,
unsigned size,
int a,
int index,
int base,
Register a,
Register index,
Register base,
bool always)
{
if (vm::TargetBytesPerWord == 8) {
@ -65,63 +65,63 @@ void maybeRex(Context* c,
} else {
byte = REX_NONE;
}
if (a != lir::NoRegister and (a & 8))
if (a != NoRegister and (a.index() & 8))
byte |= REX_R;
if (index != lir::NoRegister and (index & 8))
if (index != NoRegister and (index.index() & 8))
byte |= REX_X;
if (base != lir::NoRegister and (base & 8))
if (base != NoRegister and (base.index() & 8))
byte |= REX_B;
if (always or byte != REX_NONE)
c->code.append(byte);
}
}
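A worked example of the REX computation, using the standard x86-64 prefix layout 0100WRXB: only registers with index 8 or above (r8-r15, xmm8-xmm15) have bit 3 set, so only they force a prefix byte. The constant values below are the architectural ones; the NoReg stand-in is illustrative:

    #include <cassert>
    #include <cstdint>

    const uint8_t REX_NONE = 0x40, REX_W = 0x48, REX_R = 0x44,
                  REX_X = 0x42, REX_B = 0x41;
    const int NoReg = -1;  // stand-in for NoRegister

    uint8_t rexFor(unsigned size, int reg, int index, int base)
    {
      uint8_t byte = size == 8 ? REX_W : REX_NONE;
      if (reg != NoReg and (reg & 8)) byte |= REX_R;
      if (index != NoReg and (index & 8)) byte |= REX_X;
      if (base != NoReg and (base & 8)) byte |= REX_B;
      return byte;
    }

    int main()
    {
      assert(rexFor(8, 1 /*rcx*/, NoReg, 9 /*r9*/) == (REX_W | REX_B));  // 0x49
      assert(rexFor(4, 2 /*rdx*/, NoReg, 3 /*rbx*/) == REX_NONE);        // no prefix emitted
      return 0;
    }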
void maybeRex(Context* c, unsigned size, lir::Register* a, lir::Register* b)
void maybeRex(Context* c, unsigned size, lir::RegisterPair* a, lir::RegisterPair* b)
{
maybeRex(c, size, a->low, lir::NoRegister, b->low, false);
maybeRex(c, size, a->low, NoRegister, b->low, false);
}
void alwaysRex(Context* c, unsigned size, lir::Register* a, lir::Register* b)
void alwaysRex(Context* c, unsigned size, lir::RegisterPair* a, lir::RegisterPair* b)
{
maybeRex(c, size, a->low, lir::NoRegister, b->low, true);
maybeRex(c, size, a->low, NoRegister, b->low, true);
}
void maybeRex(Context* c, unsigned size, lir::Register* a)
void maybeRex(Context* c, unsigned size, lir::RegisterPair* a)
{
maybeRex(c, size, lir::NoRegister, lir::NoRegister, a->low, false);
maybeRex(c, size, NoRegister, NoRegister, a->low, false);
}
void maybeRex(Context* c, unsigned size, lir::Register* a, lir::Memory* b)
void maybeRex(Context* c, unsigned size, lir::RegisterPair* a, lir::Memory* b)
{
maybeRex(c, size, a->low, b->index, b->base, size == 1 and (a->low & 4));
maybeRex(c, size, a->low, b->index, b->base, size == 1 and (a->low.index() & 4));
}
void maybeRex(Context* c, unsigned size, lir::Memory* a)
{
maybeRex(c, size, lir::NoRegister, a->index, a->base, false);
maybeRex(c, size, NoRegister, a->index, a->base, false);
}
void modrm(Context* c, uint8_t mod, int a, int b)
void modrm(Context* c, uint8_t mod, Register a, Register b)
{
c->code.append(mod | (regCode(b) << 3) | regCode(a));
}
void modrm(Context* c, uint8_t mod, lir::Register* a, lir::Register* b)
void modrm(Context* c, uint8_t mod, lir::RegisterPair* a, lir::RegisterPair* b)
{
modrm(c, mod, a->low, b->low);
}
void sib(Context* c, unsigned scale, int index, int base)
void sib(Context* c, unsigned scale, Register index, Register base)
{
c->code.append((util::log(scale) << 6) | (regCode(index) << 3)
| regCode(base));
}
void modrmSib(Context* c, int width, int a, int scale, int index, int base)
void modrmSib(Context* c, int width, Register a, int scale, Register index, Register base)
{
if (index == lir::NoRegister) {
if (index == NoRegister) {
modrm(c, width, base, a);
if (regCode(base) == rsp) {
if (regCode(base) == rsp.index()) {
sib(c, 0x00, rsp, rsp);
}
} else {
@ -130,9 +130,9 @@ void modrmSib(Context* c, int width, int a, int scale, int index, int base)
}
}
void modrmSibImm(Context* c, int a, int scale, int index, int base, int offset)
void modrmSibImm(Context* c, Register a, int scale, Register index, Register base, int offset)
{
if (offset == 0 and regCode(base) != rbp) {
if (offset == 0 and regCode(base) != rbp.index()) {
modrmSib(c, 0x00, a, scale, index, base);
} else if (vm::fitsInInt8(offset)) {
modrmSib(c, 0x40, a, scale, index, base);
@ -143,7 +143,7 @@ void modrmSibImm(Context* c, int a, int scale, int index, int base, int offset)
}
}
void modrmSibImm(Context* c, lir::Register* a, lir::Memory* b)
void modrmSibImm(Context* c, lir::RegisterPair* a, lir::Memory* b)
{
modrmSibImm(c, a->low, b->scale, b->index, b->base, b->offset);
}
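As a concrete check of the encodings above (ModRM packs two bits of mod, three of reg, and three of rm; the SIB byte packs scale, index, and base in the same shape):

    #include <cstdio>

    int main()
    {
      // Register-direct form: mod = 0xC0, reg = rcx (1), rm = rax (0).
      unsigned modrmByte = 0xC0 | (1 << 3) | 0;    // 0xC8
      // [rbx + rdx*4]: scale 4 -> log2 = 2, index = rdx (2), base = rbx (3).
      unsigned sibByte = (2 << 6) | (2 << 3) | 3;  // 0x93
      std::printf("%02x %02x\n", modrmByte, sibByte);
    }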
@ -177,9 +177,9 @@ void conditional(Context* c, unsigned condition, lir::Constant* a)
void sseMoveRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b)
lir::RegisterPair* b)
{
assertT(c, aSize >= 4);
assertT(c, aSize == bSize);
@ -213,10 +213,10 @@ void sseMoveCR(Context* c,
unsigned aSize,
lir::Constant* a,
unsigned bSize,
lir::Register* b)
lir::RegisterPair* b)
{
assertT(c, aSize <= vm::TargetBytesPerWord);
lir::Register tmp(c->client->acquireTemporary(GeneralRegisterMask));
lir::RegisterPair tmp(c->client->acquireTemporary(GeneralRegisterMask));
moveCR2(c, aSize, a, aSize, &tmp, 0);
sseMoveRR(c, aSize, &tmp, bSize, b);
c->client->releaseTemporary(tmp.low);
@ -226,7 +226,7 @@ void sseMoveMR(Context* c,
unsigned aSize,
lir::Memory* a,
unsigned bSize UNUSED,
lir::Register* b)
lir::RegisterPair* b)
{
assertT(c, aSize >= 4);
@ -244,7 +244,7 @@ void sseMoveMR(Context* c,
void sseMoveRM(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
UNUSED unsigned bSize,
lir::Memory* b)
{
@ -353,9 +353,9 @@ void branchFloat(Context* c, lir::TernaryOperation op, lir::Constant* target)
void floatRegOp(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize,
lir::Register* b,
lir::RegisterPair* b,
uint8_t op,
uint8_t mod)
{
@ -373,7 +373,7 @@ void floatMemOp(Context* c,
unsigned aSize,
lir::Memory* a,
unsigned bSize,
lir::Register* b,
lir::RegisterPair* b,
uint8_t op)
{
if (aSize == 4) {
@ -390,13 +390,13 @@ void moveCR(Context* c,
unsigned aSize,
lir::Constant* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void moveCR2(Context* c,
UNUSED unsigned aSize,
lir::Constant* a,
UNUSED unsigned bSize,
lir::Register* b,
lir::RegisterPair* b,
unsigned promiseOffset)
{
if (vm::TargetBytesPerWord == 4 and bSize == 8) {
@ -408,7 +408,7 @@ void moveCR2(Context* c,
ResolvedPromise low(v & 0xFFFFFFFF);
lir::Constant al(&low);
lir::Register bh(b->high);
lir::RegisterPair bh(b->high);
moveCR(c, 4, &al, 4, b);
moveCR(c, 4, &ah, 4, &bh);


@ -32,42 +32,42 @@ void maybeRex(Context* c,
int base,
bool always);
void maybeRex(Context* c, unsigned size, lir::Register* a, lir::Register* b);
void maybeRex(Context* c, unsigned size, lir::RegisterPair* a, lir::RegisterPair* b);
void alwaysRex(Context* c, unsigned size, lir::Register* a, lir::Register* b);
void alwaysRex(Context* c, unsigned size, lir::RegisterPair* a, lir::RegisterPair* b);
void maybeRex(Context* c, unsigned size, lir::Register* a);
void maybeRex(Context* c, unsigned size, lir::RegisterPair* a);
void maybeRex(Context* c, unsigned size, lir::Register* a, lir::Memory* b);
void maybeRex(Context* c, unsigned size, lir::RegisterPair* a, lir::Memory* b);
void maybeRex(Context* c, unsigned size, lir::Memory* a);
inline int regCode(int a)
inline int regCode(Register a)
{
return a & 7;
return a.index() & 7;
}
inline int regCode(lir::Register* a)
inline int regCode(lir::RegisterPair* a)
{
return regCode(a->low);
}
inline bool isFloatReg(lir::Register* a)
inline bool isFloatReg(lir::RegisterPair* a)
{
return a->low >= xmm0;
}
void modrm(Context* c, uint8_t mod, int a, int b);
void modrm(Context* c, uint8_t mod, Register a, Register b);
void modrm(Context* c, uint8_t mod, lir::Register* a, lir::Register* b);
void modrm(Context* c, uint8_t mod, lir::RegisterPair* a, lir::RegisterPair* b);
void sib(Context* c, unsigned scale, int index, int base);
void sib(Context* c, unsigned scale, Register index, Register base);
void modrmSib(Context* c, int width, int a, int scale, int index, int base);
void modrmSib(Context* c, int width, Register a, int scale, Register index, Register base);
void modrmSibImm(Context* c, int a, int scale, int index, int base, int offset);
void modrmSibImm(Context* c, Register a, int scale, Register index, Register base, int offset);
void modrmSibImm(Context* c, lir::Register* a, lir::Memory* b);
void modrmSibImm(Context* c, lir::RegisterPair* a, lir::Memory* b);
void opcode(Context* c, uint8_t op);
@ -79,25 +79,25 @@ void conditional(Context* c, unsigned condition, lir::Constant* a);
void sseMoveRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void sseMoveCR(Context* c,
unsigned aSize,
lir::Constant* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void sseMoveMR(Context* c,
unsigned aSize,
lir::Memory* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void sseMoveRM(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
UNUSED unsigned bSize,
lir::Memory* b);
@ -107,9 +107,9 @@ void branchFloat(Context* c, lir::TernaryOperation op, lir::Constant* target);
void floatRegOp(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize,
lir::Register* b,
lir::RegisterPair* b,
uint8_t op,
uint8_t mod = 0xc0);
@ -117,14 +117,14 @@ void floatMemOp(Context* c,
unsigned aSize,
lir::Memory* a,
unsigned bSize,
lir::Register* b,
lir::RegisterPair* b,
uint8_t op);
void moveCR2(Context* c,
UNUSED unsigned aSize,
lir::Constant* a,
UNUSED unsigned bSize,
lir::Register* b,
lir::RegisterPair* b,
unsigned promiseOffset);
} // namespace x86


@ -28,42 +28,42 @@ using namespace util;
unsigned index(ArchitectureContext*,
lir::BinaryOperation operation,
lir::OperandType operand1,
lir::OperandType operand2)
lir::Operand::Type operand1,
lir::Operand::Type operand2)
{
return operation + ((lir::BinaryOperationCount
+ lir::NonBranchTernaryOperationCount) * operand1)
+ lir::NonBranchTernaryOperationCount) * (unsigned)operand1)
+ ((lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
* lir::OperandTypeCount * operand2);
* lir::Operand::TypeCount * (unsigned)operand2);
}
unsigned index(ArchitectureContext* c UNUSED,
lir::TernaryOperation operation,
lir::OperandType operand1,
lir::OperandType operand2)
lir::Operand::Type operand1,
lir::Operand::Type operand2)
{
assertT(c, not isBranch(operation));
return lir::BinaryOperationCount + operation
+ ((lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
* operand1)
* (unsigned)operand1)
+ ((lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
* lir::OperandTypeCount * operand2);
* lir::Operand::TypeCount * (unsigned)operand2);
}
unsigned branchIndex(ArchitectureContext* c UNUSED,
lir::OperandType operand1,
lir::OperandType operand2)
lir::Operand::Type operand1,
lir::Operand::Type operand2)
{
return operand1 + (lir::OperandTypeCount * operand2);
return (unsigned)operand1 + (lir::Operand::TypeCount * (unsigned)operand2);
}
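These index functions flatten the operand-type grid into one contiguous table per operation family; with lir::Operand::TypeCount equal to 4, every (operand1, operand2) pair owns a unique slot. A quick check of the branch case, assuming Memory is enumerator 3 as the old OperandType ordering suggests:

    // branchIndex(Constant, Memory) with TypeCount == 4:
    //   (unsigned)Constant + 4 * (unsigned)Memory == 0 + 4 * 3 == 12
    // so branchOperations holds BranchOperationCount * 4 * 4 entries,
    // matching the array bounds declared in ArchitectureContext.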
void populateTables(ArchitectureContext* c)
{
const lir::OperandType C = lir::ConstantOperand;
const lir::OperandType A = lir::AddressOperand;
const lir::OperandType R = lir::RegisterOperand;
const lir::OperandType M = lir::MemoryOperand;
const lir::Operand::Type C = lir::Operand::Type::Constant;
const lir::Operand::Type A = lir::Operand::Type::Address;
const lir::Operand::Type R = lir::Operand::Type::RegisterPair;
const lir::Operand::Type M = lir::Operand::Type::Memory;
OperationType* zo = c->operations;
UnaryOperationType* uo = c->unaryOperations;

View File

@ -23,17 +23,17 @@ class ArchitectureContext;
unsigned index(ArchitectureContext*,
lir::BinaryOperation operation,
lir::OperandType operand1,
lir::OperandType operand2);
lir::Operand::Type operand1,
lir::Operand::Type operand2);
unsigned index(ArchitectureContext* c UNUSED,
lir::TernaryOperation operation,
lir::OperandType operand1,
lir::OperandType operand2);
lir::Operand::Type operand1,
lir::Operand::Type operand2);
unsigned branchIndex(ArchitectureContext* c UNUSED,
lir::OperandType operand1,
lir::OperandType operand2);
lir::Operand::Type operand1,
lir::Operand::Type operand2);
void populateTables(ArchitectureContext* c);

File diff suppressed because it is too large


@ -33,7 +33,7 @@ void callC(Context* c, unsigned size UNUSED, lir::Constant* a);
void longCallC(Context* c, unsigned size, lir::Constant* a);
void jumpR(Context* c, unsigned size UNUSED, lir::Register* a);
void jumpR(Context* c, unsigned size UNUSED, lir::RegisterPair* a);
void jumpC(Context* c, unsigned size UNUSED, lir::Constant* a);
@ -41,7 +41,7 @@ void jumpM(Context* c, unsigned size UNUSED, lir::Memory* a);
void longJumpC(Context* c, unsigned size, lir::Constant* a);
void callR(Context* c, unsigned size UNUSED, lir::Register* a);
void callR(Context* c, unsigned size UNUSED, lir::RegisterPair* a);
void callM(Context* c, unsigned size UNUSED, lir::Memory* a);
@ -53,51 +53,51 @@ void alignedJumpC(Context* c, unsigned size, lir::Constant* a);
void alignedLongJumpC(Context* c, unsigned size, lir::Constant* a);
void pushR(Context* c, unsigned size, lir::Register* a);
void pushR(Context* c, unsigned size, lir::RegisterPair* a);
void popR(Context* c, unsigned size, lir::Register* a);
void popR(Context* c, unsigned size, lir::RegisterPair* a);
void negateR(Context* c, unsigned size, lir::Register* a);
void negateR(Context* c, unsigned size, lir::RegisterPair* a);
void negateRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b UNUSED);
lir::RegisterPair* b UNUSED);
void moveCR(Context* c,
unsigned aSize,
lir::Constant* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void moveZCR(Context* c,
unsigned aSize,
lir::Constant* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void swapRR(Context* c,
unsigned aSize UNUSED,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void moveRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
UNUSED unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void moveMR(Context* c,
unsigned aSize,
lir::Memory* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void moveRM(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Memory* b);
@ -105,7 +105,7 @@ void moveAR(Context* c,
unsigned aSize,
lir::Address* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void moveCM(Context* c,
unsigned aSize UNUSED,
@ -115,111 +115,111 @@ void moveCM(Context* c,
void moveZRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void moveZMR(Context* c,
unsigned aSize UNUSED,
lir::Memory* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void addCarryRR(Context* c, unsigned size, lir::Register* a, lir::Register* b);
void addCarryRR(Context* c, unsigned size, lir::RegisterPair* a, lir::RegisterPair* b);
void addRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void addCarryCR(Context* c, unsigned size, lir::Constant* a, lir::Register* b);
void addCarryCR(Context* c, unsigned size, lir::Constant* a, lir::RegisterPair* b);
void addCR(Context* c,
unsigned aSize,
lir::Constant* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void subtractBorrowCR(Context* c,
unsigned size UNUSED,
lir::Constant* a,
lir::Register* b);
lir::RegisterPair* b);
void subtractCR(Context* c,
unsigned aSize,
lir::Constant* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void subtractBorrowRR(Context* c,
unsigned size,
lir::Register* a,
lir::Register* b);
lir::RegisterPair* a,
lir::RegisterPair* b);
void subtractRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void andRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void andCR(Context* c,
unsigned aSize,
lir::Constant* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void orRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void orCR(Context* c,
unsigned aSize,
lir::Constant* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void xorRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void xorCR(Context* c,
unsigned aSize,
lir::Constant* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void multiplyRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void compareRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void compareCR(Context* c,
unsigned aSize,
lir::Constant* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void compareRM(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Memory* b);
@ -231,9 +231,9 @@ void compareCM(Context* c,
void compareFloatRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void branchLong(Context* c,
lir::TernaryOperation op,
@ -247,21 +247,21 @@ void branchLong(Context* c,
void branchRR(Context* c,
lir::TernaryOperation op,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::Constant* target);
void branchCR(Context* c,
lir::TernaryOperation op,
unsigned size,
lir::Constant* a,
lir::Register* b,
lir::RegisterPair* b,
lir::Constant* target);
void branchRM(Context* c,
lir::TernaryOperation op,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
lir::Memory* b,
lir::Constant* target);
@ -276,181 +276,181 @@ void multiplyCR(Context* c,
unsigned aSize,
lir::Constant* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void divideRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b UNUSED);
lir::RegisterPair* b UNUSED);
void remainderRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void doShift(Context* c,
UNUSED void (*shift)(Context*,
unsigned,
lir::Register*,
lir::RegisterPair*,
unsigned,
lir::Register*),
lir::RegisterPair*),
int type,
UNUSED unsigned aSize,
lir::Constant* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void shiftLeftRR(Context* c,
UNUSED unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void shiftLeftCR(Context* c,
unsigned aSize,
lir::Constant* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void shiftRightRR(Context* c,
UNUSED unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void shiftRightCR(Context* c,
unsigned aSize,
lir::Constant* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void unsignedShiftRightRR(Context* c,
UNUSED unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void unsignedShiftRightCR(Context* c,
unsigned aSize UNUSED,
lir::Constant* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void floatSqrtRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void floatSqrtMR(Context* c,
unsigned aSize,
lir::Memory* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void floatAddRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void floatAddMR(Context* c,
unsigned aSize,
lir::Memory* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void floatSubtractRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void floatSubtractMR(Context* c,
unsigned aSize,
lir::Memory* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void floatMultiplyRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void floatMultiplyMR(Context* c,
unsigned aSize,
lir::Memory* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void floatDivideRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void floatDivideMR(Context* c,
unsigned aSize,
lir::Memory* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void float2FloatRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void float2FloatMR(Context* c,
unsigned aSize,
lir::Memory* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void float2IntRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void float2IntMR(Context* c,
unsigned aSize,
lir::Memory* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void int2FloatRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void int2FloatMR(Context* c,
unsigned aSize,
lir::Memory* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);
void floatNegateRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void floatAbsoluteRR(Context* c,
unsigned aSize UNUSED,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);
void absoluteRR(Context* c,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b UNUSED);
lir::RegisterPair* b UNUSED);
} // namespace x86
} // namespace codegen


@ -15,50 +15,45 @@ namespace avian {
namespace codegen {
namespace x86 {
enum {
rax = 0,
rcx = 1,
rdx = 2,
rbx = 3,
rsp = 4,
rbp = 5,
rsi = 6,
rdi = 7,
r8 = 8,
r9 = 9,
r10 = 10,
r11 = 11,
r12 = 12,
r13 = 13,
r14 = 14,
r15 = 15,
};
constexpr Register rax((int)0);
constexpr Register rcx(1);
constexpr Register rdx(2);
constexpr Register rbx(3);
constexpr Register rsp(4);
constexpr Register rbp(5);
constexpr Register rsi(6);
constexpr Register rdi(7);
constexpr Register r8(8);
constexpr Register r9(9);
constexpr Register r10(10);
constexpr Register r11(11);
constexpr Register r12(12);
constexpr Register r13(13);
constexpr Register r14(14);
constexpr Register r15(15);
constexpr Register xmm0(16);
constexpr Register xmm1(16 + 1);
constexpr Register xmm2(16 + 2);
constexpr Register xmm3(16 + 3);
constexpr Register xmm4(16 + 4);
constexpr Register xmm5(16 + 5);
constexpr Register xmm6(16 + 6);
constexpr Register xmm7(16 + 7);
constexpr Register xmm8(16 + 8);
constexpr Register xmm9(16 + 9);
constexpr Register xmm10(16 + 10);
constexpr Register xmm11(16 + 11);
constexpr Register xmm12(16 + 12);
constexpr Register xmm13(16 + 13);
constexpr Register xmm14(16 + 14);
constexpr Register xmm15(16 + 15);
enum {
xmm0 = r15 + 1,
xmm1,
xmm2,
xmm3,
xmm4,
xmm5,
xmm6,
xmm7,
xmm8,
xmm9,
xmm10,
xmm11,
xmm12,
xmm13,
xmm14,
xmm15,
};
constexpr Register LongJumpRegister = r10;
const int LongJumpRegister = r10;
const unsigned GeneralRegisterMask = vm::TargetBytesPerWord == 4 ? 0x000000ff
constexpr RegisterMask GeneralRegisterMask = vm::TargetBytesPerWord == 4 ? 0x000000ff
: 0x0000ffff;
const unsigned FloatRegisterMask = vm::TargetBytesPerWord == 4 ? 0x00ff0000
constexpr RegisterMask FloatRegisterMask = vm::TargetBytesPerWord == 4 ? 0x00ff0000
: 0xffff0000;
} // namespace x86
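The two masks line up with the register indices defined above: on a 64-bit target, bits 0 through 15 cover rax through r15 and bits 16 through 31 cover xmm0 through xmm15. A small illustrative helper (not part of the header):

    #include <cstdint>

    const uint64_t GeneralRegisterMask64 = 0x0000ffff;  // rax..r15
    const uint64_t FloatRegisterMask64 = 0xffff0000;    // xmm0..xmm15

    inline bool isGeneral(int index) { return GeneralRegisterMask64 & (1ull << index); }

    // isGeneral(0) /*rax*/ is true; isGeneral(16) /*xmm0*/ is false.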


@ -30,7 +30,8 @@ Architecture* makeArchitectureNative(vm::System* system,
#elif(AVIAN_TARGET_ARCH == AVIAN_ARCH_X86) \
|| (AVIAN_TARGET_ARCH == AVIAN_ARCH_X86_64)
return makeArchitectureX86(system, useNativeFeatures);
#elif AVIAN_TARGET_ARCH == AVIAN_ARCH_ARM
#elif (AVIAN_TARGET_ARCH == AVIAN_ARCH_ARM) \
|| (AVIAN_TARGET_ARCH == AVIAN_ARCH_ARM64)
return makeArchitectureArm(system, useNativeFeatures);
#else
#error "Unsupported codegen target"


@ -16,11 +16,11 @@
#define BYTES_PER_WORD 4
#define LOCAL(x) .L##x
#ifdef __APPLE__
# define GLOBAL(x) _##x
#else
# define GLOBAL(x) x
# define GLOBAL(x) x
#endif
#define CONTINUATION_NEXT 4
@ -29,7 +29,7 @@
#define CONTINUATION_FRAME_POINTER_OFFSET 24
#define CONTINUATION_LENGTH 28
#define CONTINUATION_BODY 32
.globl GLOBAL(vmInvoke)
.align 2
GLOBAL(vmInvoke):
@ -56,7 +56,7 @@ GLOBAL(vmInvoke):
eor r4, sp, r3
tst r4, #4
subne sp, sp, #4
// copy arguments into place
sub sp, r3
mov r4, #0
@ -87,7 +87,7 @@ LOCAL(vmInvoke_argumentTest):
GLOBAL(vmInvoke_returnAddress):
// restore stack pointer
ldr sp, [r8, #TARGET_THREAD_SCRATCH]
// clear MyThread::stack to avoid confusing another thread calling
// java.lang.Thread.getStackTrace on this one. See
// MyProcess::getStackTrace in compile.cpp for details on how we get
@ -109,7 +109,7 @@ GLOBAL(vmInvoke_safeStack):
ldr r6,[r5,#CONTINUATION_LENGTH]
lsl r6,r6,#2
neg r7,r6
add r7,r7,#-80
add r7,r7,#-80 // 80 bytes for callee-saved register values
mov r4,sp
str r4,[sp,r7]!
@ -167,10 +167,10 @@ LOCAL(vmInvoke_handleException):
bx r7
LOCAL(vmInvoke_exit):
#endif // AVIAN_CONTINUATIONS
mov ip, #0
str ip, [r8, #TARGET_THREAD_STACK]
#endif // AVIAN_CONTINUATIONS
// restore return type
ldr ip, [sp], #4
@ -201,7 +201,7 @@ GLOBAL(vmJumpAndInvoke):
// which is not true in this case
sub r2,r2,r6
sub r2,r2,#84
mov r8,r0
// copy arguments into place
@ -220,7 +220,7 @@ LOCAL(vmJumpAndInvoke_argumentTest):
// the arguments have been copied, so we can set the real stack
// pointer now
mov sp,r2
// set return address to vmInvoke_returnAddress
#ifdef __APPLE__
movw r11, :lower16:(GLOBAL(vmInvoke_returnAddress)-(LOCAL(vmJumpAndInvoke_getAddress)+8))
@ -246,7 +246,7 @@ LOCAL(vmInvoke_getAddress_word):
LOCAL(vmJumpAndInvoke_getAddress_word):
.word _GLOBAL_OFFSET_TABLE_-(LOCAL(vmJumpAndInvoke_getAddress)+8)
#endif // not __APPLE__
#else // not AVIAN_CONTINUATIONS
// vmJumpAndInvoke should only be called when continuations are
// enabled, so we force a crash if we reach here:

src/compile-arm64.S (new file, 222 lines)

@ -0,0 +1,222 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "avian/types.h"
#include "avian/target-fields.h"
.text
#define BYTES_PER_WORD 8
#define LOCAL(x) .L##x
#ifdef __APPLE__
# define GLOBAL(x) _##x
#else
# define GLOBAL(x) x
#endif
#define CONTINUATION_NEXT 8
#define CONTINUATION_ADDRESS 32
#define CONTINUATION_RETURN_ADDRESS_OFFSET 40
#define CONTINUATION_FRAME_POINTER_OFFSET 48
#define CONTINUATION_LENGTH 56
#define CONTINUATION_BODY 64
.globl GLOBAL(vmInvoke)
.align 2
GLOBAL(vmInvoke):
// arguments:
// x0 : thread
// x1 : function
// x2 : arguments
// w3 : argumentFootprint
// w4 : frameSize (not used)
// w5 : returnType
// allocate frame
stp x29, x30, [sp,#-96]!
mov x29, sp
// save callee-saved register values
stp x19, x20, [sp,#16]
stp x21, x22, [sp,#32]
stp x23, x24, [sp,#48]
stp x25, x26, [sp,#64]
stp x27, x28, [sp,#80]
// save return type
str w5, [sp,#-16]!
mov x5, sp
str x5, [x0,#TARGET_THREAD_SCRATCH]
// copy arguments into place, reserving enough space for them, plus
// alignment padding
sub x5, sp, w3, uxtw
and sp, x5, #-16
mov x4, #0
b LOCAL(vmInvoke_argumentTest)
LOCAL(vmInvoke_argumentLoop):
ldr x5, [x2, x4]
str x5, [sp, x4]
add x4, x4, #BYTES_PER_WORD
LOCAL(vmInvoke_argumentTest):
cmp x4, x3
b.lt LOCAL(vmInvoke_argumentLoop)
// we use x19 to hold the thread pointer, by convention
mov x19, x0
// load and call function address
blr x1
.globl GLOBAL(vmInvoke_returnAddress)
.align 2
GLOBAL(vmInvoke_returnAddress):
// restore stack pointer
ldr x5, [x19, #TARGET_THREAD_SCRATCH]
mov sp, x5
// clear MyThread::stack to avoid confusing another thread calling
// java.lang.Thread.getStackTrace on this one. See
// MyProcess::getStackTrace in compile.cpp for details on how we get
// a reliable stack trace from a thread that might be interrupted at
// any point in its execution.
str xzr, [x19, #TARGET_THREAD_STACK]
.globl GLOBAL(vmInvoke_safeStack)
.align 2
GLOBAL(vmInvoke_safeStack):
#ifdef AVIAN_CONTINUATIONS
// call the next continuation, if any
ldr x5, [x19,#TARGET_THREAD_CONTINUATION]
cmp x5, xzr
b.eq LOCAL(vmInvoke_exit)
ldr x6, [x5,#CONTINUATION_LENGTH]
lsl x6, x6, #3
neg x7, x6
add x7, x7, #-128 // 128 bytes for callee-saved register values
mov x4, sp
add sp, sp, x7
str x4, [sp]
add x7, x5, #CONTINUATION_BODY
mov x11, xzr
b LOCAL(vmInvoke_continuationTest)
LOCAL(vmInvoke_continuationLoop):
ldr x9, [x7,x11]
str x9, [sp,x11]
add x11, x11, #8
LOCAL(vmInvoke_continuationTest):
cmp x11, x6
b.le LOCAL(vmInvoke_continuationLoop)
ldr x7, [x5,#CONTINUATION_RETURN_ADDRESS_OFFSET]
adr x11, GLOBAL(vmInvoke_returnAddress)
str x11, [sp,x7]
ldr x7, [x5,#CONTINUATION_NEXT]
str x7, [x19,#TARGET_THREAD_CONTINUATION]
// call the continuation unless we're handling an exception
ldr x7, [x19,#TARGET_THREAD_EXCEPTION]
cmp x7, xzr
b.ne LOCAL(vmInvoke_handleException)
ldr x7, [x5,#CONTINUATION_ADDRESS]
br x7
LOCAL(vmInvoke_handleException):
// we're handling an exception - call the exception handler instead
str xzr, [x19,#TARGET_THREAD_EXCEPTION]
ldr x11, [x19,#TARGET_THREAD_EXCEPTIONSTACKADJUSTMENT]
ldr x9, [sp]
neg x11, x11
add sp, sp, x11
str x9, [sp]
ldr x11, [x19,#TARGET_THREAD_EXCEPTIONOFFSET]
str x7, [sp,x11]
ldr x7, [x19,#TARGET_THREAD_EXCEPTIONHANDLER]
br x7
LOCAL(vmInvoke_exit):
str xzr, [x19, #TARGET_THREAD_STACK]
#endif // AVIAN_CONTINUATIONS
// restore return type
ldr w5, [sp],#16
// restore callee-saved register values
ldp x19, x20, [sp,#16]
ldp x21, x22, [sp,#32]
ldp x23, x24, [sp,#48]
ldp x25, x26, [sp,#64]
ldp x27, x28, [sp,#80]
ldp x29, x30, [sp],#96
LOCAL(vmInvoke_return):
br x30
.globl GLOBAL(vmJumpAndInvoke)
.align 2
GLOBAL(vmJumpAndInvoke):
#ifdef AVIAN_CONTINUATIONS
// x0: thread
// x1: address
// x2: stack
// x3: argumentFootprint
// x4: arguments
// x5: frameSize
// allocate new frame, adding room for callee-saved registers, plus
// 8 bytes of padding since the calculation of frameSize assumes 8
// bytes have already been allocated to save the return address,
// which is not true in this case
sub x2, x2, x5
sub x2, x2, #136
mov x19, x0
// copy arguments into place
mov x6, xzr
b LOCAL(vmJumpAndInvoke_argumentTest)
LOCAL(vmJumpAndInvoke_argumentLoop):
ldr x12, [x4,x6]
str x12, [x2,x6]
add x6, x6, #BYTES_PER_WORD // step by 8-byte words, matching the x-register copies
LOCAL(vmJumpAndInvoke_argumentTest):
cmp x6, x3
b.le LOCAL(vmJumpAndInvoke_argumentLoop)
// the arguments have been copied, so we can set the real stack
// pointer now
mov sp, x2
// set return address to vmInvoke_returnAddress
adr x30, GLOBAL(vmInvoke_returnAddress)
br x1
#else // not AVIAN_CONTINUATIONS
// vmJumpAndInvoke should only be called when continuations are
// enabled, so we force a crash if we reach here:
brk 0
#endif // not AVIAN_CONTINUATIONS

455
src/compile-x86_64.S Normal file
View File

@ -0,0 +1,455 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "avian/types.h"
#include "avian/target-fields.h"
#define LOCAL(x) .L##x
#if defined __APPLE__ \
|| ((defined __MINGW32__ || defined __CYGWIN32__) && ! defined __x86_64__)
# define GLOBAL(x) _##x
#else
# define GLOBAL(x) x
#endif
.text
#ifdef __x86_64__
#ifdef AVIAN_USE_FRAME_POINTER
# define ALIGNMENT_ADJUSTMENT 0
#else
# define ALIGNMENT_ADJUSTMENT 8
#endif
#if defined __MINGW32__ || defined __CYGWIN32__
#define CALLEE_SAVED_REGISTER_FOOTPRINT 64 + ALIGNMENT_ADJUSTMENT
.globl GLOBAL(vmInvoke)
GLOBAL(vmInvoke):
pushq %rbp
movq %rsp,%rbp
// %rcx: thread
// %rdx: function
// %r8 : arguments
// %r9 : argumentsFootprint
// 48(%rbp) : frameSize
// 56(%rbp) : returnType (ignored)
// allocate stack space for callee-saved registers
subq $CALLEE_SAVED_REGISTER_FOOTPRINT,%rsp
// remember this stack position, since we won't be able to rely on
// %rbp being restored when the call returns
movq %rsp,TARGET_THREAD_SCRATCH(%rcx)
// save callee-saved registers
movq %rbx,0(%rsp)
movq %r12,8(%rsp)
movq %r13,16(%rsp)
movq %r14,24(%rsp)
movq %r15,32(%rsp)
movq %rsi,40(%rsp)
movq %rdi,48(%rsp)
// allocate stack space for arguments
movl 48(%rbp),%eax
subq %rax,%rsp
// we use rbx to hold the thread pointer, by convention
mov %rcx,%rbx
// copy arguments into place
movq $0,%r11
jmp LOCAL(vmInvoke_argumentTest)
LOCAL(vmInvoke_argumentLoop):
movq (%r8,%r11,1),%rsi
movq %rsi,(%rsp,%r11,1)
addq $8,%r11
LOCAL(vmInvoke_argumentTest):
cmpq %r9,%r11
jb LOCAL(vmInvoke_argumentLoop)
// call function
call *%rdx
.globl GLOBAL(vmInvoke_returnAddress)
GLOBAL(vmInvoke_returnAddress):
// restore stack pointer
movq TARGET_THREAD_SCRATCH(%rbx),%rsp
// clear MyThread::stack to avoid confusing another thread calling
// java.lang.Thread.getStackTrace on this one. See
// MyProcess::getStackTrace in compile.cpp for details on how we get
// a reliable stack trace from a thread that might be interrupted at
// any point in its execution.
movq $0,TARGET_THREAD_STACK(%rbx)
.globl GLOBAL(vmInvoke_safeStack)
GLOBAL(vmInvoke_safeStack):
#ifdef AVIAN_CONTINUATIONS
# include "continuations-x86.S"
#endif // AVIAN_CONTINUATIONS
// restore callee-saved registers
movq 0(%rsp),%rbx
movq 8(%rsp),%r12
movq 16(%rsp),%r13
movq 24(%rsp),%r14
movq 32(%rsp),%r15
movq 40(%rsp),%rsi
movq 48(%rsp),%rdi
addq $CALLEE_SAVED_REGISTER_FOOTPRINT,%rsp
// return
popq %rbp
ret
.globl GLOBAL(vmJumpAndInvoke)
GLOBAL(vmJumpAndInvoke):
#ifdef AVIAN_CONTINUATIONS
// %rcx: thread
// %rdx: address
// %r8 : stack
// %r9 : argumentFootprint
// 40(%rsp): arguments
// 48(%rsp): frameSize
// allocate new frame, adding room for callee-saved registers
movl 48(%rsp),%eax
subq %rax,%r8
subq $CALLEE_SAVED_REGISTER_FOOTPRINT,%r8
movq %rcx,%rbx
// set return address
leaq GLOBAL(vmInvoke_returnAddress)(%rip),%r10
movq %r10,(%r8)
// copy arguments into place
movq $0,%r11
movq 40(%rsp),%rax
jmp LOCAL(vmJumpAndInvoke_argumentTest)
LOCAL(vmJumpAndInvoke_argumentLoop):
movq (%rax,%r11,1),%r10
movq %r10,8(%r8,%r11,1)
addq $8,%r11
LOCAL(vmJumpAndInvoke_argumentTest):
cmpq %r9,%r11
jb LOCAL(vmJumpAndInvoke_argumentLoop)
// the arguments have been copied, so we can set the real stack
// pointer now
movq %r8,%rsp
jmp *%rdx
#else // not AVIAN_CONTINUATIONS
// vmJumpAndInvoke should only be called when continuations are
// enabled
int3
#endif // not AVIAN_CONTINUATIONS
#else // not __MINGW32__ || __CYGWIN32__
#define CALLEE_SAVED_REGISTER_FOOTPRINT 48 + ALIGNMENT_ADJUSTMENT
.globl GLOBAL(vmInvoke)
GLOBAL(vmInvoke):
pushq %rbp
movq %rsp,%rbp
// %rdi: thread
// %rsi: function
// %rdx: arguments
// %rcx: argumentFootprint
// %r8 : frameSize
// %r9 : returnType (ignored)
// allocate stack space for callee-saved registers
subq $CALLEE_SAVED_REGISTER_FOOTPRINT,%rsp
// remember this stack position, since we won't be able to rely on
// %rbp being restored when the call returns
movq %rsp,TARGET_THREAD_SCRATCH(%rdi)
// save callee-saved registers
movq %rbx,0(%rsp)
movq %r12,8(%rsp)
movq %r13,16(%rsp)
movq %r14,24(%rsp)
movq %r15,32(%rsp)
// allocate stack space for arguments
subq %r8,%rsp
// we use rbx to hold the thread pointer, by convention
mov %rdi,%rbx
// copy arguments into place
movq $0,%r9
jmp LOCAL(vmInvoke_argumentTest)
LOCAL(vmInvoke_argumentLoop):
movq (%rdx,%r9,1),%r8
movq %r8,(%rsp,%r9,1)
addq $8,%r9
LOCAL(vmInvoke_argumentTest):
cmpq %rcx,%r9
jb LOCAL(vmInvoke_argumentLoop)
// call function
call *%rsi
.globl GLOBAL(vmInvoke_returnAddress)
GLOBAL(vmInvoke_returnAddress):
// restore stack pointer
movq TARGET_THREAD_SCRATCH(%rbx),%rsp
// clear MyThread::stack to avoid confusing another thread calling
// java.lang.Thread.getStackTrace on this one. See
// MyProcess::getStackTrace in compile.cpp for details on how we get
// a reliable stack trace from a thread that might be interrupted at
// any point in its execution.
movq $0,TARGET_THREAD_STACK(%rbx)
.globl GLOBAL(vmInvoke_safeStack)
GLOBAL(vmInvoke_safeStack):
#ifdef AVIAN_CONTINUATIONS
# include "continuations-x86.S"
#endif // AVIAN_CONTINUATIONS
// restore callee-saved registers
movq 0(%rsp),%rbx
movq 8(%rsp),%r12
movq 16(%rsp),%r13
movq 24(%rsp),%r14
movq 32(%rsp),%r15
addq $CALLEE_SAVED_REGISTER_FOOTPRINT,%rsp
// return
popq %rbp
ret
.globl GLOBAL(vmJumpAndInvoke)
GLOBAL(vmJumpAndInvoke):
#ifdef AVIAN_CONTINUATIONS
// %rdi: thread
// %rsi: address
// %rdx: stack
// %rcx: argumentFootprint
// %r8 : arguments
// %r9 : frameSize
// allocate new frame, adding room for callee-saved registers
subq %r9,%rdx
subq $CALLEE_SAVED_REGISTER_FOOTPRINT,%rdx
movq %rdi,%rbx
// set return address
movq GLOBAL(vmInvoke_returnAddress)@GOTPCREL(%rip),%r10
movq %r10,(%rdx)
// copy arguments into place
movq $0,%r11
jmp LOCAL(vmJumpAndInvoke_argumentTest)
LOCAL(vmJumpAndInvoke_argumentLoop):
movq (%r8,%r11,1),%r10
movq %r10,8(%rdx,%r11,1)
addq $8,%r11
LOCAL(vmJumpAndInvoke_argumentTest):
cmpq %rcx,%r11
jb LOCAL(vmJumpAndInvoke_argumentLoop)
// the arguments have been copied, so we can set the real stack
// pointer now
movq %rdx,%rsp
jmp *%rsi
#else // not AVIAN_CONTINUATIONS
// vmJumpAndInvoke should only be called when continuations are
// enabled
int3
#endif // not AVIAN_CONTINUATIONS
#endif // not __MINGW32__ || __CYGWIN32__
#elif defined __i386__
#ifdef AVIAN_USE_FRAME_POINTER
# define ALIGNMENT_ADJUSTMENT 0
#else
# define ALIGNMENT_ADJUSTMENT 12
#endif
#define CALLEE_SAVED_REGISTER_FOOTPRINT 16 + ALIGNMENT_ADJUSTMENT
.globl GLOBAL(vmInvoke)
GLOBAL(vmInvoke):
pushl %ebp
movl %esp,%ebp
// 8(%ebp): thread
// 12(%ebp): function
// 16(%ebp): arguments
// 20(%ebp): argumentFootprint
// 24(%ebp): frameSize
// 28(%ebp): returnType
// allocate stack space for callee-saved registers
subl $CALLEE_SAVED_REGISTER_FOOTPRINT,%esp
// remember this stack position, since we won't be able to rely on
// %ebp being restored when the call returns
movl 8(%ebp),%eax
movl %esp,TARGET_THREAD_SCRATCH(%eax)
movl %ebx,0(%esp)
movl %esi,4(%esp)
movl %edi,8(%esp)
// allocate stack space for arguments
subl 24(%ebp),%esp
// we use ebx to hold the thread pointer, by convention
mov %eax,%ebx
// copy arguments into place
movl $0,%ecx
movl 16(%ebp),%edx
jmp LOCAL(vmInvoke_argumentTest)
LOCAL(vmInvoke_argumentLoop):
movl (%edx,%ecx,1),%eax
movl %eax,(%esp,%ecx,1)
addl $4,%ecx
LOCAL(vmInvoke_argumentTest):
cmpl 20(%ebp),%ecx
jb LOCAL(vmInvoke_argumentLoop)
// call function
call *12(%ebp)
.globl GLOBAL(vmInvoke_returnAddress)
GLOBAL(vmInvoke_returnAddress):
// restore stack pointer
movl TARGET_THREAD_SCRATCH(%ebx),%esp
// clear MyThread::stack to avoid confusing another thread calling
// java.lang.Thread.getStackTrace on this one. See
// MyProcess::getStackTrace in compile.cpp for details on how we get
// a reliable stack trace from a thread that might be interrupted at
// any point in its execution.
movl $0,TARGET_THREAD_STACK(%ebx)
.globl GLOBAL(vmInvoke_safeStack)
GLOBAL(vmInvoke_safeStack):
#ifdef AVIAN_CONTINUATIONS
# include "continuations-x86.S"
#endif // AVIAN_CONTINUATIONS
// restore callee-saved registers
movl 0(%esp),%ebx
movl 4(%esp),%esi
movl 8(%esp),%edi
addl $CALLEE_SAVED_REGISTER_FOOTPRINT,%esp
// handle return value based on expected type
movl 28(%esp),%ecx
popl %ebp
ret
LOCAL(getPC):
movl (%esp),%esi
ret
.globl GLOBAL(vmJumpAndInvoke)
GLOBAL(vmJumpAndInvoke):
#ifdef AVIAN_CONTINUATIONS
// 4(%esp): thread
// 8(%esp): address
// 12(%esp): stack
// 16(%esp): argumentFootprint
// 20(%esp): arguments
// 24(%esp): frameSize
movl 12(%esp),%ecx
// allocate new frame, adding room for callee-saved registers,
// return address, and frame pointer
subl 24(%esp),%ecx
subl $CALLEE_SAVED_REGISTER_FOOTPRINT+8,%ecx
movl 4(%esp),%ebx
// set return address
#if defined __MINGW32__ || defined __CYGWIN32__
movl $GLOBAL(vmInvoke_returnAddress),%esi
#else
call LOCAL(getPC)
# if defined __APPLE__
LOCAL(vmJumpAndInvoke_offset):
leal GLOBAL(vmInvoke_returnAddress)-LOCAL(vmJumpAndInvoke_offset)(%esi),%esi
# else
addl $_GLOBAL_OFFSET_TABLE_,%esi
movl GLOBAL(vmInvoke_returnAddress)@GOT(%esi),%esi
# endif
#endif
movl %esi,(%ecx)
// copy arguments into place
movl $0,%esi
movl 16(%esp),%edx
movl 20(%esp),%eax
jmp LOCAL(vmJumpAndInvoke_argumentTest)
LOCAL(vmJumpAndInvoke_argumentLoop):
movl (%eax,%esi,1),%edi
movl %edi,4(%ecx,%esi,1)
addl $4,%esi
LOCAL(vmJumpAndInvoke_argumentTest):
cmpl %edx,%esi
jb LOCAL(vmJumpAndInvoke_argumentLoop)
movl 8(%esp),%esi
// the arguments have been copied, so we can set the real stack
// pointer now
movl %ecx,%esp
jmp *%esi
#else // not AVIAN_CONTINUATIONS
// vmJumpAndInvoke should only be called when continuations are
// enabled
int3
#endif // not AVIAN_CONTINUATIONS
#else
#error unsupported architecture
#endif //def __x86_64__

src/compile.cpp
View File

@ -2189,6 +2189,8 @@ GcContinuation* makeCurrentContinuation(MyThread* t,
*targetIp = 0;
while (*targetIp == 0) {
assertT(t, ip);
GcMethod* method = methodForIp(t, ip);
if (method) {
PROTECT(t, method);
@ -9783,22 +9785,22 @@ void compileCall(MyThread* t, Context* c, ThunkIndex index, bool call = true)
if (processor(t)->bootImage) {
lir::Memory table(t->arch->thread(), TARGET_THREAD_THUNKTABLE);
lir::Register scratch(t->arch->scratch());
lir::RegisterPair scratch(t->arch->scratch());
a->apply(lir::Move,
OperandInfo(TargetBytesPerWord, lir::MemoryOperand, &table),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &scratch));
OperandInfo(TargetBytesPerWord, lir::Operand::Type::Memory, &table),
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &scratch));
lir::Memory proc(scratch.low, index * TargetBytesPerWord);
a->apply(lir::Move,
OperandInfo(TargetBytesPerWord, lir::MemoryOperand, &proc),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &scratch));
OperandInfo(TargetBytesPerWord, lir::Operand::Type::Memory, &proc),
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &scratch));
a->apply(call ? lir::Call : lir::Jump,
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &scratch));
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &scratch));
} else {
lir::Constant proc(new (&c->zone) avian::codegen::ResolvedPromise(
reinterpret_cast<intptr_t>(t->thunkTable[index])));
a->apply(call ? lir::LongCall : lir::LongJump,
OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &proc));
OperandInfo(TargetBytesPerWord, lir::Operand::Type::Constant, &proc));
}
}
@ -9814,16 +9816,16 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
p->thunks.default_.frameSavedOffset = a->length();
lir::Register thread(t->arch->thread());
a->pushFrame(1, TargetBytesPerWord, lir::RegisterOperand, &thread);
lir::RegisterPair thread(t->arch->thread());
a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);
compileCall(t, &context, compileMethodIndex);
a->popFrame(t->arch->alignFrameSize(1));
lir::Register result(t->arch->returnLow());
lir::RegisterPair result(t->arch->returnLow());
a->apply(lir::Jump,
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &result));
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &result));
p->thunks.default_.length = a->endBlock(false)->resolve(0, 0);
@ -9835,7 +9837,7 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
Context context(t);
avian::codegen::Assembler* a = context.assembler;
lir::Register class_(t->arch->virtualCallTarget());
lir::RegisterPair class_(t->arch->virtualCallTarget());
lir::Memory virtualCallTargetSrc(
t->arch->stack(),
(t->arch->frameFooterSize() + t->arch->frameReturnAddressSize())
@ -9843,41 +9845,41 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
a->apply(lir::Move,
OperandInfo(
TargetBytesPerWord, lir::MemoryOperand, &virtualCallTargetSrc),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &class_));
TargetBytesPerWord, lir::Operand::Type::Memory, &virtualCallTargetSrc),
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &class_));
lir::Memory virtualCallTargetDst(t->arch->thread(),
TARGET_THREAD_VIRTUALCALLTARGET);
a->apply(
lir::Move,
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &class_),
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &class_),
OperandInfo(
TargetBytesPerWord, lir::MemoryOperand, &virtualCallTargetDst));
TargetBytesPerWord, lir::Operand::Type::Memory, &virtualCallTargetDst));
lir::Register index(t->arch->virtualCallIndex());
lir::RegisterPair index(t->arch->virtualCallIndex());
lir::Memory virtualCallIndex(t->arch->thread(),
TARGET_THREAD_VIRTUALCALLINDEX);
a->apply(
lir::Move,
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &index),
OperandInfo(TargetBytesPerWord, lir::MemoryOperand, &virtualCallIndex));
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &index),
OperandInfo(TargetBytesPerWord, lir::Operand::Type::Memory, &virtualCallIndex));
a->saveFrame(TARGET_THREAD_STACK, TARGET_THREAD_IP);
p->thunks.defaultVirtual.frameSavedOffset = a->length();
lir::Register thread(t->arch->thread());
a->pushFrame(1, TargetBytesPerWord, lir::RegisterOperand, &thread);
lir::RegisterPair thread(t->arch->thread());
a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);
compileCall(t, &context, compileVirtualMethodIndex);
a->popFrame(t->arch->alignFrameSize(1));
lir::Register result(t->arch->returnLow());
lir::RegisterPair result(t->arch->returnLow());
a->apply(lir::Jump,
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &result));
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &result));
p->thunks.defaultVirtual.length = a->endBlock(false)->resolve(0, 0);
@ -9893,8 +9895,8 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
p->thunks.native.frameSavedOffset = a->length();
lir::Register thread(t->arch->thread());
a->pushFrame(1, TargetBytesPerWord, lir::RegisterOperand, &thread);
lir::RegisterPair thread(t->arch->thread());
a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);
compileCall(t, &context, invokeNativeIndex);
@ -9915,8 +9917,8 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
p->thunks.aioob.frameSavedOffset = a->length();
lir::Register thread(t->arch->thread());
a->pushFrame(1, TargetBytesPerWord, lir::RegisterOperand, &thread);
lir::RegisterPair thread(t->arch->thread());
a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);
compileCall(t, &context, throwArrayIndexOutOfBoundsIndex);
@ -9934,8 +9936,8 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
p->thunks.stackOverflow.frameSavedOffset = a->length();
lir::Register thread(t->arch->thread());
a->pushFrame(1, TargetBytesPerWord, lir::RegisterOperand, &thread);
lir::RegisterPair thread(t->arch->thread());
a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);
compileCall(t, &context, throwStackOverflowIndex);
@ -10058,17 +10060,17 @@ uintptr_t compileVirtualThunk(MyThread* t, unsigned index, unsigned* size)
avian::codegen::ResolvedPromise indexPromise(index);
lir::Constant indexConstant(&indexPromise);
lir::Register indexRegister(t->arch->virtualCallIndex());
lir::RegisterPair indexRegister(t->arch->virtualCallIndex());
a->apply(
lir::Move,
OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &indexConstant),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &indexRegister));
OperandInfo(TargetBytesPerWord, lir::Operand::Type::Constant, &indexConstant),
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &indexRegister));
avian::codegen::ResolvedPromise defaultVirtualThunkPromise(
defaultVirtualThunk(t));
lir::Constant thunk(&defaultVirtualThunkPromise);
a->apply(lir::Jump,
OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &thunk));
OperandInfo(TargetBytesPerWord, lir::Operand::Type::Constant, &thunk));
*size = a->endBlock(false)->resolve(0, 0);

145
src/i386.S Normal file
View File

@ -0,0 +1,145 @@
/* Copyright (c) 2008-2014, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "avian/types.h"
#define LOCAL(x) .L##x
#if defined __APPLE__ \
|| ((defined __MINGW32__ || defined __CYGWIN32__) && ! defined __x86_64__)
# define GLOBAL(x) _##x
#else
# define GLOBAL(x) x
#endif
.text
#define CHECKPOINT_THREAD 4
#define CHECKPOINT_STACK 24
#define CHECKPOINT_BASE 28
.globl GLOBAL(vmNativeCall)
GLOBAL(vmNativeCall):
pushl %ebp
movl %esp,%ebp
// 8(%ebp): function
// 12(%ebp): stack
// 16(%ebp): stackSize
// 20(%ebp): returnType
// reserve space for arguments
movl 16(%ebp),%ecx
subl %ecx,%esp
//# ifdef __APPLE__
// align to a 16 byte boundary
andl $0xFFFFFFF0,%esp
//# endif
// copy arguments into place
movl $0,%ecx
jmp LOCAL(test)
LOCAL(loop):
movl %ecx,%eax
movl %ecx,%edx
addl %esp,%edx
addl 12(%ebp),%eax
movl (%eax),%eax
movl %eax,(%edx)
addl $4,%ecx
LOCAL(test):
cmpl 16(%ebp),%ecx
jb LOCAL(loop)
// call function
call *8(%ebp)
// handle return value based on expected type
movl 20(%ebp),%ecx
LOCAL(void):
cmpl $VOID_TYPE,%ecx
jne LOCAL(int64)
jmp LOCAL(exit)
LOCAL(int64):
cmpl $INT64_TYPE,%ecx
jne LOCAL(float)
jmp LOCAL(exit)
LOCAL(float):
cmpl $FLOAT_TYPE,%ecx
jne LOCAL(double)
fstps 8(%ebp)
movl 8(%ebp),%eax
jmp LOCAL(exit)
LOCAL(double):
cmpl $DOUBLE_TYPE,%ecx
jne LOCAL(exit)
fstpl 8(%ebp)
movl 8(%ebp),%eax
movl 12(%ebp),%edx
LOCAL(exit):
movl %ebp,%esp
popl %ebp
ret
.globl GLOBAL(vmJump)
GLOBAL(vmJump):
movl 4(%esp),%esi
movl 8(%esp),%ebp
movl 16(%esp),%ebx
movl 20(%esp),%eax
movl 24(%esp),%edx
movl 12(%esp),%esp
jmp *%esi
#define VMRUN_FRAME_SIZE 24
.globl GLOBAL(vmRun)
GLOBAL(vmRun):
// 8(%ebp): function
// 12(%ebp): arguments
// 16(%ebp): checkpoint
pushl %ebp
movl %esp,%ebp
subl $VMRUN_FRAME_SIZE,%esp
movl %ebx,8(%esp)
movl %esi,12(%esp)
movl %edi,16(%esp)
movl 12(%ebp),%eax
movl %eax,4(%esp)
movl 16(%ebp),%ecx
movl CHECKPOINT_THREAD(%ecx),%eax
movl %eax,0(%esp)
movl %esp,CHECKPOINT_STACK(%ecx)
call *8(%ebp)
.globl GLOBAL(vmRun_returnAddress)
GLOBAL(vmRun_returnAddress):
movl 8(%esp),%ebx
movl 12(%esp),%esi
movl 16(%esp),%edi
addl $VMRUN_FRAME_SIZE,%esp
popl %ebp
ret

View File

@ -50,20 +50,11 @@ bool writeObject(uint8_t* data,
OutputStream* out,
const char* startName,
const char* endName,
const char* format,
const char* architecture,
Platform* platform,
unsigned alignment,
bool writable,
bool executable)
{
Platform* platform = Platform::getPlatform(
PlatformInfo(PlatformInfo::formatFromString(format),
PlatformInfo::archFromString(architecture)));
if (!platform) {
fprintf(stderr, "unsupported platform: %s/%s\n", format, architecture);
return false;
}
SymbolInfo symbols[] = {SymbolInfo(0, startName), SymbolInfo(size, endName)};
@ -113,6 +104,19 @@ int main(int argc, const char** argv)
}
}
const char* format = argv[5];
const char* architecture = argv[6];
Platform* platform = Platform::getPlatform(
PlatformInfo(PlatformInfo::formatFromString(format),
PlatformInfo::archFromString(architecture)));
if (!platform) {
fprintf(stderr, "unsupported platform: %s/%s\n", format, architecture);
return 1;
}
uint8_t* data = 0;
unsigned size;
int fd = open(argv[1], O_RDONLY);
@ -148,8 +152,7 @@ int main(int argc, const char** argv)
&out,
argv[3],
argv[4],
argv[5],
argv[6],
platform,
alignment,
writable,
executable);

View File

@ -49,6 +49,7 @@
#define EM_386 3
#define EM_X86_64 62
#define EM_ARM 40
#define EM_AARCH64 183
#define SHT_PROGBITS 1
#define SHT_SYMTAB 2
@ -129,6 +130,8 @@ unsigned getElfPlatform(PlatformInfo::Architecture arch)
return EM_386;
case PlatformInfo::Arm:
return EM_ARM;
case PlatformInfo::Arm64:
return EM_AARCH64;
default:
return ~0;
}
@ -398,6 +401,7 @@ class ElfPlatform : public Platform {
ElfPlatform<uint32_t> elfX86Platform(PlatformInfo::x86);
ElfPlatform<uint32_t> elfArmPlatform(PlatformInfo::Arm);
ElfPlatform<uint64_t> elfArm64Platform(PlatformInfo::Arm64);
ElfPlatform<uint64_t> elfX86_64Platform(PlatformInfo::x86_64);
} // namespace

View File

@ -33,10 +33,12 @@
#define CPU_TYPE_I386 7
#define CPU_TYPE_X86_64 (CPU_TYPE_I386 | CPU_ARCH_ABI64)
#define CPU_TYPE_ARM 12
#define CPU_TYPE_ARM64 (CPU_TYPE_ARM | CPU_ARCH_ABI64)
#define CPU_SUBTYPE_I386_ALL 3
#define CPU_SUBTYPE_X86_64_ALL CPU_SUBTYPE_I386_ALL
#define CPU_SUBTYPE_ARM_V7 9
#define CPU_SUBTYPE_ARM_V8 13
namespace {
@ -157,6 +159,10 @@ class MachOPlatform : public Platform {
cpuType = CPU_TYPE_ARM;
cpuSubType = CPU_SUBTYPE_ARM_V7;
break;
case PlatformInfo::Arm64:
cpuType = CPU_TYPE_ARM64;
cpuSubType = CPU_SUBTYPE_ARM_V8;
break;
default:
// should never happen (see MachOPlatform declarations at bottom)
fprintf(stderr, "unsupported architecture: %d\n", info.arch);
@ -280,6 +286,7 @@ class MachOPlatform : public Platform {
MachOPlatform<uint32_t> darwinx86Platform(PlatformInfo::x86);
MachOPlatform<uint32_t> darwinArmPlatform(PlatformInfo::Arm);
MachOPlatform<uint64_t> darwinArm64Platform(PlatformInfo::Arm64);
MachOPlatform<uint64_t> darwinx86_64Platform(PlatformInfo::x86_64);
} // namespace

View File

@ -115,6 +115,8 @@ PlatformInfo::Architecture PlatformInfo::archFromString(const char* arch)
return Architecture::x86_64;
} else if (strcmp(arch, "arm") == 0) {
return Architecture::Arm;
} else if (strcmp(arch, "arm64") == 0) {
return Architecture::Arm64;
} else {
return Architecture::UnknownArch;
}

View File

@ -1408,8 +1408,20 @@ void writeInitializations(Output* out, Module& module)
}
}
void writeJavaInitialization(Output* out, Class* cl)
void writeJavaInitialization(Output* out,
Class* cl,
std::set<Class*>& alreadyInited)
{
if (alreadyInited.find(cl) != alreadyInited.end()) {
return;
}
alreadyInited.insert(cl);
if (cl->super) {
writeJavaInitialization(out, cl->super, alreadyInited);
}
out->write("bootJavaClass(t, Gc::");
out->write(capitalize(cl->name));
out->write("Type, ");
@ -1436,10 +1448,11 @@ void writeJavaInitialization(Output* out, Class* cl)
void writeJavaInitializations(Output* out, Module& module)
{
std::set<Class*> alreadyInited;
for (const auto p : module.classes) {
Class* cl = p.second;
if (cl->javaName.size()) {
writeJavaInitialization(out, cl);
writeJavaInitialization(out, cl, alreadyInited);
}
}
}

src/x86.S
View File

@ -21,8 +21,6 @@
.text
#ifdef __x86_64__
#define CHECKPOINT_THREAD 8
#define CHECKPOINT_STACK 48
@ -340,130 +338,3 @@ GLOBAL(vmRun_returnAddress):
ret
#endif // not __MINGW32__
#elif defined __i386__
#define CHECKPOINT_THREAD 4
#define CHECKPOINT_STACK 24
#define CHECKPOINT_BASE 28
.globl GLOBAL(vmNativeCall)
GLOBAL(vmNativeCall):
pushl %ebp
movl %esp,%ebp
// 8(%ebp): function
// 12(%ebp): stack
// 16(%ebp): stackSize
// 20(%ebp): returnType
// reserve space for arguments
movl 16(%ebp),%ecx
subl %ecx,%esp
//# ifdef __APPLE__
// align to a 16 byte boundary
andl $0xFFFFFFF0,%esp
//# endif
// copy arguments into place
movl $0,%ecx
jmp LOCAL(test)
LOCAL(loop):
movl %ecx,%eax
movl %ecx,%edx
addl %esp,%edx
addl 12(%ebp),%eax
movl (%eax),%eax
movl %eax,(%edx)
addl $4,%ecx
LOCAL(test):
cmpl 16(%ebp),%ecx
jb LOCAL(loop)
// call function
call *8(%ebp)
// handle return value based on expected type
movl 20(%ebp),%ecx
LOCAL(void):
cmpl $VOID_TYPE,%ecx
jne LOCAL(int64)
jmp LOCAL(exit)
LOCAL(int64):
cmpl $INT64_TYPE,%ecx
jne LOCAL(float)
jmp LOCAL(exit)
LOCAL(float):
cmpl $FLOAT_TYPE,%ecx
jne LOCAL(double)
fstps 8(%ebp)
movl 8(%ebp),%eax
jmp LOCAL(exit)
LOCAL(double):
cmpl $DOUBLE_TYPE,%ecx
jne LOCAL(exit)
fstpl 8(%ebp)
movl 8(%ebp),%eax
movl 12(%ebp),%edx
LOCAL(exit):
movl %ebp,%esp
popl %ebp
ret
.globl GLOBAL(vmJump)
GLOBAL(vmJump):
movl 4(%esp),%esi
movl 8(%esp),%ebp
movl 16(%esp),%ebx
movl 20(%esp),%eax
movl 24(%esp),%edx
movl 12(%esp),%esp
jmp *%esi
#define VMRUN_FRAME_SIZE 24
.globl GLOBAL(vmRun)
GLOBAL(vmRun):
// 8(%ebp): function
// 12(%ebp): arguments
// 16(%ebp): checkpoint
pushl %ebp
movl %esp,%ebp
subl $VMRUN_FRAME_SIZE,%esp
movl %ebx,8(%esp)
movl %esi,12(%esp)
movl %edi,16(%esp)
movl 12(%ebp),%eax
movl %eax,4(%esp)
movl 16(%ebp),%ecx
movl CHECKPOINT_THREAD(%ecx),%eax
movl %eax,0(%esp)
movl %esp,CHECKPOINT_STACK(%ecx)
call *8(%ebp)
.globl GLOBAL(vmRun_returnAddress)
GLOBAL(vmRun_returnAddress):
movl 8(%esp),%ebx
movl 12(%esp),%esi
movl 16(%esp),%edi
addl $VMRUN_FRAME_SIZE,%esp
popl %ebp
ret
#endif // __i386__

test/NullPointer.java
View File

@ -16,6 +16,12 @@ public class NullPointer {
}
public static void main(String[] args) {
try {
((Object) null).getClass();
} catch (Exception e) {
e.printStackTrace();
}
try {
throw_(null);
throw new RuntimeException();

test/ci.sh
View File

@ -1,9 +1,28 @@
#!/bin/sh
#!/usr/bin/env bash
set -e
set -eo pipefail
root_dir=$(pwd)
flags="${@}"
is-mac() {
if [[ $(uname -s) == "Darwin" || ${TRAVIS_OS_NAME} == "osx" ]]; then
return 0
fi
return 1
}
install-deps() {
if is-mac; then
echo "------ Installing dependencies for Mac ------"
else
echo "------ Installing dependencies for Linux ------"
sudo apt-get update -qq
sudo apt-get install -y libc6-dev-i386 mingw-w64 gcc-mingw-w64-x86-64 g++-mingw-w64-i686 binutils-mingw-w64-x86-64 lib32z1-dev zlib1g-dev g++-mingw-w64-x86-64
fi
}
run() {
echo '==============================================='
if [ ! $(pwd) = ${root_dir} ]; then
@ -23,10 +42,24 @@ run_cmake() {
cd ..
}
flags="${@}"
publish() {
local platforms="${1}"
local arches="${2}"
local platform
for platform in ${platforms}; do
local arch
for arch in ${arches}; do
echo "------ Publishing ${platform}-${arch} ------"
./gradlew artifactoryPublish -Pplatform=${platform} -Parch=${arch}
done
done
}
has_flag() {
local arg=$1
local arg=${1}
local f
for f in ${flags}; do
local key=$(echo $f | awk -F '=' '{print $1}')
if [ ${key} = ${arg} ]; then
@ -36,21 +69,35 @@ has_flag() {
return 1
}
make_target=test
### START ###
test `uname -o` = "Cygwin" || run_cmake -DCMAKE_BUILD_TYPE=Debug
install-deps
run make jdk-test
run make ${flags} ${make_target}
run make ${flags} mode=debug ${make_target}
run make ${flags} process=interpret ${make_target}
if [[ "${1}" == "PUBLISH" ]]; then
if is-mac; then
publish "macosx" "i386 x86_64"
elif [[ $(uname -s) == "Linux" ]]; then
publish "linux windows" "i386 x86_64"
fi
else
if [[ $(uname -o) != "Cygwin" ]]; then
run_cmake -DCMAKE_BUILD_TYPE=Debug
fi
(has_flag openjdk-src || ! has_flag openjdk) && \
run make ${flags} mode=debug bootimage=true ${make_target} && \
run make ${flags} bootimage=true ${make_target}
make_target=test
(! has_flag openjdk && ! has_flag android) && \
run make ${flags} openjdk=$JAVA_HOME ${make_target}
run make jdk-test
run make ${flags} ${make_target}
run make ${flags} mode=debug ${make_target}
run make ${flags} process=interpret ${make_target}
run make ${flags} tails=true continuations=true heapdump=true ${make_target}
run make ${flags} codegen-targets=all
(has_flag openjdk-src || ! has_flag openjdk) && \
run make ${flags} mode=debug bootimage=true ${make_target} && \
run make ${flags} bootimage=true ${make_target}
(! has_flag openjdk && ! has_flag android) && \
run make ${flags} openjdk=$JAVA_HOME ${make_target}
run make ${flags} tails=true continuations=true heapdump=true ${make_target}
run make ${flags} codegen-targets=all
fi

View File

@ -79,6 +79,6 @@ TEST(ArchitecturePlan)
(lir::UnaryOperation)op, vm::TargetBytesPerWord, mask, &thunk);
assertFalse(thunk);
assertNotEqual(static_cast<uint8_t>(0), mask.typeMask);
assertNotEqual(static_cast<uint64_t>(0), mask.registerMask);
assertNotEqual(static_cast<uint64_t>(0), (uint64_t)mask.lowRegisterMask);
}
}

View File

@ -19,18 +19,29 @@ using namespace vm;
TEST(RegisterIterator)
{
RegisterMask regs(0x55);
BoundedRegisterMask regs(0x55);
assertEqual<unsigned>(0, regs.start);
assertEqual<unsigned>(7, regs.limit);
RegisterIterator it(regs);
assertTrue(it.hasNext());
assertEqual<unsigned>(0, it.next());
assertTrue(it.hasNext());
assertEqual<unsigned>(2, it.next());
assertTrue(it.hasNext());
assertEqual<unsigned>(4, it.next());
assertTrue(it.hasNext());
assertEqual<unsigned>(6, it.next());
assertFalse(it.hasNext());
for(int i = 0; i < 64; i++) {
assertEqual<unsigned>(i, BoundedRegisterMask(static_cast<uint64_t>(1) << i).start);
assertEqual<unsigned>(i + 1, BoundedRegisterMask(static_cast<uint64_t>(1) << i).limit);
}
auto it = regs.begin();
auto end = regs.end();
assertTrue(it != end);
assertEqual<unsigned>(6, (*it).index());
++it;
assertTrue(it != end);
assertEqual<unsigned>(4, (*it).index());
++it;
assertTrue(it != end);
assertEqual<unsigned>(2, (*it).index());
++it;
assertTrue(it != end);
assertEqual<unsigned>(0, (*it).index());
++it;
assertFalse(it != end);
}