/* Copyright (c) 2008-2012, Avian Contributors

   Permission to use, copy, modify, and/or distribute this software
   for any purpose with or without fee is hereby granted, provided
   that the above copyright notice and this permission notice appear
   in all copies.

   There is NO WARRANTY for this software.  See license.txt for
   details. */
2013-02-11 15:07:46 +00:00
|
|
|
#ifndef AVIAN_CODEGEN_ASSEMBLER_H
|
|
|
|
#define AVIAN_CODEGEN_ASSEMBLER_H
|
2008-02-11 17:21:41 +00:00
|
|
|
|
2013-02-21 03:42:09 +00:00
|
|
|
#include <avian/vm/system/system.h>
|
2008-02-11 17:21:41 +00:00
|
|
|
#include "zone.h"
|
|
|
|
|
2013-02-20 05:12:28 +00:00
|
|
|
#include <avian/vm/codegen/lir.h>
|
|
|
|
#include <avian/vm/codegen/promise.h>
|
2013-02-11 15:07:46 +00:00
|
|
|
|
|
|
|
namespace avian {
|
|
|
|
namespace codegen {
|
|
|
|
|
2013-02-12 04:31:19 +00:00
|
|
|
class RegisterFile;
|
|
|
|
|
2013-02-11 15:07:46 +00:00
|
|
|
class OperandInfo {
|
|
|
|
public:
|
|
|
|
const unsigned size;
|
|
|
|
const lir::OperandType type;
|
|
|
|
lir::Operand* const operand;
|
|
|
|
|
|
|
|
inline OperandInfo(unsigned size, lir::OperandType type, lir::Operand* operand):
|
|
|
|
size(size),
|
|
|
|
type(type),
|
|
|
|
operand(operand)
|
|
|
|
{ }
|
|
|
|
};
|
2008-02-11 17:21:41 +00:00
|
|
|
|
2009-05-26 05:27:10 +00:00
|
|
|
// Compile-time feature flag: true only when the build defines
// AVIAN_TAILS, enabling tail-call support in the code generator.
#ifdef AVIAN_TAILS
const bool TailCalls = true;
#else
const bool TailCalls = false;
#endif
|
|
|
|
|
2011-03-01 17:59:00 +00:00
|
|
|
// Whether generated code maintains a dedicated frame pointer.  Stack
// unwinding no longer requires one (a compile-time map of instruction
// offsets to frame sizes is consulted instead), so the frame pointer is
// kept only when explicitly requested via AVIAN_USE_FRAME_POINTER, or
// on PowerPC, where the ABI makes the stack pointer double as a frame
// pointer at no extra cost.
#if (defined AVIAN_USE_FRAME_POINTER) || (defined ARCH_powerpc)
const bool UseFramePointer = true;
#else
const bool UseFramePointer = false;
#endif
|
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
class Assembler {
|
|
|
|
public:
|
|
|
|
|
2008-03-13 23:43:11 +00:00
|
|
|
class Client {
|
|
|
|
public:
|
2008-05-06 21:13:02 +00:00
|
|
|
virtual int acquireTemporary
|
|
|
|
(uint32_t mask = ~static_cast<uint32_t>(0)) = 0;
|
2008-03-13 23:43:11 +00:00
|
|
|
virtual void releaseTemporary(int r) = 0;
|
2008-04-27 20:15:18 +00:00
|
|
|
|
|
|
|
virtual void save(int r) = 0;
|
2008-03-13 23:43:11 +00:00
|
|
|
};
|
|
|
|
|
2008-09-07 20:12:11 +00:00
|
|
|
class Block {
|
|
|
|
public:
|
|
|
|
virtual unsigned resolve(unsigned start, Block* next) = 0;
|
|
|
|
};
|
|
|
|
|
2008-08-17 19:32:40 +00:00
|
|
|
class Architecture {
|
|
|
|
public:
|
2009-09-26 19:43:44 +00:00
|
|
|
virtual unsigned floatRegisterSize() = 0;
|
|
|
|
|
2013-02-12 04:31:19 +00:00
|
|
|
virtual const RegisterFile* registerFile() = 0;
|
2008-03-13 23:43:11 +00:00
|
|
|
|
2011-09-24 04:21:54 +00:00
|
|
|
virtual int scratch() = 0;
|
2008-08-17 19:32:40 +00:00
|
|
|
virtual int stack() = 0;
|
|
|
|
virtual int thread() = 0;
|
2009-02-28 21:20:43 +00:00
|
|
|
virtual int returnLow() = 0;
|
2008-08-23 18:04:36 +00:00
|
|
|
virtual int returnHigh() = 0;
|
2009-05-03 20:57:11 +00:00
|
|
|
virtual int virtualCallTarget() = 0;
|
2009-04-19 22:36:11 +00:00
|
|
|
virtual int virtualCallIndex() = 0;
|
2008-08-23 18:04:36 +00:00
|
|
|
|
2009-03-01 19:28:17 +00:00
|
|
|
virtual bool bigEndian() = 0;
|
2009-09-26 19:43:44 +00:00
|
|
|
|
2009-10-18 00:18:03 +00:00
|
|
|
virtual uintptr_t maximumImmediateJump() = 0;
|
|
|
|
|
2013-02-11 15:07:46 +00:00
|
|
|
virtual bool alwaysCondensed(lir::BinaryOperation op) = 0;
|
|
|
|
virtual bool alwaysCondensed(lir::TernaryOperation op) = 0;
|
2009-03-01 19:28:17 +00:00
|
|
|
|
2008-08-23 18:04:36 +00:00
|
|
|
virtual bool reserved(int register_) = 0;
|
2008-02-11 17:21:41 +00:00
|
|
|
|
2009-04-25 17:49:56 +00:00
|
|
|
virtual unsigned frameFootprint(unsigned footprint) = 0;
|
2009-02-26 03:49:42 +00:00
|
|
|
virtual unsigned argumentFootprint(unsigned footprint) = 0;
|
support stack unwinding without using a frame pointer
Previously, we unwound the stack by following the chain of frame
pointers for normal returns, stack trace creation, and exception
unwinding. On x86, this required reserving EBP/RBP for frame pointer
duties, making it unavailable for general computation and requiring
that it be explicitly saved and restored on entry and exit,
respectively.
On PowerPC, we use an ABI that makes the stack pointer double as a
frame pointer, so it doesn't cost us anything. We've been using the
same convention on ARM, but it doesn't match the native calling
convention, which makes it unusable when we want to call native code
from Java and pass arguments on the stack.
So far, the ARM calling convention mismatch hasn't been an issue
because we've never passed more arguments from Java to native code
than would fit in registers. However, we must now pass an extra
argument (the thread pointer) to e.g. divideLong so it can throw an
exception on divide by zero, which means the last argument must be
passed on the stack. This will clobber the linkage area we've been
using to hold the frame pointer, so we need to stop using it.
One solution would be to use the same convention on ARM as we do on
x86, but this would introduce the same overhead of making a register
unavailable for general use and extra code at method entry and exit.
Instead, this commit removes the need for a frame pointer. Unwinding
involves consulting a map of instruction offsets to frame sizes which
is generated at compile time. This is necessary because stack trace
creation can happen at any time due to Thread.getStackTrace being
called by another thread, and the frame size varies during the
execution of a method.
So far, only x86(_64) is working, and continuations and tail call
optimization are probably broken. More to come.
2011-01-17 02:05:05 +00:00
|
|
|
virtual bool argumentAlignment() = 0;
|
2011-08-11 03:33:56 +00:00
|
|
|
virtual bool argumentRegisterAlignment() = 0;
|
2008-08-17 19:32:40 +00:00
|
|
|
virtual unsigned argumentRegisterCount() = 0;
|
|
|
|
virtual int argumentRegister(unsigned index) = 0;
|
2008-02-11 17:21:41 +00:00
|
|
|
|
2011-02-21 22:25:52 +00:00
|
|
|
virtual bool hasLinkRegister() = 0;
|
|
|
|
|
2009-04-25 17:49:56 +00:00
|
|
|
virtual unsigned stackAlignmentInWords() = 0;
|
|
|
|
|
2009-04-05 21:42:10 +00:00
|
|
|
virtual bool matchCall(void* returnAddress, void* target) = 0;
|
|
|
|
|
2013-02-11 15:07:46 +00:00
|
|
|
virtual void updateCall(lir::UnaryOperation op, void* returnAddress,
|
2009-10-18 00:18:03 +00:00
|
|
|
void* newTarget) = 0;
|
2008-02-11 17:21:41 +00:00
|
|
|
|
2011-08-30 01:00:17 +00:00
|
|
|
virtual void setConstant(void* dst, uint64_t constant) = 0;
|
2009-03-10 00:52:09 +00:00
|
|
|
|
2008-08-17 19:32:40 +00:00
|
|
|
virtual unsigned alignFrameSize(unsigned sizeInWords) = 0;
|
|
|
|
|
2011-01-26 00:22:43 +00:00
|
|
|
virtual void nextFrame(void* start, unsigned size, unsigned footprint,
|
fix a couple of subtle Thread.getStackTrace bugs
The first problem was that, on x86, we failed to properly keep track
of whether to expect the return address to be on the stack or not when
unwinding through a frame. We were relying on a "stackLimit" pointer
to tell us whether we were looking at the most recently-called frame
by comparing it with the stack pointer for that frame. That was
inaccurate in the case of a thread executing at the beginning of a
method before a new frame is allocated, in which case the most recent
two frames share a stack pointer, confusing the unwinder. The
solution involves keeping track of how many frames we've looked at
while walking the stack.
The other problem was that compareIpToMethodBounds assumed every
method was followed by at least one byte of padding before the next
method started. That assumption was usually valid because we were
storing the size following method code prior to the code itself.
However, the last method of an AOT-compiled code image is not followed
by any such method header and may instead be followed directly by
native code with no intervening padding. In that case, we risk
interpreting that native code as part of the preceding method, with
potentially bizarre results.
The reason for the compareIpToMethodBounds assumption was that methods
which throw exceptions as their last instruction generate a
non-returning call, which nonetheless push a return address on the
stack which points past the end of the method, and the unwinder needs
to know that return address belongs to that method. A better solution
is to add an extra trap instruction to the end of such methods, which
is what this patch does.
2012-05-05 00:35:13 +00:00
|
|
|
void* link, bool mostRecent,
|
2011-01-26 00:22:43 +00:00
|
|
|
unsigned targetParameterFootprint, void** ip,
|
|
|
|
void** stack) = 0;
|
2008-08-18 15:23:01 +00:00
|
|
|
virtual void* frameIp(void* stack) = 0;
|
|
|
|
virtual unsigned frameHeaderSize() = 0;
|
2008-11-08 22:36:38 +00:00
|
|
|
virtual unsigned frameReturnAddressSize() = 0;
|
2008-08-18 15:23:01 +00:00
|
|
|
virtual unsigned frameFooterSize() = 0;
|
2009-04-22 01:39:25 +00:00
|
|
|
virtual int returnAddressOffset() = 0;
|
|
|
|
virtual int framePointerOffset() = 0;
|
2008-08-17 19:32:40 +00:00
|
|
|
|
|
|
|
virtual void plan
|
2013-02-11 15:07:46 +00:00
|
|
|
(lir::UnaryOperation op,
|
2008-08-17 19:32:40 +00:00
|
|
|
unsigned aSize, uint8_t* aTypeMask, uint64_t* aRegisterMask,
|
|
|
|
bool* thunk) = 0;
|
|
|
|
|
2009-08-06 14:44:15 +00:00
|
|
|
virtual void planSource
|
2013-02-11 15:07:46 +00:00
|
|
|
(lir::BinaryOperation op,
|
2008-08-17 19:32:40 +00:00
|
|
|
unsigned aSize, uint8_t* aTypeMask, uint64_t* aRegisterMask,
|
2009-08-06 14:44:15 +00:00
|
|
|
unsigned bSize, bool* thunk) = 0;
|
|
|
|
|
|
|
|
virtual void planDestination
|
2013-02-11 15:07:46 +00:00
|
|
|
(lir::BinaryOperation op,
|
2009-10-04 19:56:48 +00:00
|
|
|
unsigned aSize, uint8_t aTypeMask, uint64_t aRegisterMask,
|
2009-08-06 14:44:15 +00:00
|
|
|
unsigned bSize, uint8_t* bTypeMask, uint64_t* bRegisterMask) = 0;
|
2008-08-17 19:32:40 +00:00
|
|
|
|
2009-10-04 19:56:48 +00:00
|
|
|
virtual void planMove
|
2009-11-28 04:15:12 +00:00
|
|
|
(unsigned size, uint8_t* srcTypeMask, uint64_t* srcRegisterMask,
|
|
|
|
uint8_t* tmpTypeMask, uint64_t* tmpRegisterMask,
|
|
|
|
uint8_t dstTypeMask, uint64_t dstRegisterMask) = 0;
|
2009-10-04 19:56:48 +00:00
|
|
|
|
2009-08-06 14:44:15 +00:00
|
|
|
virtual void planSource
|
2013-02-11 15:07:46 +00:00
|
|
|
(lir::TernaryOperation op,
|
2008-08-17 19:32:40 +00:00
|
|
|
unsigned aSize, uint8_t* aTypeMask, uint64_t* aRegisterMask,
|
|
|
|
unsigned bSize, uint8_t* bTypeMask, uint64_t* bRegisterMask,
|
2009-08-06 14:44:15 +00:00
|
|
|
unsigned cSize, bool* thunk) = 0;
|
|
|
|
|
|
|
|
virtual void planDestination
|
2013-02-11 15:07:46 +00:00
|
|
|
(lir::TernaryOperation op,
|
2009-10-04 19:56:48 +00:00
|
|
|
unsigned aSize, uint8_t aTypeMask, uint64_t aRegisterMask,
|
|
|
|
unsigned bSize, uint8_t bTypeMask, uint64_t bRegisterMask,
|
2013-02-12 15:15:30 +00:00
|
|
|
unsigned cSize, uint8_t* cTypeMask, uint64_t* cRegisterMask) = 0;
|
|
|
|
|
2013-02-11 15:07:46 +00:00
|
|
|
virtual Assembler* makeAssembler(vm::Allocator*, vm::Zone*) = 0;
|
2008-08-16 18:46:14 +00:00
|
|
|
|
2008-08-18 15:23:01 +00:00
|
|
|
virtual void acquire() = 0;
|
|
|
|
virtual void release() = 0;
|
2008-08-17 19:32:40 +00:00
|
|
|
};
|
2008-07-05 20:21:13 +00:00
|
|
|
|
2008-08-17 19:32:40 +00:00
|
|
|
virtual void setClient(Client* client) = 0;
|
|
|
|
|
2008-08-23 18:04:36 +00:00
|
|
|
virtual Architecture* arch() = 0;
|
|
|
|
|
2011-01-30 21:14:57 +00:00
|
|
|
virtual void checkStackOverflow(uintptr_t handler,
|
|
|
|
unsigned stackLimitOffsetFromThread) = 0;
|
2011-02-20 03:33:26 +00:00
|
|
|
virtual void saveFrame(unsigned stackOffset, unsigned ipOffset) = 0;
|
2008-09-05 15:00:38 +00:00
|
|
|
virtual void pushFrame(unsigned argumentCount, ...) = 0;
|
|
|
|
virtual void allocateFrame(unsigned footprint) = 0;
|
support stack unwinding without using a frame pointer
Previously, we unwound the stack by following the chain of frame
pointers for normal returns, stack trace creation, and exception
unwinding. On x86, this required reserving EBP/RBP for frame pointer
duties, making it unavailable for general computation and requiring
that it be explicitly saved and restored on entry and exit,
respectively.
On PowerPC, we use an ABI that makes the stack pointer double as a
frame pointer, so it doesn't cost us anything. We've been using the
same convention on ARM, but it doesn't match the native calling
convention, which makes it unusable when we want to call native code
from Java and pass arguments on the stack.
So far, the ARM calling convention mismatch hasn't been an issue
because we've never passed more arguments from Java to native code
than would fit in registers. However, we must now pass an extra
argument (the thread pointer) to e.g. divideLong so it can throw an
exception on divide by zero, which means the last argument must be
passed on the stack. This will clobber the linkage area we've been
using to hold the frame pointer, so we need to stop using it.
One solution would be to use the same convention on ARM as we do on
x86, but this would introduce the same overhead of making a register
unavailable for general use and extra code at method entry and exit.
Instead, this commit removes the need for a frame pointer. Unwinding
involves consulting a map of instruction offsets to frame sizes which
is generated at compile time. This is necessary because stack trace
creation can happen at any time due to Thread.getStackTrace being
called by another thread, and the frame size varies during the
execution of a method.
So far, only x86(_64) is working, and continuations and tail call
optimization are probably broken. More to come.
2011-01-17 02:05:05 +00:00
|
|
|
virtual void adjustFrame(unsigned difference) = 0;
|
|
|
|
virtual void popFrame(unsigned footprint) = 0;
|
2009-04-19 22:36:11 +00:00
|
|
|
virtual void popFrameForTailCall(unsigned footprint, int offset,
|
|
|
|
int returnAddressSurrogate,
|
|
|
|
int framePointerSurrogate) = 0;
|
support stack unwinding without using a frame pointer
Previously, we unwound the stack by following the chain of frame
pointers for normal returns, stack trace creation, and exception
unwinding. On x86, this required reserving EBP/RBP for frame pointer
duties, making it unavailable for general computation and requiring
that it be explicitly saved and restored on entry and exit,
respectively.
On PowerPC, we use an ABI that makes the stack pointer double as a
frame pointer, so it doesn't cost us anything. We've been using the
same convention on ARM, but it doesn't match the native calling
convention, which makes it unusable when we want to call native code
from Java and pass arguments on the stack.
So far, the ARM calling convention mismatch hasn't been an issue
because we've never passed more arguments from Java to native code
than would fit in registers. However, we must now pass an extra
argument (the thread pointer) to e.g. divideLong so it can throw an
exception on divide by zero, which means the last argument must be
passed on the stack. This will clobber the linkage area we've been
using to hold the frame pointer, so we need to stop using it.
One solution would be to use the same convention on ARM as we do on
x86, but this would introduce the same overhead of making a register
unavailable for general use and extra code at method entry and exit.
Instead, this commit removes the need for a frame pointer. Unwinding
involves consulting a map of instruction offsets to frame sizes which
is generated at compile time. This is necessary because stack trace
creation can happen at any time due to Thread.getStackTrace being
called by another thread, and the frame size varies during the
execution of a method.
So far, only x86(_64) is working, and continuations and tail call
optimization are probably broken. More to come.
2011-01-17 02:05:05 +00:00
|
|
|
virtual void popFrameAndPopArgumentsAndReturn(unsigned frameFootprint,
|
|
|
|
unsigned argumentFootprint)
|
2009-04-19 22:36:11 +00:00
|
|
|
= 0;
|
support stack unwinding without using a frame pointer
Previously, we unwound the stack by following the chain of frame
pointers for normal returns, stack trace creation, and exception
unwinding. On x86, this required reserving EBP/RBP for frame pointer
duties, making it unavailable for general computation and requiring
that it be explicitly saved and restored on entry and exit,
respectively.
On PowerPC, we use an ABI that makes the stack pointer double as a
frame pointer, so it doesn't cost us anything. We've been using the
same convention on ARM, but it doesn't match the native calling
convention, which makes it unusable when we want to call native code
from Java and pass arguments on the stack.
So far, the ARM calling convention mismatch hasn't been an issue
because we've never passed more arguments from Java to native code
than would fit in registers. However, we must now pass an extra
argument (the thread pointer) to e.g. divideLong so it can throw an
exception on divide by zero, which means the last argument must be
passed on the stack. This will clobber the linkage area we've been
using to hold the frame pointer, so we need to stop using it.
One solution would be to use the same convention on ARM as we do on
x86, but this would introduce the same overhead of making a register
unavailable for general use and extra code at method entry and exit.
Instead, this commit removes the need for a frame pointer. Unwinding
involves consulting a map of instruction offsets to frame sizes which
is generated at compile time. This is necessary because stack trace
creation can happen at any time due to Thread.getStackTrace being
called by another thread, and the frame size varies during the
execution of a method.
So far, only x86(_64) is working, and continuations and tail call
optimization are probably broken. More to come.
2011-01-17 02:05:05 +00:00
|
|
|
virtual void popFrameAndUpdateStackAndReturn(unsigned frameFootprint,
|
|
|
|
unsigned stackOffsetFromThread)
|
2009-04-25 17:49:56 +00:00
|
|
|
= 0;
|
2008-02-11 17:21:41 +00:00
|
|
|
|
2013-02-11 15:07:46 +00:00
|
|
|
virtual void apply(lir::Operation op) = 0;
|
|
|
|
virtual void apply(lir::UnaryOperation op, OperandInfo a) = 0;
|
|
|
|
virtual void apply(lir::BinaryOperation op, OperandInfo a, OperandInfo b) = 0;
|
|
|
|
virtual void apply(lir::TernaryOperation op, OperandInfo a, OperandInfo b, OperandInfo c) = 0;
|
2008-02-11 17:21:41 +00:00
|
|
|
|
2011-02-28 06:03:13 +00:00
|
|
|
virtual void setDestination(uint8_t* dst) = 0;
|
|
|
|
|
|
|
|
virtual void write() = 0;
|
2008-02-11 17:21:41 +00:00
|
|
|
|
2010-12-07 22:57:11 +00:00
|
|
|
virtual Promise* offset(bool forTrace = false) = 0;
|
2008-08-30 20:12:27 +00:00
|
|
|
|
2008-09-09 00:31:19 +00:00
|
|
|
virtual Block* endBlock(bool startNew) = 0;
|
2008-08-30 20:12:27 +00:00
|
|
|
|
2010-11-14 02:28:05 +00:00
|
|
|
virtual void endEvent() = 0;
|
2008-02-11 17:21:41 +00:00
|
|
|
|
2010-11-14 02:28:05 +00:00
|
|
|
virtual unsigned length() = 0;
|
2009-10-20 14:20:49 +00:00
|
|
|
|
2011-01-30 21:14:57 +00:00
|
|
|
virtual unsigned footerSize() = 0;
|
support stack unwinding without using a frame pointer
Previously, we unwound the stack by following the chain of frame
pointers for normal returns, stack trace creation, and exception
unwinding. On x86, this required reserving EBP/RBP for frame pointer
duties, making it unavailable for general computation and requiring
that it be explicitly saved and restored on entry and exit,
respectively.
On PowerPC, we use an ABI that makes the stack pointer double as a
frame pointer, so it doesn't cost us anything. We've been using the
same convention on ARM, but it doesn't match the native calling
convention, which makes it unusable when we want to call native code
from Java and pass arguments on the stack.
So far, the ARM calling convention mismatch hasn't been an issue
because we've never passed more arguments from Java to native code
than would fit in registers. However, we must now pass an extra
argument (the thread pointer) to e.g. divideLong so it can throw an
exception on divide by zero, which means the last argument must be
passed on the stack. This will clobber the linkage area we've been
using to hold the frame pointer, so we need to stop using it.
One solution would be to use the same convention on ARM as we do on
x86, but this would introduce the same overhead of making a register
unavailable for general use and extra code at method entry and exit.
Instead, this commit removes the need for a frame pointer. Unwinding
involves consulting a map of instruction offsets to frame sizes which
is generated at compile time. This is necessary because stack trace
creation can happen at any time due to Thread.getStackTrace being
called by another thread, and the frame size varies during the
execution of a method.
So far, only x86(_64) is working, and continuations and tail call
optimization are probably broken. More to come.
2011-01-17 02:05:05 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
virtual void dispose() = 0;
|
|
|
|
};
|
|
|
|
|
2013-02-11 15:07:46 +00:00
|
|
|
} // namespace codegen
|
|
|
|
} // namespace avian
|
2008-02-11 17:21:41 +00:00
|
|
|
|
2013-02-11 15:07:46 +00:00
|
|
|
#endif // AVIAN_CODEGEN_ASSEMBLER_H
|