further split out arm assembler
parent fd59e1e08d
commit 22d6ed1bec
File diff suppressed because it is too large
@@ -13,7 +13,6 @@
#include <avian/vm/codegen/lir.h>
#include <avian/vm/codegen/assembler.h>
#include "alloc-vector.h"

namespace avian {
namespace codegen {
@@ -53,15 +53,6 @@ class Context {
  unsigned constantPoolCount;
};

class Task {
 public:
  Task(Task* next): next(next) { }

  virtual void run(Context* con) = 0;

  Task* next;
};

typedef void (*OperationType)(Context*);

typedef void (*UnaryOperationType)(Context*, unsigned, lir::Operand*);
src/codegen/arm/encode.h (new file, 184 lines)
@@ -0,0 +1,184 @@
/* Copyright (c) 2008-2012, Avian Contributors

   Permission to use, copy, modify, and/or distribute this software
   for any purpose with or without fee is hereby granted, provided
   that the above copyright notice and this permission notice appear
   in all copies.

   There is NO WARRANTY for this software. See license.txt for
   details. */

#ifndef AVIAN_CODEGEN_ASSEMBLER_ARM_ENCODE_H
#define AVIAN_CODEGEN_ASSEMBLER_ARM_ENCODE_H

#include <avian/vm/codegen/lir.h>
#include <avian/vm/codegen/assembler.h>

namespace avian {
namespace codegen {
namespace arm {

namespace isa {

// SYSTEM REGISTERS
const int FPSID = 0x0;
const int FPSCR = 0x1;
const int FPEXC = 0x8;
// INSTRUCTION OPTIONS
enum CONDITION { EQ, NE, CS, CC, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV };
enum SHIFTOP { LSL, LSR, ASR, ROR };
// INSTRUCTION FORMATS
inline int DATA(int cond, int opcode, int S, int Rn, int Rd, int shift, int Sh, int Rm)
{ return cond<<28 | opcode<<21 | S<<20 | Rn<<16 | Rd<<12 | shift<<7 | Sh<<5 | Rm; }
inline int DATAS(int cond, int opcode, int S, int Rn, int Rd, int Rs, int Sh, int Rm)
{ return cond<<28 | opcode<<21 | S<<20 | Rn<<16 | Rd<<12 | Rs<<8 | Sh<<5 | 1<<4 | Rm; }
inline int DATAI(int cond, int opcode, int S, int Rn, int Rd, int rot, int imm)
{ return cond<<28 | 1<<25 | opcode<<21 | S<<20 | Rn<<16 | Rd<<12 | rot<<8 | (imm&0xff); }
inline int BRANCH(int cond, int L, int offset)
{ return cond<<28 | 5<<25 | L<<24 | (offset&0xffffff); }
inline int BRANCHX(int cond, int L, int Rm)
{ return cond<<28 | 0x4bffc<<6 | L<<5 | 1<<4 | Rm; }
inline int MULTIPLY(int cond, int mul, int S, int Rd, int Rn, int Rs, int Rm)
{ return cond<<28 | mul<<21 | S<<20 | Rd<<16 | Rn<<12 | Rs<<8 | 9<<4 | Rm; }
inline int XFER(int cond, int P, int U, int B, int W, int L, int Rn, int Rd, int shift, int Sh, int Rm)
{ return cond<<28 | 3<<25 | P<<24 | U<<23 | B<<22 | W<<21 | L<<20 | Rn<<16 | Rd<<12 | shift<<7 | Sh<<5 | Rm; }
inline int XFERI(int cond, int P, int U, int B, int W, int L, int Rn, int Rd, int offset)
{ return cond<<28 | 2<<25 | P<<24 | U<<23 | B<<22 | W<<21 | L<<20 | Rn<<16 | Rd<<12 | (offset&0xfff); }
inline int XFER2(int cond, int P, int U, int W, int L, int Rn, int Rd, int S, int H, int Rm)
{ return cond<<28 | P<<24 | U<<23 | W<<21 | L<<20 | Rn<<16 | Rd<<12 | 1<<7 | S<<6 | H<<5 | 1<<4 | Rm; }
inline int XFER2I(int cond, int P, int U, int W, int L, int Rn, int Rd, int offsetH, int S, int H, int offsetL)
{ return cond<<28 | P<<24 | U<<23 | 1<<22 | W<<21 | L<<20 | Rn<<16 | Rd<<12 | offsetH<<8 | 1<<7 | S<<6 | H<<5 | 1<<4 | (offsetL&0xf); }
inline int COOP(int cond, int opcode_1, int CRn, int CRd, int cp_num, int opcode_2, int CRm)
{ return cond<<28 | 0xe<<24 | opcode_1<<20 | CRn<<16 | CRd<<12 | cp_num<<8 | opcode_2<<5 | CRm; }
inline int COXFER(int cond, int P, int U, int N, int W, int L, int Rn, int CRd, int cp_num, int offset) // offset is in words, not bytes
{ return cond<<28 | 0x6<<25 | P<<24 | U<<23 | N<<22 | W<<21 | L<<20 | Rn<<16 | CRd<<12 | cp_num<<8 | (offset&0xff)>>2; }
inline int COREG(int cond, int opcode_1, int L, int CRn, int Rd, int cp_num, int opcode_2, int CRm)
{ return cond<<28 | 0xe<<24 | opcode_1<<21 | L<<20 | CRn<<16 | Rd<<12 | cp_num<<8 | opcode_2<<5 | 1<<4 | CRm; }
inline int COREG2(int cond, int L, int Rn, int Rd, int cp_num, int opcode, int CRm)
{ return cond<<28 | 0xc4<<20 | L<<20 | Rn<<16 | Rd<<12 | cp_num<<8 | opcode<<4 | CRm;}
// FIELD CALCULATORS
inline int calcU(int imm) { return imm >= 0 ? 1 : 0; }
// INSTRUCTIONS
// The "cond" and "S" fields are set using the SETCOND() and SETS() functions
inline int b(int offset) { return BRANCH(AL, 0, offset); }
inline int bl(int offset) { return BRANCH(AL, 1, offset); }
inline int bx(int Rm) { return BRANCHX(AL, 0, Rm); }
inline int blx(int Rm) { return BRANCHX(AL, 1, Rm); }
inline int and_(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0x0, 0, Rn, Rd, shift, Sh, Rm); }
inline int eor(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0x1, 0, Rn, Rd, shift, Sh, Rm); }
inline int rsb(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0x3, 0, Rn, Rd, shift, Sh, Rm); }
inline int add(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0x4, 0, Rn, Rd, shift, Sh, Rm); }
inline int adc(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0x5, 0, Rn, Rd, shift, Sh, Rm); }
inline int rsc(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0x7, 0, Rn, Rd, shift, Sh, Rm); }
inline int cmp(int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0xa, 1, Rn, 0, shift, Sh, Rm); }
inline int orr(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0xc, 0, Rn, Rd, shift, Sh, Rm); }
inline int mov(int Rd, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0xd, 0, 0, Rd, shift, Sh, Rm); }
inline int mvn(int Rd, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0xf, 0, 0, Rd, shift, Sh, Rm); }
inline int andi(int Rd, int Rn, int imm, int rot=0) { return DATAI(AL, 0x0, 0, Rn, Rd, rot, imm); }
inline int subi(int Rd, int Rn, int imm, int rot=0) { return DATAI(AL, 0x2, 0, Rn, Rd, rot, imm); }
inline int rsbi(int Rd, int Rn, int imm, int rot=0) { return DATAI(AL, 0x3, 0, Rn, Rd, rot, imm); }
inline int addi(int Rd, int Rn, int imm, int rot=0) { return DATAI(AL, 0x4, 0, Rn, Rd, rot, imm); }
inline int adci(int Rd, int Rn, int imm, int rot=0) { return DATAI(AL, 0x5, 0, Rn, Rd, rot, imm); }
inline int bici(int Rd, int Rn, int imm, int rot=0) { return DATAI(AL, 0xe, 0, Rn, Rd, rot, imm); }
inline int cmpi(int Rn, int imm, int rot=0) { return DATAI(AL, 0xa, 1, Rn, 0, rot, imm); }
inline int movi(int Rd, int imm, int rot=0) { return DATAI(AL, 0xd, 0, 0, Rd, rot, imm); }
inline int orrsh(int Rd, int Rn, int Rm, int Rs, int Sh) { return DATAS(AL, 0xc, 0, Rn, Rd, Rs, Sh, Rm); }
inline int movsh(int Rd, int Rm, int Rs, int Sh) { return DATAS(AL, 0xd, 0, 0, Rd, Rs, Sh, Rm); }
inline int mul(int Rd, int Rm, int Rs) { return MULTIPLY(AL, 0, 0, Rd, 0, Rs, Rm); }
inline int mla(int Rd, int Rm, int Rs, int Rn) { return MULTIPLY(AL, 1, 0, Rd, Rn, Rs, Rm); }
inline int umull(int RdLo, int RdHi, int Rm, int Rs) { return MULTIPLY(AL, 4, 0, RdHi, RdLo, Rs, Rm); }
inline int ldr(int Rd, int Rn, int Rm, int W=0) { return XFER(AL, 1, 1, 0, W, 1, Rn, Rd, 0, 0, Rm); }
inline int ldri(int Rd, int Rn, int imm, int W=0) { return XFERI(AL, 1, calcU(imm), 0, W, 1, Rn, Rd, abs(imm)); }
inline int ldrb(int Rd, int Rn, int Rm) { return XFER(AL, 1, 1, 1, 0, 1, Rn, Rd, 0, 0, Rm); }
inline int ldrbi(int Rd, int Rn, int imm) { return XFERI(AL, 1, calcU(imm), 1, 0, 1, Rn, Rd, abs(imm)); }
inline int str(int Rd, int Rn, int Rm, int W=0) { return XFER(AL, 1, 1, 0, W, 0, Rn, Rd, 0, 0, Rm); }
inline int stri(int Rd, int Rn, int imm, int W=0) { return XFERI(AL, 1, calcU(imm), 0, W, 0, Rn, Rd, abs(imm)); }
inline int strb(int Rd, int Rn, int Rm) { return XFER(AL, 1, 1, 1, 0, 0, Rn, Rd, 0, 0, Rm); }
inline int strbi(int Rd, int Rn, int imm) { return XFERI(AL, 1, calcU(imm), 1, 0, 0, Rn, Rd, abs(imm)); }
inline int ldrh(int Rd, int Rn, int Rm) { return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 0, 1, Rm); }
inline int ldrhi(int Rd, int Rn, int imm) { return XFER2I(AL, 1, calcU(imm), 0, 1, Rn, Rd, abs(imm)>>4 & 0xf, 0, 1, abs(imm)&0xf); }
inline int strh(int Rd, int Rn, int Rm) { return XFER2(AL, 1, 1, 0, 0, Rn, Rd, 0, 1, Rm); }
inline int strhi(int Rd, int Rn, int imm) { return XFER2I(AL, 1, calcU(imm), 0, 0, Rn, Rd, abs(imm)>>4 & 0xf, 0, 1, abs(imm)&0xf); }
inline int ldrsh(int Rd, int Rn, int Rm) { return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 1, 1, Rm); }
inline int ldrshi(int Rd, int Rn, int imm) { return XFER2I(AL, 1, calcU(imm), 0, 1, Rn, Rd, abs(imm)>>4 & 0xf, 1, 1, abs(imm)&0xf); }
inline int ldrsb(int Rd, int Rn, int Rm) { return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 1, 0, Rm); }
inline int ldrsbi(int Rd, int Rn, int imm) { return XFER2I(AL, 1, calcU(imm), 0, 1, Rn, Rd, abs(imm)>>4 & 0xf, 1, 0, abs(imm)&0xf); }
// breakpoint instruction, this really has its own instruction format
inline int bkpt(int16_t immed) { return 0xe1200070 | (((unsigned)immed & 0xffff) >> 4 << 8) | (immed & 0xf); }
// COPROCESSOR INSTRUCTIONS
inline int mcr(int coproc, int opcode_1, int Rd, int CRn, int CRm, int opcode_2=0) { return COREG(AL, opcode_1, 0, CRn, Rd, coproc, opcode_2, CRm); }
inline int mcrr(int coproc, int opcode, int Rd, int Rn, int CRm) { return COREG2(AL, 0, Rn, Rd, coproc, opcode, CRm); }
inline int mrc(int coproc, int opcode_1, int Rd, int CRn, int CRm, int opcode_2=0) { return COREG(AL, opcode_1, 1, CRn, Rd, coproc, opcode_2, CRm); }
inline int mrrc(int coproc, int opcode, int Rd, int Rn, int CRm) { return COREG2(AL, 1, Rn, Rd, coproc, opcode, CRm); }
// VFP FLOATING-POINT INSTRUCTIONS
inline int fmuls(int Sd, int Sn, int Sm) { return COOP(AL, (Sd&1)<<2|2, Sn>>1, Sd>>1, 10, (Sn&1)<<2|(Sm&1), Sm>>1); }
inline int fadds(int Sd, int Sn, int Sm) { return COOP(AL, (Sd&1)<<2|3, Sn>>1, Sd>>1, 10, (Sn&1)<<2|(Sm&1), Sm>>1); }
inline int fsubs(int Sd, int Sn, int Sm) { return COOP(AL, (Sd&1)<<2|3, Sn>>1, Sd>>1, 10, (Sn&1)<<2|(Sm&1)|2, Sm>>1); }
inline int fdivs(int Sd, int Sn, int Sm) { return COOP(AL, (Sd&1)<<2|8, Sn>>1, Sd>>1, 10, (Sn&1)<<2|(Sm&1), Sm>>1); }
inline int fmuld(int Dd, int Dn, int Dm) { return COOP(AL, 2, Dn, Dd, 11, 0, Dm); }
inline int faddd(int Dd, int Dn, int Dm) { return COOP(AL, 3, Dn, Dd, 11, 0, Dm); }
inline int fsubd(int Dd, int Dn, int Dm) { return COOP(AL, 3, Dn, Dd, 11, 2, Dm); }
inline int fdivd(int Dd, int Dn, int Dm) { return COOP(AL, 8, Dn, Dd, 11, 0, Dm); }
inline int fcpys(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 0, Sd>>1, 10, 2|(Sm&1), Sm>>1); }
inline int fabss(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 0, Sd>>1, 10, 6|(Sm&1), Sm>>1); }
inline int fnegs(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 1, Sd>>1, 10, 2|(Sm&1), Sm>>1); }
inline int fsqrts(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 1, Sd>>1, 10, 6|(Sm&1), Sm>>1); }
inline int fcmps(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 4, Sd>>1, 10, 2|(Sm&1), Sm>>1); }
inline int fcvtds(int Dd, int Sm) { return COOP(AL, 0xb, 7, Dd, 10, 6|(Sm&1), Sm>>1); }
inline int fsitos(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 8, Sd>>1, 10, 6|(Sm&1), Sm>>1); }
inline int ftosizs(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 0xd, Sd>>1, 10, 6|(Sm&1), Sm>>1); }
inline int fcpyd(int Dd, int Dm) { return COOP(AL, 0xb, 0, Dd, 11, 2, Dm); }
inline int fabsd(int Dd, int Dm) { return COOP(AL, 0xb, 0, Dd, 11, 6, Dm); }
inline int fnegd(int Dd, int Dm) { return COOP(AL, 0xb, 1, Dd, 11, 2, Dm); }
inline int fsqrtd(int Dd, int Dm) { return COOP(AL, 0xb, 1, Dd, 11, 6, Dm); }
// double-precision comparison instructions
inline int fcmpd(int Dd, int Dm) { return COOP(AL, 0xb, 4, Dd, 11, 2, Dm); }
// double-precision conversion instructions
inline int fcvtsd(int Sd, int Dm) { return COOP(AL, 0xb|(Sd&1)<<2, 7, Sd>>1, 11, 6, Dm); }
inline int fsitod(int Dd, int Sm) { return COOP(AL, 0xb, 8, Dd, 11, 6|(Sm&1), Sm>>1); }
inline int ftosizd(int Sd, int Dm) { return COOP(AL, 0xb|(Sd&1)<<2, 0xd, Sd>>1, 11, 6, Dm); }
// single load/store instructions for both precision types
inline int flds(int Sd, int Rn, int offset=0) { return COXFER(AL, 1, 1, Sd&1, 0, 1, Rn, Sd>>1, 10, offset); };
inline int fldd(int Dd, int Rn, int offset=0) { return COXFER(AL, 1, 1, 0, 0, 1, Rn, Dd, 11, offset); };
inline int fsts(int Sd, int Rn, int offset=0) { return COXFER(AL, 1, 1, Sd&1, 0, 0, Rn, Sd>>1, 10, offset); };
inline int fstd(int Dd, int Rn, int offset=0) { return COXFER(AL, 1, 1, 0, 0, 0, Rn, Dd, 11, offset); };
// move between GPRs and FPRs
inline int fmsr(int Sn, int Rd) { return mcr(10, 0, Rd, Sn>>1, 0, (Sn&1)<<2); }
inline int fmrs(int Rd, int Sn) { return mrc(10, 0, Rd, Sn>>1, 0, (Sn&1)<<2); }
// move to/from VFP system registers
inline int fmrx(int Rd, int reg) { return mrc(10, 7, Rd, reg, 0); }
// these move around pairs of single-precision registers
inline int fmdrr(int Dm, int Rd, int Rn) { return mcrr(11, 1, Rd, Rn, Dm); }
inline int fmrrd(int Rd, int Rn, int Dm) { return mrrc(11, 1, Rd, Rn, Dm); }
// FLAG SETTERS
inline int SETCOND(int ins, int cond) { return ((ins&0x0fffffff) | (cond<<28)); }
inline int SETS(int ins) { return ins | 1<<20; }
// PSEUDO-INSTRUCTIONS
inline int lsl(int Rd, int Rm, int Rs) { return movsh(Rd, Rm, Rs, LSL); }
inline int lsli(int Rd, int Rm, int imm) { return mov(Rd, Rm, LSL, imm); }
inline int lsr(int Rd, int Rm, int Rs) { return movsh(Rd, Rm, Rs, LSR); }
inline int lsri(int Rd, int Rm, int imm) { return mov(Rd, Rm, LSR, imm); }
inline int asr(int Rd, int Rm, int Rs) { return movsh(Rd, Rm, Rs, ASR); }
inline int asri(int Rd, int Rm, int imm) { return mov(Rd, Rm, ASR, imm); }
inline int beq(int offset) { return SETCOND(b(offset), EQ); }
inline int bne(int offset) { return SETCOND(b(offset), NE); }
inline int bls(int offset) { return SETCOND(b(offset), LS); }
inline int bhi(int offset) { return SETCOND(b(offset), HI); }
inline int blt(int offset) { return SETCOND(b(offset), LT); }
inline int bgt(int offset) { return SETCOND(b(offset), GT); }
inline int ble(int offset) { return SETCOND(b(offset), LE); }
inline int bge(int offset) { return SETCOND(b(offset), GE); }
inline int blo(int offset) { return SETCOND(b(offset), CC); }
inline int bhs(int offset) { return SETCOND(b(offset), CS); }
inline int bpl(int offset) { return SETCOND(b(offset), PL); }
inline int fmstat() { return fmrx(15, FPSCR); }

} // namespace isa

inline void emit(Context* con, int code) { con->code.append4(code); }

} // namespace arm
} // namespace codegen
} // namespace avian

#endif // AVIAN_CODEGEN_ASSEMBLER_ARM_ENCODE_H
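The format helpers above simply OR fixed bit fields into a 32-bit word, so their output can be checked directly against documented ARM encodings. Below is a minimal standalone sketch (condensed, unsigned copies of a few of the helpers; not part of the commit) that spot-checks some well-known machine words:

// Standalone sketch (not part of the commit): condensed, unsigned copies of a
// few of the encoders above, used to spot-check well-known ARM machine words.
#include <cassert>
#include <cstdint>
#include <cstdio>

namespace sketch {
enum CONDITION { EQ, NE, CS, CC, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV };

inline uint32_t DATA(uint32_t cond, uint32_t opcode, uint32_t S, uint32_t Rn,
                     uint32_t Rd, uint32_t shift, uint32_t Sh, uint32_t Rm)
{ return cond<<28 | opcode<<21 | S<<20 | Rn<<16 | Rd<<12 | shift<<7 | Sh<<5 | Rm; }

inline uint32_t DATAI(uint32_t cond, uint32_t opcode, uint32_t S, uint32_t Rn,
                      uint32_t Rd, uint32_t rot, uint32_t imm)
{ return cond<<28 | 1<<25 | opcode<<21 | S<<20 | Rn<<16 | Rd<<12 | rot<<8 | (imm&0xff); }

inline uint32_t BRANCH(uint32_t cond, uint32_t L, uint32_t offset)
{ return cond<<28 | 5<<25 | L<<24 | (offset&0xffffff); }

inline uint32_t SETCOND(uint32_t ins, uint32_t cond) { return (ins&0x0fffffff) | cond<<28; }

inline uint32_t add(int Rd, int Rn, int Rm) { return DATA(AL, 0x4, 0, Rn, Rd, 0, 0, Rm); }
inline uint32_t movi(int Rd, int imm) { return DATAI(AL, 0xd, 0, 0, Rd, 0, imm); }
inline uint32_t beq(int offset) { return SETCOND(BRANCH(AL, 0, offset), EQ); }
}  // namespace sketch

int main() {
  assert(sketch::add(0, 1, 2) == 0xE0810002u);   // add r0, r1, r2
  assert(sketch::movi(0, 1) == 0xE3A00001u);     // mov r0, #1
  assert(sketch::beq(0) == 0x0A000000u);         // beq with a 24-bit word offset of 0
  std::printf("encodings match the ARM reference values\n");
  return 0;
}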
src/codegen/arm/fixup.cpp (new file, 175 lines)
@@ -0,0 +1,175 @@
/* Copyright (c) 2008-2012, Avian Contributors

   Permission to use, copy, modify, and/or distribute this software
   for any purpose with or without fee is hereby granted, provided
   that the above copyright notice and this permission notice appear
   in all copies.

   There is NO WARRANTY for this software. See license.txt for
   details. */

#include "context.h"
#include "fixup.h"
#include "block.h"

namespace avian {
namespace codegen {
namespace arm {

unsigned padding(MyBlock*, unsigned);

OffsetPromise::OffsetPromise(Context* con, MyBlock* block, unsigned offset, bool forTrace):
  con(con), block(block), offset(offset), forTrace(forTrace)
{ }

bool OffsetPromise::resolved() {
  return block->start != static_cast<unsigned>(~0);
}

int64_t OffsetPromise::value() {
  assert(con, resolved());

  unsigned o = offset - block->offset;
  return block->start + padding
    (block, forTrace ? o - vm::TargetBytesPerWord : o) + o;
}

Promise* offsetPromise(Context* con, bool forTrace) {
  return new(con->zone) OffsetPromise(con, con->lastBlock, con->code.length(), forTrace);
}

OffsetListener::OffsetListener(vm::System* s, uint8_t* instruction):
  s(s),
  instruction(instruction)
{ }

bool OffsetListener::resolve(int64_t value, void** location) {
  void* p = updateOffset(s, instruction, value);
  if (location) *location = p;
  return false;
}

OffsetTask::OffsetTask(Task* next, Promise* promise, Promise* instructionOffset):
  Task(next),
  promise(promise),
  instructionOffset(instructionOffset)
{ }

void OffsetTask::run(Context* con) {
  if (promise->resolved()) {
    updateOffset
      (con->s, con->result + instructionOffset->value(), promise->value());
  } else {
    new (promise->listen(sizeof(OffsetListener)))
      OffsetListener(con->s, con->result + instructionOffset->value());
  }
}

void appendOffsetTask(Context* con, Promise* promise, Promise* instructionOffset) {
  con->tasks = new(con->zone) OffsetTask(con->tasks, promise, instructionOffset);
}

bool bounded(int right, int left, int32_t v) {
  return ((v << left) >> left) == v and ((v >> right) << right) == v;
}

void* updateOffset(vm::System* s, uint8_t* instruction, int64_t value) {
  // ARM's PC is two words ahead, and branches drop the bottom 2 bits.
  int32_t v = (reinterpret_cast<uint8_t*>(value) - (instruction + 8)) >> 2;

  int32_t mask;
  expect(s, bounded(0, 8, v));
  mask = 0xFFFFFF;

  int32_t* p = reinterpret_cast<int32_t*>(instruction);
  *p = (v & mask) | ((~mask) & *p);

  return instruction + 4;
}

ConstantPoolEntry::ConstantPoolEntry(Context* con, Promise* constant, ConstantPoolEntry* next,
                                     Promise* callOffset):
  con(con), constant(constant), next(next), callOffset(callOffset),
  address(0)
{ }

int64_t ConstantPoolEntry::value() {
  assert(con, resolved());

  return reinterpret_cast<int64_t>(address);
}

bool ConstantPoolEntry::resolved() {
  return address != 0;
}

ConstantPoolListener::ConstantPoolListener(vm::System* s, vm::target_uintptr_t* address,
                                           uint8_t* returnAddress):
  s(s),
  address(address),
  returnAddress(returnAddress)
{ }

bool ConstantPoolListener::resolve(int64_t value, void** location) {
  *address = value;
  if (location) {
    *location = returnAddress ? static_cast<void*>(returnAddress) : address;
  }
  return true;
}

PoolOffset::PoolOffset(MyBlock* block, ConstantPoolEntry* entry, unsigned offset):
  block(block), entry(entry), next(0), offset(offset)
{ }

PoolEvent::PoolEvent(PoolOffset* poolOffsetHead, PoolOffset* poolOffsetTail,
                     unsigned offset):
  poolOffsetHead(poolOffsetHead), poolOffsetTail(poolOffsetTail), next(0),
  offset(offset)
{ }

void appendConstantPoolEntry(Context* con, Promise* constant, Promise* callOffset) {
  if (constant->resolved()) {
    // make a copy, since the original might be allocated on the
    // stack, and we need our copy to live until assembly is complete
    constant = new(con->zone) ResolvedPromise(constant->value());
  }

  con->constantPool = new(con->zone) ConstantPoolEntry(con, constant, con->constantPool, callOffset);

  ++ con->constantPoolCount;

  PoolOffset* o = new(con->zone) PoolOffset(con->lastBlock, con->constantPool, con->code.length() - con->lastBlock->offset);

  if (DebugPool) {
    fprintf(stderr, "add pool offset %p %d to block %p\n",
            o, o->offset, con->lastBlock);
  }

  if (con->lastBlock->poolOffsetTail) {
    con->lastBlock->poolOffsetTail->next = o;
  } else {
    con->lastBlock->poolOffsetHead = o;
  }
  con->lastBlock->poolOffsetTail = o;
}

void appendPoolEvent(Context* con, MyBlock* b, unsigned offset, PoolOffset* head,
                     PoolOffset* tail)
{
  PoolEvent* e = new(con->zone) PoolEvent(head, tail, offset);

  if (b->poolEventTail) {
    b->poolEventTail->next = e;
  } else {
    b->poolEventHead = e;
  }
  b->poolEventTail = e;
}

} // namespace arm
} // namespace codegen
} // namespace avian
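updateOffset above encodes a branch displacement the way the ARM pipeline sees it: the PC reads as the instruction address plus 8, the field counts words rather than bytes, and only the low 24 bits of the branch word are rewritten. Below is a standalone sketch of the same arithmetic on plain integer addresses (the addresses are made up for illustration; not part of the commit):

// Standalone sketch (not part of the commit): the offset arithmetic used by
// updateOffset above, applied to plain integer addresses instead of pointers.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Patch the 24-bit offset field of a branch word, given the address of the
// branch instruction and the address it should reach.
uint32_t patchBranch(uint32_t branchWord, uint32_t instruction, uint32_t target) {
  // PC reads as the instruction address plus 8; the field counts words.
  int32_t v = static_cast<int32_t>(target - (instruction + 8)) >> 2;
  const uint32_t mask = 0xFFFFFF;
  return (static_cast<uint32_t>(v) & mask) | (branchWord & ~mask);
}

int main() {
  // An unconditional branch at 0x8000 that should land at 0x8010:
  // offset = (0x8010 - 0x8008) / 4 = 2, so the word becomes 0xEA000002.
  uint32_t patched = patchBranch(0xEA000000u, 0x8000u, 0x8010u);
  assert(patched == 0xEA000002u);
  std::printf("patched branch word: 0x%08X\n", patched);
  return 0;
}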
src/codegen/arm/fixup.h (new file, 140 lines)
@@ -0,0 +1,140 @@
/* Copyright (c) 2008-2012, Avian Contributors

   Permission to use, copy, modify, and/or distribute this software
   for any purpose with or without fee is hereby granted, provided
   that the above copyright notice and this permission notice appear
   in all copies.

   There is NO WARRANTY for this software. See license.txt for
   details. */

#ifndef AVIAN_CODEGEN_ASSEMBLER_ARM_PROMISE_H
#define AVIAN_CODEGEN_ASSEMBLER_ARM_PROMISE_H

#include "target.h"

#include <avian/vm/codegen/lir.h>
#include <avian/vm/codegen/assembler.h>
#include "alloc-vector.h"

namespace vm {
class System;
}

namespace avian {
namespace codegen {
namespace arm {

const bool DebugPool = false;

const int32_t PoolOffsetMask = 0xFFF;

class Task {
 public:
  Task(Task* next): next(next) { }

  virtual void run(Context* con) = 0;

  Task* next;
};

class OffsetPromise: public Promise {
 public:
  OffsetPromise(Context* con, MyBlock* block, unsigned offset, bool forTrace);

  virtual bool resolved();

  virtual int64_t value();

  Context* con;
  MyBlock* block;
  unsigned offset;
  bool forTrace;
};

Promise* offsetPromise(Context* con, bool forTrace = false);

class OffsetListener: public Promise::Listener {
 public:
  OffsetListener(vm::System* s, uint8_t* instruction);

  virtual bool resolve(int64_t value, void** location);

  vm::System* s;
  uint8_t* instruction;
};

class OffsetTask: public Task {
 public:
  OffsetTask(Task* next, Promise* promise, Promise* instructionOffset);

  virtual void run(Context* con);

  Promise* promise;
  Promise* instructionOffset;
};

void appendOffsetTask(Context* con, Promise* promise, Promise* instructionOffset);

void* updateOffset(vm::System* s, uint8_t* instruction, int64_t value);

class ConstantPoolEntry: public Promise {
 public:
  ConstantPoolEntry(Context* con, Promise* constant, ConstantPoolEntry* next,
                    Promise* callOffset);

  virtual int64_t value();

  virtual bool resolved();

  Context* con;
  Promise* constant;
  ConstantPoolEntry* next;
  Promise* callOffset;
  void* address;
  unsigned constantPoolCount;
};

class ConstantPoolListener: public Promise::Listener {
 public:
  ConstantPoolListener(vm::System* s, vm::target_uintptr_t* address,
                       uint8_t* returnAddress);

  virtual bool resolve(int64_t value, void** location);

  vm::System* s;
  vm::target_uintptr_t* address;
  uint8_t* returnAddress;
};

class PoolOffset {
 public:
  PoolOffset(MyBlock* block, ConstantPoolEntry* entry, unsigned offset);

  MyBlock* block;
  ConstantPoolEntry* entry;
  PoolOffset* next;
  unsigned offset;
};

class PoolEvent {
 public:
  PoolEvent(PoolOffset* poolOffsetHead, PoolOffset* poolOffsetTail,
            unsigned offset);

  PoolOffset* poolOffsetHead;
  PoolOffset* poolOffsetTail;
  PoolEvent* next;
  unsigned offset;
};

void appendConstantPoolEntry(Context* con, Promise* constant, Promise* callOffset);

void appendPoolEvent(Context* con, MyBlock* b, unsigned offset, PoolOffset* head,
                     PoolOffset* tail);

} // namespace arm
} // namespace codegen
} // namespace avian

#endif // AVIAN_CODEGEN_ASSEMBLER_ARM_PROMISE_H
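Tasks declared here form a singly linked list threaded through Context::tasks (appendOffsetTask in fixup.cpp pushes onto the head). Presumably the assembler walks that list once the emitted code has been copied to its final location and calls run() on each entry, which is when unresolved offsets get their listeners attached. A simplified, standalone sketch of that pattern (the Context and PrintTask types below are stand-ins, not the real ones):

// Standalone sketch (not part of the commit): how a chain of Task objects,
// linked through their 'next' pointers as in fixup.h, is accumulated and run.
#include <cstdio>

struct Context;  // forward declaration, as in the real headers

struct Task {
  Task(Task* next): next(next) { }
  virtual ~Task() { }
  virtual void run(Context* con) = 0;
  Task* next;
};

struct Context {
  Task* tasks = nullptr;  // head of the singly linked task list
};

struct PrintTask: Task {
  PrintTask(Task* next, int id): Task(next), id(id) { }
  virtual void run(Context*) { std::printf("running task %d\n", id); }
  int id;
};

int main() {
  Context con;
  // Each append pushes onto the head, the same way appendOffsetTask does.
  con.tasks = new PrintTask(con.tasks, 1);
  con.tasks = new PrintTask(con.tasks, 2);

  // Walk the chain once the assembly output is in place (LIFO order).
  for (Task* t = con.tasks; t; t = t->next) {
    t->run(&con);
  }

  // Clean up the sketch's heap allocations (the real code zone-allocates).
  while (con.tasks) {
    Task* t = con.tasks;
    con.tasks = t->next;
    delete t;
  }
  return 0;
}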
src/codegen/arm/multimethod.cpp (new file, 142 lines)
@@ -0,0 +1,142 @@
/* Copyright (c) 2008-2012, Avian Contributors

   Permission to use, copy, modify, and/or distribute this software
   for any purpose with or without fee is hereby granted, provided
   that the above copyright notice and this permission notice appear
   in all copies.

   There is NO WARRANTY for this software. See license.txt for
   details. */

#include "context.h"
#include "multimethod.h"
#include "operations.h"

namespace avian {
namespace codegen {
namespace arm {

unsigned index(ArchitectureContext*, lir::UnaryOperation operation, lir::OperandType operand)
{
  return operation + (lir::UnaryOperationCount * operand);
}

unsigned index(ArchitectureContext*,
               lir::BinaryOperation operation,
               lir::OperandType operand1,
               lir::OperandType operand2)
{
  return operation
    + (lir::BinaryOperationCount * operand1)
    + (lir::BinaryOperationCount * lir::OperandTypeCount * operand2);
}

unsigned index(ArchitectureContext* con UNUSED,
               lir::TernaryOperation operation,
               lir::OperandType operand1)
{
  assert(con, not isBranch(operation));

  return operation + (lir::NonBranchTernaryOperationCount * operand1);
}

unsigned branchIndex(ArchitectureContext* con UNUSED, lir::OperandType operand1,
                     lir::OperandType operand2)
{
  return operand1 + (lir::OperandTypeCount * operand2);
}

void populateTables(ArchitectureContext* con) {
  const lir::OperandType C = lir::ConstantOperand;
  const lir::OperandType A = lir::AddressOperand;
  const lir::OperandType R = lir::RegisterOperand;
  const lir::OperandType M = lir::MemoryOperand;

  OperationType* zo = con->operations;
  UnaryOperationType* uo = con->unaryOperations;
  BinaryOperationType* bo = con->binaryOperations;
  TernaryOperationType* to = con->ternaryOperations;
  BranchOperationType* bro = con->branchOperations;

  zo[lir::Return] = return_;
  zo[lir::LoadBarrier] = memoryBarrier;
  zo[lir::StoreStoreBarrier] = memoryBarrier;
  zo[lir::StoreLoadBarrier] = memoryBarrier;
  zo[lir::Trap] = trap;

  uo[index(con, lir::LongCall, C)] = CAST1(longCallC);

  uo[index(con, lir::AlignedLongCall, C)] = CAST1(longCallC);

  uo[index(con, lir::LongJump, C)] = CAST1(longJumpC);

  uo[index(con, lir::AlignedLongJump, C)] = CAST1(longJumpC);

  uo[index(con, lir::Jump, R)] = CAST1(jumpR);
  uo[index(con, lir::Jump, C)] = CAST1(jumpC);

  uo[index(con, lir::AlignedJump, R)] = CAST1(jumpR);
  uo[index(con, lir::AlignedJump, C)] = CAST1(jumpC);

  uo[index(con, lir::Call, C)] = CAST1(callC);
  uo[index(con, lir::Call, R)] = CAST1(callR);

  uo[index(con, lir::AlignedCall, C)] = CAST1(callC);
  uo[index(con, lir::AlignedCall, R)] = CAST1(callR);

  bo[index(con, lir::Move, R, R)] = CAST2(moveRR);
  bo[index(con, lir::Move, C, R)] = CAST2(moveCR);
  bo[index(con, lir::Move, C, M)] = CAST2(moveCM);
  bo[index(con, lir::Move, M, R)] = CAST2(moveMR);
  bo[index(con, lir::Move, R, M)] = CAST2(moveRM);
  bo[index(con, lir::Move, A, R)] = CAST2(moveAR);

  bo[index(con, lir::MoveZ, R, R)] = CAST2(moveZRR);
  bo[index(con, lir::MoveZ, M, R)] = CAST2(moveZMR);
  bo[index(con, lir::MoveZ, C, R)] = CAST2(moveCR);

  bo[index(con, lir::Negate, R, R)] = CAST2(negateRR);

  bo[index(con, lir::FloatAbsolute, R, R)] = CAST2(floatAbsoluteRR);
  bo[index(con, lir::FloatNegate, R, R)] = CAST2(floatNegateRR);
  bo[index(con, lir::Float2Float, R, R)] = CAST2(float2FloatRR);
  bo[index(con, lir::Float2Int, R, R)] = CAST2(float2IntRR);
  bo[index(con, lir::Int2Float, R, R)] = CAST2(int2FloatRR);
  bo[index(con, lir::FloatSquareRoot, R, R)] = CAST2(floatSqrtRR);

  to[index(con, lir::Add, R)] = CAST3(addR);

  to[index(con, lir::Subtract, R)] = CAST3(subR);

  to[index(con, lir::Multiply, R)] = CAST3(multiplyR);

  to[index(con, lir::FloatAdd, R)] = CAST3(floatAddR);
  to[index(con, lir::FloatSubtract, R)] = CAST3(floatSubtractR);
  to[index(con, lir::FloatMultiply, R)] = CAST3(floatMultiplyR);
  to[index(con, lir::FloatDivide, R)] = CAST3(floatDivideR);

  to[index(con, lir::ShiftLeft, R)] = CAST3(shiftLeftR);
  to[index(con, lir::ShiftLeft, C)] = CAST3(shiftLeftC);

  to[index(con, lir::ShiftRight, R)] = CAST3(shiftRightR);
  to[index(con, lir::ShiftRight, C)] = CAST3(shiftRightC);

  to[index(con, lir::UnsignedShiftRight, R)] = CAST3(unsignedShiftRightR);
  to[index(con, lir::UnsignedShiftRight, C)] = CAST3(unsignedShiftRightC);

  to[index(con, lir::And, R)] = CAST3(andR);
  to[index(con, lir::And, C)] = CAST3(andC);

  to[index(con, lir::Or, R)] = CAST3(orR);

  to[index(con, lir::Xor, R)] = CAST3(xorR);

  bro[branchIndex(con, R, R)] = CAST_BRANCH(branchRR);
  bro[branchIndex(con, C, R)] = CAST_BRANCH(branchCR);
  bro[branchIndex(con, C, M)] = CAST_BRANCH(branchCM);
  bro[branchIndex(con, R, M)] = CAST_BRANCH(branchRM);
}

} // namespace arm
} // namespace codegen
} // namespace avian
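The index() and branchIndex() helpers are plain mixed-radix arithmetic: each (operation, operand type) combination maps to a distinct slot in a flat array of function pointers, which populateTables then fills. Below is a standalone sketch with small made-up counts (the real values, lir::BinaryOperationCount and lir::OperandTypeCount, are defined in lir.h) showing that the mapping is collision-free:

// Standalone sketch (not part of the commit): the mixed-radix indexing used
// by the dispatch tables above, with small hypothetical enum sizes.
#include <cassert>

namespace sketch {
// Hypothetical counts standing in for lir::BinaryOperationCount and
// lir::OperandTypeCount; the real values come from lir.h.
const unsigned BinaryOperationCount = 8;
const unsigned OperandTypeCount = 4;

unsigned index(unsigned operation, unsigned operand1, unsigned operand2) {
  return operation
    + (BinaryOperationCount * operand1)
    + (BinaryOperationCount * OperandTypeCount * operand2);
}
}  // namespace sketch

int main() {
  // Every (operation, operand1, operand2) triple gets its own slot in a table
  // of BinaryOperationCount * OperandTypeCount * OperandTypeCount entries.
  bool used[8 * 4 * 4] = { false };
  for (unsigned op = 0; op < sketch::BinaryOperationCount; ++op) {
    for (unsigned a = 0; a < sketch::OperandTypeCount; ++a) {
      for (unsigned b = 0; b < sketch::OperandTypeCount; ++b) {
        unsigned i = sketch::index(op, a, b);
        assert(i < sizeof(used) / sizeof(used[0]) && !used[i]);
        used[i] = true;
      }
    }
  }
  return 0;
}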
src/codegen/arm/multimethod.h (new file, 46 lines)
@@ -0,0 +1,46 @@
/* Copyright (c) 2008-2012, Avian Contributors

   Permission to use, copy, modify, and/or distribute this software
   for any purpose with or without fee is hereby granted, provided
   that the above copyright notice and this permission notice appear
   in all copies.

   There is NO WARRANTY for this software. See license.txt for
   details. */

#ifndef AVIAN_CODEGEN_ASSEMBLER_ARM_MULTIMETHOD_H
#define AVIAN_CODEGEN_ASSEMBLER_ARM_MULTIMETHOD_H

#include <avian/vm/codegen/lir.h>
#include <avian/vm/codegen/assembler.h>

#define CAST1(x) reinterpret_cast<UnaryOperationType>(x)
#define CAST2(x) reinterpret_cast<BinaryOperationType>(x)
#define CAST3(x) reinterpret_cast<TernaryOperationType>(x)
#define CAST_BRANCH(x) reinterpret_cast<BranchOperationType>(x)

namespace avian {
namespace codegen {
namespace arm {

unsigned index(ArchitectureContext*, lir::UnaryOperation operation, lir::OperandType operand);

unsigned index(ArchitectureContext*,
               lir::BinaryOperation operation,
               lir::OperandType operand1,
               lir::OperandType operand2);

unsigned index(ArchitectureContext* con UNUSED,
               lir::TernaryOperation operation,
               lir::OperandType operand1);

unsigned branchIndex(ArchitectureContext* con UNUSED, lir::OperandType operand1,
                     lir::OperandType operand2);

void populateTables(ArchitectureContext* con);

} // namespace arm
} // namespace codegen
} // namespace avian

#endif // AVIAN_CODEGEN_ASSEMBLER_ARM_MULTIMETHOD_H
src/codegen/arm/operations.cpp (new file, 1235 lines)
File diff suppressed because it is too large
src/codegen/arm/operations.h (new file, 240 lines)
@@ -0,0 +1,240 @@
/* Copyright (c) 2008-2012, Avian Contributors

   Permission to use, copy, modify, and/or distribute this software
   for any purpose with or without fee is hereby granted, provided
   that the above copyright notice and this permission notice appear
   in all copies.

   There is NO WARRANTY for this software. See license.txt for
   details. */

#ifndef AVIAN_CODEGEN_ASSEMBLER_ARM_OPERATIONS_H
#define AVIAN_CODEGEN_ASSEMBLER_ARM_OPERATIONS_H

#include "registers.h"

namespace vm {
class System;
}

namespace avian {
namespace codegen {
namespace arm {

class Context;

// shortcut functions

inline int newTemp(Context* con) {
  return con->client->acquireTemporary(GPR_MASK);
}

inline int newTemp(Context* con, unsigned mask) {
  return con->client->acquireTemporary(mask);
}

inline void freeTemp(Context* con, int r) {
  con->client->releaseTemporary(r);
}

inline int64_t getValue(lir::Constant* con) {
  return con->value->value();
}

inline lir::Register makeTemp(Context* con) {
  lir::Register tmp(newTemp(con));
  return tmp;
}

inline lir::Register makeTemp64(Context* con) {
  lir::Register tmp(newTemp(con), newTemp(con));
  return tmp;
}

inline void freeTemp(Context* con, const lir::Register& tmp) {
  if (tmp.low != lir::NoRegister) freeTemp(con, tmp.low);
  if (tmp.high != lir::NoRegister) freeTemp(con, tmp.high);
}

void shiftLeftR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);

void moveRR(Context* con, unsigned srcSize, lir::Register* src,
            unsigned dstSize, lir::Register* dst);

void shiftLeftC(Context* con, unsigned size UNUSED, lir::Constant* a, lir::Register* b, lir::Register* t);

void shiftRightR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);

void shiftRightC(Context* con, unsigned size UNUSED, lir::Constant* a, lir::Register* b, lir::Register* t);

void unsignedShiftRightR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);

void unsignedShiftRightC(Context* con, unsigned size UNUSED, lir::Constant* a, lir::Register* b, lir::Register* t);

bool needJump(MyBlock* b);

unsigned padding(MyBlock* b, unsigned offset);

void resolve(MyBlock* b);

void jumpR(Context* con, unsigned size UNUSED, lir::Register* target);

void swapRR(Context* con, unsigned aSize, lir::Register* a,
            unsigned bSize, lir::Register* b);

void moveRR(Context* con, unsigned srcSize, lir::Register* src,
            unsigned dstSize, lir::Register* dst);

void moveZRR(Context* con, unsigned srcSize, lir::Register* src,
             unsigned, lir::Register* dst);

void moveCR(Context* con, unsigned size, lir::Constant* src,
            unsigned, lir::Register* dst);

void moveCR2(Context* con, unsigned size, lir::Constant* src,
             lir::Register* dst, Promise* callOffset);

void moveCR(Context* con, unsigned size, lir::Constant* src,
            unsigned, lir::Register* dst);

void addR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);

void subR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);

void addC(Context* con, unsigned size, lir::Constant* a,
          lir::Register* b, lir::Register* dst);

void subC(Context* con, unsigned size, lir::Constant* a,
          lir::Register* b, lir::Register* dst);

void multiplyR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);

void floatAbsoluteRR(Context* con, unsigned size, lir::Register* a, unsigned, lir::Register* b);

void floatNegateRR(Context* con, unsigned size, lir::Register* a, unsigned, lir::Register* b);

void float2FloatRR(Context* con, unsigned size, lir::Register* a, unsigned, lir::Register* b);

void float2IntRR(Context* con, unsigned size, lir::Register* a, unsigned, lir::Register* b);

void int2FloatRR(Context* con, unsigned, lir::Register* a, unsigned size, lir::Register* b);

void floatSqrtRR(Context* con, unsigned size, lir::Register* a, unsigned, lir::Register* b);

void floatAddR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);

void floatSubtractR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);

void floatMultiplyR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);

void floatDivideR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::Register* t);

int normalize(Context* con, int offset, int index, unsigned scale,
              bool* preserveIndex, bool* release);

void store(Context* con, unsigned size, lir::Register* src,
           int base, int offset, int index, unsigned scale, bool preserveIndex);

void moveRM(Context* con, unsigned srcSize, lir::Register* src,
            unsigned dstSize UNUSED, lir::Memory* dst);

void load(Context* con, unsigned srcSize, int base, int offset, int index,
          unsigned scale, unsigned dstSize, lir::Register* dst,
          bool preserveIndex, bool signExtend);

void moveMR(Context* con, unsigned srcSize, lir::Memory* src,
            unsigned dstSize, lir::Register* dst);

void moveZMR(Context* con, unsigned srcSize, lir::Memory* src,
             unsigned dstSize, lir::Register* dst);

void andR(Context* con, unsigned size, lir::Register* a,
          lir::Register* b, lir::Register* dst);

void andC(Context* con, unsigned size, lir::Constant* a,
          lir::Register* b, lir::Register* dst);

void orR(Context* con, unsigned size, lir::Register* a,
         lir::Register* b, lir::Register* dst);

void xorR(Context* con, unsigned size, lir::Register* a,
          lir::Register* b, lir::Register* dst);

void moveAR2(Context* con, unsigned srcSize, lir::Address* src,
             unsigned dstSize, lir::Register* dst);

void moveAR(Context* con, unsigned srcSize, lir::Address* src,
            unsigned dstSize, lir::Register* dst);

void compareRR(Context* con, unsigned aSize, lir::Register* a,
               unsigned bSize UNUSED, lir::Register* b);

void compareCR(Context* con, unsigned aSize, lir::Constant* a,
               unsigned bSize, lir::Register* b);

void compareCM(Context* con, unsigned aSize, lir::Constant* a,
               unsigned bSize, lir::Memory* b);

void compareRM(Context* con, unsigned aSize, lir::Register* a,
               unsigned bSize, lir::Memory* b);

int32_t
branch(Context* con, lir::TernaryOperation op);

void conditional(Context* con, int32_t branch, lir::Constant* target);

void branch(Context* con, lir::TernaryOperation op, lir::Constant* target);

void branchLong(Context* con, lir::TernaryOperation op, lir::Operand* al,
                lir::Operand* ah, lir::Operand* bl,
                lir::Operand* bh, lir::Constant* target,
                BinaryOperationType compareSigned,
                BinaryOperationType compareUnsigned);

void branchRR(Context* con, lir::TernaryOperation op, unsigned size,
              lir::Register* a, lir::Register* b,
              lir::Constant* target);

void branchCR(Context* con, lir::TernaryOperation op, unsigned size,
              lir::Constant* a, lir::Register* b,
              lir::Constant* target);

void branchRM(Context* con, lir::TernaryOperation op, unsigned size,
              lir::Register* a, lir::Memory* b,
              lir::Constant* target);

void branchCM(Context* con, lir::TernaryOperation op, unsigned size,
              lir::Constant* a, lir::Memory* b,
              lir::Constant* target);

ShiftMaskPromise*
shiftMaskPromise(Context* con, Promise* base, unsigned shift, int64_t mask);

void moveCM(Context* con, unsigned srcSize, lir::Constant* src,
            unsigned dstSize, lir::Memory* dst);

void negateRR(Context* con, unsigned srcSize, lir::Register* src,
              unsigned dstSize UNUSED, lir::Register* dst);

void callR(Context* con, unsigned size UNUSED, lir::Register* target);

void callC(Context* con, unsigned size UNUSED, lir::Constant* target);

void longCallC(Context* con, unsigned size UNUSED, lir::Constant* target);

void longJumpC(Context* con, unsigned size UNUSED, lir::Constant* target);

void jumpC(Context* con, unsigned size UNUSED, lir::Constant* target);

void return_(Context* con);

void trap(Context* con);

void memoryBarrier(Context*);

} // namespace arm
} // namespace codegen
} // namespace avian

#endif // AVIAN_CODEGEN_ASSEMBLER_ARM_OPERATIONS_H
src/codegen/arm/registers.h (new file, 52 lines)
@@ -0,0 +1,52 @@
/* Copyright (c) 2008-2012, Avian Contributors

   Permission to use, copy, modify, and/or distribute this software
   for any purpose with or without fee is hereby granted, provided
   that the above copyright notice and this permission notice appear
   in all copies.

   There is NO WARRANTY for this software. See license.txt for
   details. */

#ifndef AVIAN_CODEGEN_ASSEMBLER_ARM_REGISTERS_H
#define AVIAN_CODEGEN_ASSEMBLER_ARM_REGISTERS_H

#include <avian/vm/codegen/lir.h>
#include <avian/vm/codegen/assembler.h>

namespace avian {
namespace codegen {
namespace arm {

const uint64_t MASK_LO32 = 0xffffffff;
const unsigned MASK_LO16 = 0xffff;
const unsigned MASK_LO8 = 0xff;

const int N_GPRS = 16;
const int N_FPRS = 16;
const uint32_t GPR_MASK = 0xffff;
const uint32_t FPR_MASK = 0xffff0000;

const uint64_t GPR_MASK64 = GPR_MASK | (uint64_t)GPR_MASK << 32;
const uint64_t FPR_MASK64 = FPR_MASK | (uint64_t)FPR_MASK << 32;

inline bool isFpr(lir::Register* reg) {
  return reg->low >= N_GPRS;
}

inline int fpr64(int reg) { return reg - N_GPRS; }
inline int fpr64(lir::Register* reg) { return fpr64(reg->low); }
inline int fpr32(int reg) { return fpr64(reg) << 1; }
inline int fpr32(lir::Register* reg) { return fpr64(reg) << 1; }

const int ThreadRegister = 8;
const int StackRegister = 13;
const int LinkRegister = 14;
const int ProgramCounter = 15;

} // namespace arm
} // namespace codegen
} // namespace avian

#endif // AVIAN_CODEGEN_ASSEMBLER_ARM_REGISTERS_H
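Registers 0 through 15 are the general-purpose registers; numbers 16 and up name the VFP bank, which fpr64 renumbers as D registers and fpr32 as the corresponding even-numbered S register. A standalone copy of just that mapping, checked in isolation (not part of the commit):

// Standalone sketch (not part of the commit): the register renumbering from
// registers.h, copied here so the mapping can be checked on its own.
#include <cassert>

const int N_GPRS = 16;
inline int fpr64(int reg) { return reg - N_GPRS; }
inline int fpr32(int reg) { return fpr64(reg) << 1; }

int main() {
  // The assembler numbers VFP registers 16..31; register 16 maps to D0,
  // whose low half is S0, register 17 to D1 (S2/S3), and so on.
  assert(fpr64(16) == 0 && fpr32(16) == 0);
  assert(fpr64(17) == 1 && fpr32(17) == 2);
  assert(fpr64(31) == 15 && fpr32(31) == 30);
  return 0;
}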
@@ -477,6 +477,12 @@ hash(const uint16_t* s, unsigned length)
  return h;
}

inline void
write4(uint8_t* dst, uint32_t v)
{
  memcpy(dst, &v, 4);
}

inline uint32_t
floatToBits(float f)
{
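The diff is cut off by the page at this point, so the body of floatToBits is not visible here. write4 above shows the pattern the file uses: memcpy is the portable way to move raw bytes without strict-aliasing problems, and floatToBits presumably applies the same idiom in the other direction. A standalone sketch of that idiom (an assumed illustration, not the file's actual text):

// Standalone sketch (not part of the commit): the memcpy type-punning idiom
// that write4 above uses, applied to a float. The diff is truncated here, so
// this is an assumed illustration of the idea, not the file's exact body.
#include <cassert>
#include <cstdint>
#include <cstring>

inline uint32_t floatToBitsSketch(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, 4);  // copy the raw bytes; avoids strict-aliasing issues
  return bits;
}

int main() {
  assert(floatToBitsSketch(1.0f) == 0x3F800000u);  // IEEE-754 single-precision 1.0
  return 0;
}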