/* Copyright (c) 2010-2012, Avian Contributors

   Permission to use, copy, modify, and/or distribute this software
   for any purpose with or without fee is hereby granted, provided
   that the above copyright notice and this permission notice appear
   in all copies.

   There is NO WARRANTY for this software. See license.txt for
   details. */

#include "assembler.h"
#include "vector.h"

#define CAST1(x) reinterpret_cast<UnaryOperationType>(x)
#define CAST2(x) reinterpret_cast<BinaryOperationType>(x)
#define CAST3(x) reinterpret_cast<TernaryOperationType>(x)
#define CAST_BRANCH(x) reinterpret_cast<BranchOperationType>(x)

using namespace vm;

namespace {

namespace isa {

// SYSTEM REGISTERS
const int FPSID = 0x0;
const int FPSCR = 0x1;
const int FPEXC = 0x8;

// INSTRUCTION OPTIONS
enum CONDITION { EQ, NE, CS, CC, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV };
enum SHIFTOP { LSL, LSR, ASR, ROR };

// INSTRUCTION FORMATS
inline int DATA(int cond, int opcode, int S, int Rn, int Rd, int shift, int Sh, int Rm)
{ return cond<<28 | opcode<<21 | S<<20 | Rn<<16 | Rd<<12 | shift<<7 | Sh<<5 | Rm; }
inline int DATAS(int cond, int opcode, int S, int Rn, int Rd, int Rs, int Sh, int Rm)
{ return cond<<28 | opcode<<21 | S<<20 | Rn<<16 | Rd<<12 | Rs<<8 | Sh<<5 | 1<<4 | Rm; }
inline int DATAI(int cond, int opcode, int S, int Rn, int Rd, int rot, int imm)
{ return cond<<28 | 1<<25 | opcode<<21 | S<<20 | Rn<<16 | Rd<<12 | rot<<8 | (imm&0xff); }
inline int BRANCH(int cond, int L, int offset)
{ return cond<<28 | 5<<25 | L<<24 | (offset&0xffffff); }
inline int BRANCHX(int cond, int L, int Rm)
{ return cond<<28 | 0x4bffc<<6 | L<<5 | 1<<4 | Rm; }
inline int MULTIPLY(int cond, int mul, int S, int Rd, int Rn, int Rs, int Rm)
{ return cond<<28 | mul<<21 | S<<20 | Rd<<16 | Rn<<12 | Rs<<8 | 9<<4 | Rm; }
inline int XFER(int cond, int P, int U, int B, int W, int L, int Rn, int Rd, int shift, int Sh, int Rm)
{ return cond<<28 | 3<<25 | P<<24 | U<<23 | B<<22 | W<<21 | L<<20 | Rn<<16 | Rd<<12 | shift<<7 | Sh<<5 | Rm; }
inline int XFERI(int cond, int P, int U, int B, int W, int L, int Rn, int Rd, int offset)
{ return cond<<28 | 2<<25 | P<<24 | U<<23 | B<<22 | W<<21 | L<<20 | Rn<<16 | Rd<<12 | (offset&0xfff); }
inline int XFER2(int cond, int P, int U, int W, int L, int Rn, int Rd, int S, int H, int Rm)
{ return cond<<28 | P<<24 | U<<23 | W<<21 | L<<20 | Rn<<16 | Rd<<12 | 1<<7 | S<<6 | H<<5 | 1<<4 | Rm; }
inline int XFER2I(int cond, int P, int U, int W, int L, int Rn, int Rd, int offsetH, int S, int H, int offsetL)
{ return cond<<28 | P<<24 | U<<23 | 1<<22 | W<<21 | L<<20 | Rn<<16 | Rd<<12 | offsetH<<8 | 1<<7 | S<<6 | H<<5 | 1<<4 | (offsetL&0xf); }
inline int COOP(int cond, int opcode_1, int CRn, int CRd, int cp_num, int opcode_2, int CRm)
{ return cond<<28 | 0xe<<24 | opcode_1<<20 | CRn<<16 | CRd<<12 | cp_num<<8 | opcode_2<<5 | CRm; }
inline int COXFER(int cond, int P, int U, int N, int W, int L, int Rn, int CRd, int cp_num, int offset) // offset is in words, not bytes
{ return cond<<28 | 0x6<<25 | P<<24 | U<<23 | N<<22 | W<<21 | L<<20 | Rn<<16 | CRd<<12 | cp_num<<8 | (offset&0xff)>>2; }
inline int COREG(int cond, int opcode_1, int L, int CRn, int Rd, int cp_num, int opcode_2, int CRm)
{ return cond<<28 | 0xe<<24 | opcode_1<<21 | L<<20 | CRn<<16 | Rd<<12 | cp_num<<8 | opcode_2<<5 | 1<<4 | CRm; }
inline int COREG2(int cond, int L, int Rn, int Rd, int cp_num, int opcode, int CRm)
{ return cond<<28 | 0xc4<<20 | L<<20 | Rn<<16 | Rd<<12 | cp_num<<8 | opcode<<4 | CRm; }

// FIELD CALCULATORS
inline int calcU(int imm) { return imm >= 0 ? 1 : 0; }
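
// Sanity check for the encoders above (a worked example added for
// illustration, not part of the original source): movi(0, 42) expands to
// DATAI(AL, 0xd, 0, 0, 0, 0, 42)
//   = 0xe<<28 | 1<<25 | 0xd<<21 | 42
//   = 0xe3a0002a,
// which is exactly the ARM encoding of "mov r0, #42".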

// INSTRUCTIONS
// The "cond" and "S" fields are set using the SETCOND() and SETS() functions
inline int b(int offset) { return BRANCH(AL, 0, offset); }
inline int bl(int offset) { return BRANCH(AL, 1, offset); }
inline int bx(int Rm) { return BRANCHX(AL, 0, Rm); }
inline int blx(int Rm) { return BRANCHX(AL, 1, Rm); }
inline int and_(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0x0, 0, Rn, Rd, shift, Sh, Rm); }
inline int eor(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0x1, 0, Rn, Rd, shift, Sh, Rm); }
inline int rsb(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0x3, 0, Rn, Rd, shift, Sh, Rm); }
inline int add(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0x4, 0, Rn, Rd, shift, Sh, Rm); }
inline int adc(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0x5, 0, Rn, Rd, shift, Sh, Rm); }
inline int rsc(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0x7, 0, Rn, Rd, shift, Sh, Rm); }
inline int cmp(int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0xa, 1, Rn, 0, shift, Sh, Rm); }
inline int orr(int Rd, int Rn, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0xc, 0, Rn, Rd, shift, Sh, Rm); }
inline int mov(int Rd, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0xd, 0, 0, Rd, shift, Sh, Rm); }
inline int mvn(int Rd, int Rm, int Sh=0, int shift=0) { return DATA(AL, 0xf, 0, 0, Rd, shift, Sh, Rm); }
inline int andi(int Rd, int Rn, int imm, int rot=0) { return DATAI(AL, 0x0, 0, Rn, Rd, rot, imm); }
inline int subi(int Rd, int Rn, int imm, int rot=0) { return DATAI(AL, 0x2, 0, Rn, Rd, rot, imm); }
inline int rsbi(int Rd, int Rn, int imm, int rot=0) { return DATAI(AL, 0x3, 0, Rn, Rd, rot, imm); }
inline int addi(int Rd, int Rn, int imm, int rot=0) { return DATAI(AL, 0x4, 0, Rn, Rd, rot, imm); }
inline int adci(int Rd, int Rn, int imm, int rot=0) { return DATAI(AL, 0x5, 0, Rn, Rd, rot, imm); }
inline int bici(int Rd, int Rn, int imm, int rot=0) { return DATAI(AL, 0xe, 0, Rn, Rd, rot, imm); }
inline int cmpi(int Rn, int imm, int rot=0) { return DATAI(AL, 0xa, 1, Rn, 0, rot, imm); }
inline int movi(int Rd, int imm, int rot=0) { return DATAI(AL, 0xd, 0, 0, Rd, rot, imm); }
inline int orrsh(int Rd, int Rn, int Rm, int Rs, int Sh) { return DATAS(AL, 0xc, 0, Rn, Rd, Rs, Sh, Rm); }
inline int movsh(int Rd, int Rm, int Rs, int Sh) { return DATAS(AL, 0xd, 0, 0, Rd, Rs, Sh, Rm); }
inline int mul(int Rd, int Rm, int Rs) { return MULTIPLY(AL, 0, 0, Rd, 0, Rs, Rm); }
inline int mla(int Rd, int Rm, int Rs, int Rn) { return MULTIPLY(AL, 1, 0, Rd, Rn, Rs, Rm); }
inline int umull(int RdLo, int RdHi, int Rm, int Rs) { return MULTIPLY(AL, 4, 0, RdHi, RdLo, Rs, Rm); }
inline int ldr(int Rd, int Rn, int Rm, int W=0) { return XFER(AL, 1, 1, 0, W, 1, Rn, Rd, 0, 0, Rm); }
inline int ldri(int Rd, int Rn, int imm, int W=0) { return XFERI(AL, 1, calcU(imm), 0, W, 1, Rn, Rd, abs(imm)); }
inline int ldrb(int Rd, int Rn, int Rm) { return XFER(AL, 1, 1, 1, 0, 1, Rn, Rd, 0, 0, Rm); }
inline int ldrbi(int Rd, int Rn, int imm) { return XFERI(AL, 1, calcU(imm), 1, 0, 1, Rn, Rd, abs(imm)); }
inline int str(int Rd, int Rn, int Rm, int W=0) { return XFER(AL, 1, 1, 0, W, 0, Rn, Rd, 0, 0, Rm); }
inline int stri(int Rd, int Rn, int imm, int W=0) { return XFERI(AL, 1, calcU(imm), 0, W, 0, Rn, Rd, abs(imm)); }
inline int strb(int Rd, int Rn, int Rm) { return XFER(AL, 1, 1, 1, 0, 0, Rn, Rd, 0, 0, Rm); }
inline int strbi(int Rd, int Rn, int imm) { return XFERI(AL, 1, calcU(imm), 1, 0, 0, Rn, Rd, abs(imm)); }
inline int ldrh(int Rd, int Rn, int Rm) { return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 0, 1, Rm); }
inline int ldrhi(int Rd, int Rn, int imm) { return XFER2I(AL, 1, calcU(imm), 0, 1, Rn, Rd, abs(imm)>>4 & 0xf, 0, 1, abs(imm)&0xf); }
inline int strh(int Rd, int Rn, int Rm) { return XFER2(AL, 1, 1, 0, 0, Rn, Rd, 0, 1, Rm); }
inline int strhi(int Rd, int Rn, int imm) { return XFER2I(AL, 1, calcU(imm), 0, 0, Rn, Rd, abs(imm)>>4 & 0xf, 0, 1, abs(imm)&0xf); }
inline int ldrsh(int Rd, int Rn, int Rm) { return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 1, 1, Rm); }
inline int ldrshi(int Rd, int Rn, int imm) { return XFER2I(AL, 1, calcU(imm), 0, 1, Rn, Rd, abs(imm)>>4 & 0xf, 1, 1, abs(imm)&0xf); }
inline int ldrsb(int Rd, int Rn, int Rm) { return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 1, 0, Rm); }
inline int ldrsbi(int Rd, int Rn, int imm) { return XFER2I(AL, 1, calcU(imm), 0, 1, Rn, Rd, abs(imm)>>4 & 0xf, 1, 0, abs(imm)&0xf); }

// breakpoint instruction; this one really has its own instruction format
inline int bkpt(int16_t immed) { return 0xe1200070 | (((unsigned)immed & 0xffff) >> 4 << 8) | (immed & 0xf); }
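
// For example (worked out from the encoder above, added for illustration):
// bkpt(0) yields 0xe1200070, the ARM "bkpt #0" encoding; the 16-bit
// immediate is split across bits 19..8 and bits 3..0 of the word.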

// COPROCESSOR INSTRUCTIONS
inline int mcr(int coproc, int opcode_1, int Rd, int CRn, int CRm, int opcode_2=0) { return COREG(AL, opcode_1, 0, CRn, Rd, coproc, opcode_2, CRm); }
inline int mcrr(int coproc, int opcode, int Rd, int Rn, int CRm) { return COREG2(AL, 0, Rn, Rd, coproc, opcode, CRm); }
inline int mrc(int coproc, int opcode_1, int Rd, int CRn, int CRm, int opcode_2=0) { return COREG(AL, opcode_1, 1, CRn, Rd, coproc, opcode_2, CRm); }
inline int mrrc(int coproc, int opcode, int Rd, int Rn, int CRm) { return COREG2(AL, 1, Rn, Rd, coproc, opcode, CRm); }

// VFP FLOATING-POINT INSTRUCTIONS
inline int fmuls(int Sd, int Sn, int Sm) { return COOP(AL, (Sd&1)<<2|2, Sn>>1, Sd>>1, 10, (Sn&1)<<2|(Sm&1), Sm>>1); }
inline int fadds(int Sd, int Sn, int Sm) { return COOP(AL, (Sd&1)<<2|3, Sn>>1, Sd>>1, 10, (Sn&1)<<2|(Sm&1), Sm>>1); }
inline int fsubs(int Sd, int Sn, int Sm) { return COOP(AL, (Sd&1)<<2|3, Sn>>1, Sd>>1, 10, (Sn&1)<<2|(Sm&1)|2, Sm>>1); }
inline int fdivs(int Sd, int Sn, int Sm) { return COOP(AL, (Sd&1)<<2|8, Sn>>1, Sd>>1, 10, (Sn&1)<<2|(Sm&1), Sm>>1); }
inline int fmuld(int Dd, int Dn, int Dm) { return COOP(AL, 2, Dn, Dd, 11, 0, Dm); }
inline int faddd(int Dd, int Dn, int Dm) { return COOP(AL, 3, Dn, Dd, 11, 0, Dm); }
inline int fsubd(int Dd, int Dn, int Dm) { return COOP(AL, 3, Dn, Dd, 11, 2, Dm); }
inline int fdivd(int Dd, int Dn, int Dm) { return COOP(AL, 8, Dn, Dd, 11, 0, Dm); }
inline int fcpys(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 0, Sd>>1, 10, 2|(Sm&1), Sm>>1); }
inline int fabss(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 0, Sd>>1, 10, 6|(Sm&1), Sm>>1); }
inline int fnegs(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 1, Sd>>1, 10, 2|(Sm&1), Sm>>1); }
inline int fsqrts(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 1, Sd>>1, 10, 6|(Sm&1), Sm>>1); }
inline int fcmps(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 4, Sd>>1, 10, 2|(Sm&1), Sm>>1); }
inline int fcvtds(int Dd, int Sm) { return COOP(AL, 0xb, 7, Dd, 10, 6|(Sm&1), Sm>>1); }
inline int fsitos(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 8, Sd>>1, 10, 6|(Sm&1), Sm>>1); }
inline int ftosizs(int Sd, int Sm) { return COOP(AL, 0xb|(Sd&1)<<2, 0xd, Sd>>1, 10, 6|(Sm&1), Sm>>1); }
inline int fcpyd(int Dd, int Dm) { return COOP(AL, 0xb, 0, Dd, 11, 2, Dm); }
inline int fabsd(int Dd, int Dm) { return COOP(AL, 0xb, 0, Dd, 11, 6, Dm); }
inline int fnegd(int Dd, int Dm) { return COOP(AL, 0xb, 1, Dd, 11, 2, Dm); }
inline int fsqrtd(int Dd, int Dm) { return COOP(AL, 0xb, 1, Dd, 11, 6, Dm); }
// double-precision comparison instructions
inline int fcmpd(int Dd, int Dm) { return COOP(AL, 0xb, 4, Dd, 11, 2, Dm); }
// double-precision conversion instructions
inline int fcvtsd(int Sd, int Dm) { return COOP(AL, 0xb|(Sd&1)<<2, 7, Sd>>1, 11, 6, Dm); }
inline int fsitod(int Dd, int Sm) { return COOP(AL, 0xb, 8, Dd, 11, 6|(Sm&1), Sm>>1); }
inline int ftosizd(int Sd, int Dm) { return COOP(AL, 0xb|(Sd&1)<<2, 0xd, Sd>>1, 11, 6, Dm); }
// single load/store instructions for both precision types
inline int flds(int Sd, int Rn, int offset=0) { return COXFER(AL, 1, 1, Sd&1, 0, 1, Rn, Sd>>1, 10, offset); }
inline int fldd(int Dd, int Rn, int offset=0) { return COXFER(AL, 1, 1, 0, 0, 1, Rn, Dd, 11, offset); }
inline int fsts(int Sd, int Rn, int offset=0) { return COXFER(AL, 1, 1, Sd&1, 0, 0, Rn, Sd>>1, 10, offset); }
inline int fstd(int Dd, int Rn, int offset=0) { return COXFER(AL, 1, 1, 0, 0, 0, Rn, Dd, 11, offset); }
// move between GPRs and FPRs
inline int fmsr(int Sn, int Rd) { return mcr(10, 0, Rd, Sn>>1, 0, (Sn&1)<<2); }
inline int fmrs(int Rd, int Sn) { return mrc(10, 0, Rd, Sn>>1, 0, (Sn&1)<<2); }
// move to/from VFP system registers
inline int fmrx(int Rd, int reg) { return mrc(10, 7, Rd, reg, 0); }
// these move a double-precision register (a pair of single-precision
// registers) to/from a pair of GPRs
inline int fmdrr(int Dm, int Rd, int Rn) { return mcrr(11, 1, Rd, Rn, Dm); }
inline int fmrrd(int Rd, int Rn, int Dm) { return mrrc(11, 1, Rd, Rn, Dm); }

// FLAG SETTERS
inline int SETCOND(int ins, int cond) { return ((ins&0x0fffffff) | (cond<<28)); }
inline int SETS(int ins) { return ins | 1<<20; }
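
// Usage note (added for clarity): SETCOND rewrites the condition field of
// an already-encoded instruction, and SETS turns on its S (flag-setting)
// bit. For example, SETCOND(mov(0, 1), MI) encodes "movmi r0, r1", and
// SETS(add(0, 1, 2)) encodes "adds r0, r1, r2".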

// PSEUDO-INSTRUCTIONS
inline int lsl(int Rd, int Rm, int Rs) { return movsh(Rd, Rm, Rs, LSL); }
inline int lsli(int Rd, int Rm, int imm) { return mov(Rd, Rm, LSL, imm); }
inline int lsr(int Rd, int Rm, int Rs) { return movsh(Rd, Rm, Rs, LSR); }
inline int lsri(int Rd, int Rm, int imm) { return mov(Rd, Rm, LSR, imm); }
inline int asr(int Rd, int Rm, int Rs) { return movsh(Rd, Rm, Rs, ASR); }
inline int asri(int Rd, int Rm, int imm) { return mov(Rd, Rm, ASR, imm); }
inline int beq(int offset) { return SETCOND(b(offset), EQ); }
inline int bne(int offset) { return SETCOND(b(offset), NE); }
inline int bls(int offset) { return SETCOND(b(offset), LS); }
inline int bhi(int offset) { return SETCOND(b(offset), HI); }
inline int blt(int offset) { return SETCOND(b(offset), LT); }
inline int bgt(int offset) { return SETCOND(b(offset), GT); }
inline int ble(int offset) { return SETCOND(b(offset), LE); }
inline int bge(int offset) { return SETCOND(b(offset), GE); }
inline int blo(int offset) { return SETCOND(b(offset), CC); }
inline int bhs(int offset) { return SETCOND(b(offset), CS); }
inline int bpl(int offset) { return SETCOND(b(offset), PL); }
inline int fmstat() { return fmrx(15, FPSCR); }

// HARDWARE FLAGS
bool vfpSupported() {
  // TODO: use runtime detection instead of this compile-time check
#if defined(__ARM_PCS_VFP)
  // armhf
  return true;
#else
  // armel
  // TODO: allow VFP use for -mfloat-abi=softfp armel builds.
  // The GCC -mfloat-abi=softfp flag allows use of VFP while remaining
  // compatible with soft-float code.
  return false;
#endif
}
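
// A minimal sketch of the runtime detection the TODO above asks for,
// assuming a Linux target with glibc's getauxval() and the ARM hwcap
// headers (an assumption; not wired in here):
//
//   #include <sys/auxv.h>
//   #include <asm/hwcap.h>
//
//   bool vfpDetected() {
//     return (getauxval(AT_HWCAP) & HWCAP_VFP) != 0;
//   }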

} // namespace isa

const uint64_t MASK_LO32 = 0xffffffff;
const unsigned MASK_LO16 = 0xffff;
const unsigned MASK_LO8 = 0xff;

inline unsigned lo8(int64_t i) { return (unsigned)(i & MASK_LO8); }

inline bool isOfWidth(int64_t i, int size) { return static_cast<uint64_t>(i) >> size == 0; }
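
// For example (illustration only): isOfWidth(255, 8) is true and
// isOfWidth(256, 8) is false, so a constant qualifies only when its
// unsigned value fits in the given number of bits.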

const int N_GPRS = 16;
const int N_FPRS = 16;
const uint32_t GPR_MASK = 0xffff;
const uint32_t FPR_MASK = 0xffff0000;
// for source-to-destination masks
const uint64_t GPR_MASK64 = GPR_MASK | (uint64_t)GPR_MASK << 32;
// making the following const somehow breaks debug symbol output in GDB
/* const */ uint64_t FPR_MASK64 = FPR_MASK | (uint64_t)FPR_MASK << 32;

inline bool isFpr(Assembler::Register* reg) {
  return reg->low >= N_GPRS;
}

inline int fpr64(int reg) { return reg - N_GPRS; }
inline int fpr64(Assembler::Register* reg) { return fpr64(reg->low); }
inline int fpr32(int reg) { return fpr64(reg) << 1; }
inline int fpr32(Assembler::Register* reg) { return fpr64(reg) << 1; }
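
// Numbering note (added for clarity): abstract register indices 16..31
// name the floating-point registers, so index 17 maps to d1 via fpr64()
// and to s2 via fpr32(), reflecting VFP's pairing of two single-precision
// registers per double-precision register.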

const unsigned FrameHeaderSize = 1;

const unsigned StackAlignmentInBytes = 8;
const unsigned StackAlignmentInWords
= StackAlignmentInBytes / TargetBytesPerWord;

const int ThreadRegister = 8;
const int StackRegister = 13;
const int LinkRegister = 14;
const int ProgramCounter = 15;

const int32_t PoolOffsetMask = 0xFFF;
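
// Rationale (added for clarity): constants too wide for an immediate are
// loaded PC-relative from a constant pool, and XFERI's immediate offset
// field is 12 bits, so a pool entry must sit within 0xFFF bytes of the
// load that references it.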

const bool DebugPool = false;

class Context;
class MyBlock;
class PoolOffset;
class PoolEvent;

void
resolve(MyBlock*);

unsigned
padding(MyBlock*, unsigned);

class MyBlock: public Assembler::Block {
 public:
  MyBlock(Context* context, unsigned offset):
    context(context), next(0), poolOffsetHead(0), poolOffsetTail(0),
    lastPoolOffsetTail(0), poolEventHead(0), poolEventTail(0),
    lastEventOffset(0), offset(offset), start(~0), size(0)
  { }

  virtual unsigned resolve(unsigned start, Assembler::Block* next) {
    this->start = start;
    this->next = static_cast<MyBlock*>(next);

    ::resolve(this);

    return start + size + padding(this, size);
  }

  Context* context;
  MyBlock* next;
  PoolOffset* poolOffsetHead;
  PoolOffset* poolOffsetTail;
  PoolOffset* lastPoolOffsetTail;
  PoolEvent* poolEventHead;
  PoolEvent* poolEventTail;
  unsigned lastEventOffset;
  unsigned offset;
  unsigned start;
  unsigned size;
};

class Task;
class ConstantPoolEntry;

class Context {
 public:
  Context(System* s, Allocator* a, Zone* zone):
    s(s), zone(zone), client(0), code(s, a, 1024), tasks(0), result(0),
    firstBlock(new(zone) MyBlock(this, 0)),
    lastBlock(firstBlock), poolOffsetHead(0), poolOffsetTail(0),
    constantPool(0), constantPoolCount(0)
  { }

  System* s;
  Zone* zone;
  Assembler::Client* client;
  Vector code;
  Task* tasks;
  uint8_t* result;
  MyBlock* firstBlock;
  MyBlock* lastBlock;
  PoolOffset* poolOffsetHead;
  PoolOffset* poolOffsetTail;
  ConstantPoolEntry* constantPool;
  unsigned constantPoolCount;
};

class Task {
 public:
  Task(Task* next): next(next) { }

  virtual void run(Context* con) = 0;

  Task* next;
};

typedef void (*OperationType)(Context*);

typedef void (*UnaryOperationType)(Context*, unsigned, Assembler::Operand*);

typedef void (*BinaryOperationType)
(Context*, unsigned, Assembler::Operand*, unsigned, Assembler::Operand*);

typedef void (*TernaryOperationType)
(Context*, unsigned, Assembler::Operand*, Assembler::Operand*,
 Assembler::Operand*);

typedef void (*BranchOperationType)
(Context*, TernaryOperation, unsigned, Assembler::Operand*,
 Assembler::Operand*, Assembler::Operand*);

class ArchitectureContext {
 public:
  ArchitectureContext(System* s): s(s) { }

  System* s;
  OperationType operations[OperationCount];
  UnaryOperationType unaryOperations[UnaryOperationCount
                                     * OperandTypeCount];
  BinaryOperationType binaryOperations
  [BinaryOperationCount * OperandTypeCount * OperandTypeCount];
  TernaryOperationType ternaryOperations
  [NonBranchTernaryOperationCount * OperandTypeCount];
  BranchOperationType branchOperations
  [BranchOperationCount * OperandTypeCount * OperandTypeCount];
};

inline void NO_RETURN
abort(Context* con)
{
  abort(con->s);
}

inline void NO_RETURN
abort(ArchitectureContext* con)
{
  abort(con->s);
}

#ifndef NDEBUG
inline void
assert(Context* con, bool v)
{
  assert(con->s, v);
}

inline void
assert(ArchitectureContext* con, bool v)
{
  assert(con->s, v);
}
#endif // not NDEBUG

inline void
expect(Context* con, bool v)
{
  expect(con->s, v);
}

class Offset: public Promise {
 public:
  Offset(Context* con, MyBlock* block, unsigned offset, bool forTrace):
    con(con), block(block), offset(offset), forTrace(forTrace)
  { }

  virtual bool resolved() {
    return block->start != static_cast<unsigned>(~0);
  }

  virtual int64_t value() {
    assert(con, resolved());

    unsigned o = offset - block->offset;
    return block->start + padding
      (block, forTrace ? o - TargetBytesPerWord : o) + o;
  }

  Context* con;
  MyBlock* block;
  unsigned offset;
  bool forTrace;
};

Promise*
offset(Context* con, bool forTrace = false)
{
  return new(con->zone) Offset(con, con->lastBlock, con->code.length(), forTrace);
}

bool
bounded(int right, int left, int32_t v)
{
  return ((v << left) >> left) == v and ((v >> right) << right) == v;
}
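
// Reading note (added for clarity): bounded(right, left, v) checks that v
// survives shifting "left" bits off the top and "right" bits off the
// bottom, i.e. that it fits in the field in between. The bounded(0, 8, v)
// call below is exactly "v fits in a signed 24-bit branch offset".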

void*
updateOffset(System* s, uint8_t* instruction, int64_t value)
{
  // ARM's PC is two words ahead, and branches drop the bottom 2 bits.
  int32_t v = (reinterpret_cast<uint8_t*>(value) - (instruction + 8)) >> 2;

  int32_t mask;
  expect(s, bounded(0, 8, v));
  mask = 0xFFFFFF;

  int32_t* p = reinterpret_cast<int32_t*>(instruction);
  *p = (v & mask) | ((~mask) & *p);

  return instruction + 4;
}
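
// Worked example (illustration only): for a branch instruction at 0x100
// targeting 0x200, v = (0x200 - (0x100 + 8)) >> 2 = 0x3e, and that value
// is spliced into the low 24 bits of the instruction word, leaving the
// condition and opcode bits untouched.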

class OffsetListener: public Promise::Listener {
 public:
  OffsetListener(System* s, uint8_t* instruction):
    s(s),
    instruction(instruction)
  { }

  virtual bool resolve(int64_t value, void** location) {
    void* p = updateOffset(s, instruction, value);
    if (location) *location = p;
    return false;
  }

  System* s;
  uint8_t* instruction;
};

class OffsetTask: public Task {
 public:
  OffsetTask(Task* next, Promise* promise, Promise* instructionOffset):
    Task(next),
    promise(promise),
    instructionOffset(instructionOffset)
  { }

  virtual void run(Context* con) {
    if (promise->resolved()) {
      updateOffset
        (con->s, con->result + instructionOffset->value(), promise->value());
    } else {
      new (promise->listen(sizeof(OffsetListener)))
        OffsetListener(con->s, con->result + instructionOffset->value());
    }
  }

  Promise* promise;
  Promise* instructionOffset;
};

void
appendOffsetTask(Context* con, Promise* promise, Promise* instructionOffset)
{
  con->tasks = new(con->zone) OffsetTask(con->tasks, promise, instructionOffset);
}

inline unsigned
index(ArchitectureContext*, UnaryOperation operation, OperandType operand)
{
  return operation + (UnaryOperationCount * operand);
}

inline unsigned
index(ArchitectureContext*,
      BinaryOperation operation,
      OperandType operand1,
      OperandType operand2)
{
  return operation
    + (BinaryOperationCount * operand1)
    + (BinaryOperationCount * OperandTypeCount * operand2);
}

bool
isBranch(TernaryOperation op)
{
  return op > FloatMin;
}

bool UNUSED
isFloatBranch(TernaryOperation op)
{
  return op > JumpIfNotEqual;
}

inline unsigned
index(ArchitectureContext* con UNUSED,
      TernaryOperation operation,
      OperandType operand1)
{
  assert(con, not isBranch(operation));

  return operation + (NonBranchTernaryOperationCount * operand1);
}

unsigned
branchIndex(ArchitectureContext* con UNUSED, OperandType operand1,
            OperandType operand2)
{
  return operand1 + (OperandTypeCount * operand2);
}
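
// Layout note (added for clarity): these index() helpers flatten the
// (operation, operand type...) tuples row-major into the 1-D dispatch
// tables declared in ArchitectureContext, so, e.g., a binary operation's
// entry lives at op + count*type1 + count*typeCount*type2.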

// BEGIN OPERATION COMPILERS

using namespace isa;

// shortcut functions
inline void emit(Context* con, int code) { con->code.append4(code); }

inline int newTemp(Context* con) {
  return con->client->acquireTemporary(GPR_MASK);
}

inline int newTemp(Context* con, unsigned mask) {
  return con->client->acquireTemporary(mask);
}

inline void freeTemp(Context* con, int r) {
  con->client->releaseTemporary(r);
}

inline int64_t getValue(Assembler::Constant* con) {
  return con->value->value();
}

inline Assembler::Register makeTemp(Context* con) {
  Assembler::Register tmp(newTemp(con));
  return tmp;
}

inline Assembler::Register makeTemp64(Context* con) {
  Assembler::Register tmp(newTemp(con), newTemp(con));
  return tmp;
}

inline void freeTemp(Context* con, const Assembler::Register& tmp) {
  if (tmp.low != NoRegister) freeTemp(con, tmp.low);
  if (tmp.high != NoRegister) freeTemp(con, tmp.high);
}

inline void
write4(uint8_t* dst, uint32_t v)
{
  memcpy(dst, &v, 4);
}

void
andC(Context* con, unsigned size, Assembler::Constant* a,
     Assembler::Register* b, Assembler::Register* dst);
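
// The 64-bit cases of the shift helpers below use the classic two-word
// sequence (described here for readability; the logic is unchanged): mask
// the shift amount, shift one word, OR in the bits that cross the word
// boundary, and pick between the "shift < 32" and "shift >= 32" results
// with conditional (MI/PL) instructions driven by the sign of shift - 32.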

void shiftLeftR(Context* con, unsigned size, Assembler::Register* a, Assembler::Register* b, Assembler::Register* t)
{
  if (size == 8) {
    int tmp1 = newTemp(con), tmp2 = newTemp(con), tmp3 = newTemp(con);
    ResolvedPromise maskPromise(0x3F);
    Assembler::Constant mask(&maskPromise);
    Assembler::Register dst(tmp3);
    andC(con, 4, &mask, a, &dst);
    emit(con, lsl(tmp1, b->high, tmp3));
    emit(con, rsbi(tmp2, tmp3, 32));
    emit(con, orrsh(tmp1, tmp1, b->low, tmp2, LSR));
    emit(con, SETS(subi(t->high, tmp3, 32)));
    emit(con, SETCOND(mov(t->high, tmp1), MI));
    emit(con, SETCOND(lsl(t->high, b->low, t->high), PL));
    emit(con, lsl(t->low, b->low, tmp3));
    freeTemp(con, tmp1); freeTemp(con, tmp2); freeTemp(con, tmp3);
  } else {
    int tmp = newTemp(con);
    ResolvedPromise maskPromise(0x1F);
    Assembler::Constant mask(&maskPromise);
    Assembler::Register dst(tmp);
    andC(con, size, &mask, a, &dst);
    emit(con, lsl(t->low, b->low, tmp));
    freeTemp(con, tmp);
  }
}

void
moveRR(Context* con, unsigned srcSize, Assembler::Register* src,
       unsigned dstSize, Assembler::Register* dst);

void shiftLeftC(Context* con, unsigned size UNUSED, Assembler::Constant* a, Assembler::Register* b, Assembler::Register* t)
{
  assert(con, size == TargetBytesPerWord);
  if (getValue(a) & 0x1F) {
    emit(con, lsli(t->low, b->low, getValue(a) & 0x1F));
  } else {
    moveRR(con, size, b, size, t);
  }
}

void shiftRightR(Context* con, unsigned size, Assembler::Register* a, Assembler::Register* b, Assembler::Register* t)
{
  if (size == 8) {
    int tmp1 = newTemp(con), tmp2 = newTemp(con), tmp3 = newTemp(con);
    ResolvedPromise maskPromise(0x3F);
    Assembler::Constant mask(&maskPromise);
    Assembler::Register dst(tmp3);
    andC(con, 4, &mask, a, &dst);
    emit(con, lsr(tmp1, b->low, tmp3));
    emit(con, rsbi(tmp2, tmp3, 32));
    emit(con, orrsh(tmp1, tmp1, b->high, tmp2, LSL));
    emit(con, SETS(subi(t->low, tmp3, 32)));
    emit(con, SETCOND(mov(t->low, tmp1), MI));
    emit(con, SETCOND(asr(t->low, b->high, t->low), PL));
    emit(con, asr(t->high, b->high, tmp3));
    freeTemp(con, tmp1); freeTemp(con, tmp2); freeTemp(con, tmp3);
  } else {
    int tmp = newTemp(con);
    ResolvedPromise maskPromise(0x1F);
    Assembler::Constant mask(&maskPromise);
    Assembler::Register dst(tmp);
    andC(con, size, &mask, a, &dst);
    emit(con, asr(t->low, b->low, tmp));
    freeTemp(con, tmp);
  }
}

void shiftRightC(Context* con, unsigned size UNUSED, Assembler::Constant* a, Assembler::Register* b, Assembler::Register* t)
{
  assert(con, size == TargetBytesPerWord);
  if (getValue(a) & 0x1F) {
    emit(con, asri(t->low, b->low, getValue(a) & 0x1F));
  } else {
    moveRR(con, size, b, size, t);
  }
}

void unsignedShiftRightR(Context* con, unsigned size, Assembler::Register* a, Assembler::Register* b, Assembler::Register* t)
{
  int tmpShift = newTemp(con);
  ResolvedPromise maskPromise(size == 8 ? 0x3F : 0x1F);
  Assembler::Constant mask(&maskPromise);
  Assembler::Register dst(tmpShift);
  andC(con, 4, &mask, a, &dst);
  emit(con, lsr(t->low, b->low, tmpShift));
  if (size == 8) {
    int tmpHi = newTemp(con), tmpLo = newTemp(con);
    emit(con, SETS(rsbi(tmpHi, tmpShift, 32)));
    emit(con, lsl(tmpLo, b->high, tmpHi));
    emit(con, orr(t->low, t->low, tmpLo));
    emit(con, addi(tmpHi, tmpShift, -32));
    emit(con, lsr(tmpLo, b->high, tmpHi));
    emit(con, orr(t->low, t->low, tmpLo));
    emit(con, lsr(t->high, b->high, tmpShift));
    freeTemp(con, tmpHi); freeTemp(con, tmpLo);
  }
  freeTemp(con, tmpShift);
}

void unsignedShiftRightC(Context* con, unsigned size UNUSED, Assembler::Constant* a, Assembler::Register* b, Assembler::Register* t)
{
  assert(con, size == TargetBytesPerWord);
  if (getValue(a) & 0x1F) {
    emit(con, lsri(t->low, b->low, getValue(a) & 0x1F));
  } else {
    moveRR(con, size, b, size, t);
  }
}

class ConstantPoolEntry: public Promise {
 public:
  ConstantPoolEntry(Context* con, Promise* constant, ConstantPoolEntry* next,
                    Promise* callOffset):
    con(con), constant(constant), next(next), callOffset(callOffset),
    address(0)
  { }

  virtual int64_t value() {
    assert(con, resolved());

    return reinterpret_cast<int64_t>(address);
  }

  virtual bool resolved() {
    return address != 0;
  }

  Context* con;
  Promise* constant;
  ConstantPoolEntry* next;
  Promise* callOffset;
  void* address;
  unsigned constantPoolCount;
};

class ConstantPoolListener: public Promise::Listener {
 public:
  ConstantPoolListener(System* s, target_uintptr_t* address,
                       uint8_t* returnAddress):
    s(s),
    address(address),
    returnAddress(returnAddress)
  { }

  virtual bool resolve(int64_t value, void** location) {
    *address = value;
    if (location) {
      *location = returnAddress ? static_cast<void*>(returnAddress) : address;
    }
    return true;
  }

  System* s;
  target_uintptr_t* address;
  uint8_t* returnAddress;
};

class PoolOffset {
 public:
  PoolOffset(MyBlock* block, ConstantPoolEntry* entry, unsigned offset):
    block(block), entry(entry), next(0), offset(offset)
  { }

  MyBlock* block;
  ConstantPoolEntry* entry;
  PoolOffset* next;
  unsigned offset;
};

class PoolEvent {
 public:
  PoolEvent(PoolOffset* poolOffsetHead, PoolOffset* poolOffsetTail,
            unsigned offset):
    poolOffsetHead(poolOffsetHead), poolOffsetTail(poolOffsetTail), next(0),
    offset(offset)
  { }

  PoolOffset* poolOffsetHead;
  PoolOffset* poolOffsetTail;
  PoolEvent* next;
  unsigned offset;
};

void
appendConstantPoolEntry(Context* con, Promise* constant, Promise* callOffset)
{
  if (constant->resolved()) {
    // make a copy, since the original might be allocated on the
    // stack, and we need our copy to live until assembly is complete
    constant = new(con->zone) ResolvedPromise(constant->value());
  }

  con->constantPool = new(con->zone) ConstantPoolEntry(con, constant, con->constantPool, callOffset);

  ++ con->constantPoolCount;

  PoolOffset* o = new(con->zone) PoolOffset(con->lastBlock, con->constantPool, con->code.length() - con->lastBlock->offset);

  if (DebugPool) {
    fprintf(stderr, "add pool offset %p %d to block %p\n",
            o, o->offset, con->lastBlock);
  }

  if (con->lastBlock->poolOffsetTail) {
    con->lastBlock->poolOffsetTail->next = o;
  } else {
    con->lastBlock->poolOffsetHead = o;
  }
  con->lastBlock->poolOffsetTail = o;
}

void
appendPoolEvent(Context* con, MyBlock* b, unsigned offset, PoolOffset* head,
                PoolOffset* tail)
{
  PoolEvent* e = new(con->zone) PoolEvent(head, tail, offset);

  if (b->poolEventTail) {
    b->poolEventTail->next = e;
  } else {
    b->poolEventHead = e;
  }
  b->poolEventTail = e;
}
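
// Accounting note (added for clarity, as far as this section shows):
// padding() below answers "how many extra bytes land at or before this
// offset once pools are emitted": one word per pooled constant, plus one
// word for the jump that skips over the pool when needJump() indicates
// execution could otherwise fall through into the constant data.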

bool
needJump(MyBlock* b)
{
  return b->next or b->size != (b->size & PoolOffsetMask);
}

unsigned
padding(MyBlock* b, unsigned offset)
{
  unsigned total = 0;
  for (PoolEvent* e = b->poolEventHead; e; e = e->next) {
    if (e->offset <= offset) {
      if (needJump(b)) {
        total += TargetBytesPerWord;
      }
      for (PoolOffset* o = e->poolOffsetHead; o; o = o->next) {
        total += TargetBytesPerWord;
      }
    } else {
      break;
    }
  }
  return total;
}

void
resolve(MyBlock* b)
{
  Context* con = b->context;

  if (b->poolOffsetHead) {
    if (con->poolOffsetTail) {
      con->poolOffsetTail->next = b->poolOffsetHead;
    } else {
      con->poolOffsetHead = b->poolOffsetHead;
    }
    con->poolOffsetTail = b->poolOffsetTail;
  }

  if (con->poolOffsetHead) {
    bool append;
    if (b->next == 0 or b->next->poolEventHead) {
      append = true;
    } else {
      int32_t v = (b->start + b->size + b->next->size + TargetBytesPerWord - 8)
        - (con->poolOffsetHead->offset + con->poolOffsetHead->block->start);

      append = (v != (v & PoolOffsetMask));

      if (DebugPool) {
        fprintf(stderr,
                "current %p %d %d next %p %d %d\n",
                b, b->start, b->size, b->next, b->start + b->size,
                b->next->size);
        fprintf(stderr,
                "offset %p %d is of distance %d to next block; append? %d\n",
                con->poolOffsetHead, con->poolOffsetHead->offset, v, append);
      }
    }

    if (append) {
#ifndef NDEBUG
      int32_t v = (b->start + b->size - 8)
        - (con->poolOffsetHead->offset + con->poolOffsetHead->block->start);

      expect(con, v == (v & PoolOffsetMask));
#endif // not NDEBUG

      appendPoolEvent(con, b, b->size, con->poolOffsetHead, con->poolOffsetTail);

      if (DebugPool) {
        for (PoolOffset* o = con->poolOffsetHead; o; o = o->next) {
          fprintf(stderr,
                  "include %p %d in pool event %p at offset %d in block %p\n",
                  o, o->offset, b->poolEventTail, b->size, b);
        }
      }

      con->poolOffsetHead = 0;
      con->poolOffsetTail = 0;
    }
  }
}

void
jumpR(Context* con, unsigned size UNUSED, Assembler::Register* target)
{
  assert(con, size == TargetBytesPerWord);
  emit(con, bx(target->low));
}

void
swapRR(Context* con, unsigned aSize, Assembler::Register* a,
       unsigned bSize, Assembler::Register* b)
{
  assert(con, aSize == TargetBytesPerWord);
  assert(con, bSize == TargetBytesPerWord);

  Assembler::Register tmp(con->client->acquireTemporary(GPR_MASK));
  moveRR(con, aSize, a, bSize, &tmp);
  moveRR(con, bSize, b, aSize, a);
  moveRR(con, bSize, &tmp, bSize, b);
  con->client->releaseTemporary(tmp.low);
}

void
moveRR(Context* con, unsigned srcSize, Assembler::Register* src,
       unsigned dstSize, Assembler::Register* dst)
{
  bool srcIsFpr = isFpr(src);
  bool dstIsFpr = isFpr(dst);
  if (srcIsFpr || dstIsFpr) { // FPR(s) involved
    assert(con, srcSize == dstSize);
    const bool dprec = srcSize == 8;
    if (srcIsFpr && dstIsFpr) { // FPR to FPR
      if (dprec) emit(con, fcpyd(fpr64(dst), fpr64(src))); // double
      else emit(con, fcpys(fpr32(dst), fpr32(src))); // single
    } else if (srcIsFpr) { // FPR to GPR
      if (dprec) emit(con, fmrrd(dst->low, dst->high, fpr64(src)));
      else emit(con, fmrs(dst->low, fpr32(src)));
    } else { // GPR to FPR
      if (dprec) emit(con, fmdrr(fpr64(dst->low), src->low, src->high));
      else emit(con, fmsr(fpr32(dst), src->low));
    }
    return;
  }

  switch (srcSize) {
  case 1:
    emit(con, lsli(dst->low, src->low, 24));
    emit(con, asri(dst->low, dst->low, 24));
    break;

  case 2:
    emit(con, lsli(dst->low, src->low, 16));
    emit(con, asri(dst->low, dst->low, 16));
    break;

  case 4:
  case 8:
    if (srcSize == 4 and dstSize == 8) {
      moveRR(con, 4, src, 4, dst);
      emit(con, asri(dst->high, src->low, 31));
    } else if (srcSize == 8 and dstSize == 8) {
      Assembler::Register srcHigh(src->high);
      Assembler::Register dstHigh(dst->high);

      if (src->high == dst->low) {
        if (src->low == dst->high) {
          swapRR(con, 4, src, 4, dst);
        } else {
          moveRR(con, 4, &srcHigh, 4, &dstHigh);
          moveRR(con, 4, src, 4, dst);
        }
      } else {
        moveRR(con, 4, src, 4, dst);
        moveRR(con, 4, &srcHigh, 4, &dstHigh);
      }
    } else if (src->low != dst->low) {
      emit(con, mov(dst->low, src->low));
    }
    break;

  default: abort(con);
  }
}

void
moveZRR(Context* con, unsigned srcSize, Assembler::Register* src,
        unsigned, Assembler::Register* dst)
{
  switch (srcSize) {
  case 2:
    emit(con, lsli(dst->low, src->low, 16));
    emit(con, lsri(dst->low, dst->low, 16));
    break;

  default: abort(con);
  }
}

void moveCR(Context* con, unsigned size, Assembler::Constant* src,
            unsigned, Assembler::Register* dst);
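
// moveCR2 below picks among four strategies for materializing a constant
// (summarized here for readability): bounce through a GPR temporary for
// FPR destinations, split 64-bit values into two 32-bit moves, use a mov
// immediate when the value fits in 8 bits, and otherwise fall back to a
// constant-pool entry loaded PC-relative. The load's 12-bit offset is
// presumably filled in when the pool is placed; that patching is not
// shown in this section.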

void
moveCR2(Context* con, unsigned size, Assembler::Constant* src,
        Assembler::Register* dst, Promise* callOffset)
{
  if (isFpr(dst)) { // floating-point
    Assembler::Register tmp = size > 4 ? makeTemp64(con) :
      makeTemp(con);
    moveCR(con, size, src, size, &tmp);
    moveRR(con, size, &tmp, size, dst);
    freeTemp(con, tmp);
  } else if (size > 4) {
    uint64_t value = (uint64_t)src->value->value();
    ResolvedPromise loBits(value & MASK_LO32);
    Assembler::Constant srcLo(&loBits);
    ResolvedPromise hiBits(value >> 32);
    Assembler::Constant srcHi(&hiBits);
    Assembler::Register dstHi(dst->high);
    moveCR(con, 4, &srcLo, 4, dst);
    moveCR(con, 4, &srcHi, 4, &dstHi);
  } else if (src->value->resolved() and isOfWidth(getValue(src), 8)) {
    emit(con, movi(dst->low, lo8(getValue(src)))); // fits in immediate
  } else {
    appendConstantPoolEntry(con, src->value, callOffset);
    emit(con, ldri(dst->low, ProgramCounter, 0)); // load 32 bits
  }
}

void
moveCR(Context* con, unsigned size, Assembler::Constant* src,
       unsigned, Assembler::Register* dst)
{
  moveCR2(con, size, src, dst, 0);
}
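
// The 64-bit paths of addR and subR below chain the carry flag through
// the high word: SETS makes the low-word operation set flags, and adc/rsc
// then add or reverse-subtract the high words with carry/borrow. Note
// that subR uses rsb (reverse subtract), so it computes b - a rather
// than a - b.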

void addR(Context* con, unsigned size, Assembler::Register* a, Assembler::Register* b, Assembler::Register* t) {
  if (size == 8) {
    emit(con, SETS(add(t->low, a->low, b->low)));
    emit(con, adc(t->high, a->high, b->high));
  } else {
    emit(con, add(t->low, a->low, b->low));
  }
}

void subR(Context* con, unsigned size, Assembler::Register* a, Assembler::Register* b, Assembler::Register* t) {
  if (size == 8) {
    emit(con, SETS(rsb(t->low, a->low, b->low)));
    emit(con, rsc(t->high, a->high, b->high));
  } else {
    emit(con, rsb(t->low, a->low, b->low));
  }
}

void
addC(Context* con, unsigned size, Assembler::Constant* a,
     Assembler::Register* b, Assembler::Register* dst)
{
  assert(con, size == TargetBytesPerWord);

  int32_t v = a->value->value();
  if (v) {
    if (v > 0 and v < 256) {
      emit(con, addi(dst->low, b->low, v));
    } else if (v > 0 and v < 1024 and v % 4 == 0) {
      emit(con, addi(dst->low, b->low, v >> 2, 15));
    } else {
      // todo
      abort(con);
    }
  } else {
    moveRR(con, size, b, size, dst);
  }
}

void
subC(Context* con, unsigned size, Assembler::Constant* a,
     Assembler::Register* b, Assembler::Register* dst)
{
  assert(con, size == TargetBytesPerWord);

  int32_t v = a->value->value();
  if (v) {
    if (v > 0 and v < 256) {
      emit(con, subi(dst->low, b->low, v));
    } else if (v > 0 and v < 1024 and v % 4 == 0) {
      emit(con, subi(dst->low, b->low, v >> 2, 15));
    } else {
      // todo
      abort(con);
    }
  } else {
    moveRR(con, size, b, size, dst);
  }
}
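
// Encoding note for addC/subC above (added for clarity): rot=15 rotates
// the 8-bit immediate right by 30 bits, i.e. left by 2, which is why
// values divisible by 4 and below 1024 can be emitted as (v >> 2) with
// rotation 15. The 64-bit multiply below uses the schoolbook
// decomposition
//   lo64(a * b) = umull(aLo, bLo) + ((aLo * bHi + aHi * bLo) << 32),
// with the two cross terms folded into the high word via mla.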

void multiplyR(Context* con, unsigned size, Assembler::Register* a, Assembler::Register* b, Assembler::Register* t) {
  if (size == 8) {
    bool useTemporaries = b->low == t->low;
    int tmpLow = useTemporaries ? con->client->acquireTemporary(GPR_MASK) : t->low;
    int tmpHigh = useTemporaries ? con->client->acquireTemporary(GPR_MASK) : t->high;

    emit(con, umull(tmpLow, tmpHigh, a->low, b->low));
    emit(con, mla(tmpHigh, a->low, b->high, tmpHigh));
    emit(con, mla(tmpHigh, a->high, b->low, tmpHigh));

    if (useTemporaries) {
      emit(con, mov(t->low, tmpLow));
      emit(con, mov(t->high, tmpHigh));
      con->client->releaseTemporary(tmpLow);
      con->client->releaseTemporary(tmpHigh);
    }
  } else {
    emit(con, mul(t->low, a->low, b->low));
  }
}

void floatAbsoluteRR(Context* con, unsigned size, Assembler::Register* a, unsigned, Assembler::Register* b) {
  if (size == 8) {
    emit(con, fabsd(fpr64(b), fpr64(a)));
  } else {
    emit(con, fabss(fpr32(b), fpr32(a)));
  }
}

void floatNegateRR(Context* con, unsigned size, Assembler::Register* a, unsigned, Assembler::Register* b) {
  if (size == 8) {
    emit(con, fnegd(fpr64(b), fpr64(a)));
  } else {
    emit(con, fnegs(fpr32(b), fpr32(a)));
  }
}

void float2FloatRR(Context* con, unsigned size, Assembler::Register* a, unsigned, Assembler::Register* b) {
  if (size == 8) {
    emit(con, fcvtsd(fpr32(b), fpr64(a)));
  } else {
    emit(con, fcvtds(fpr64(b), fpr32(a)));
  }
}
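
// FTOSIZD/FTOSIZS convert to a signed 32-bit integer with
// round-toward-zero, matching Java's truncating narrowing
// conversions.  The result is produced in a single-precision register
// and moved back to a GPR with FMRS.  Conversions not handled here
// are done via thunks, per the "else thunked" notes.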

void float2IntRR(Context* con, unsigned size, Assembler::Register* a, unsigned, Assembler::Register* b) {
  int tmp = newTemp(con, FPR_MASK);
  int ftmp = fpr32(tmp);
  if (size == 8) { // double to int
    emit(con, ftosizd(ftmp, fpr64(a)));
  } else { // float to int
    emit(con, ftosizs(ftmp, fpr32(a)));
  } // else thunked
  emit(con, fmrs(b->low, ftmp));
  freeTemp(con, tmp);
}

void int2FloatRR(Context* con, unsigned, Assembler::Register* a, unsigned size, Assembler::Register* b) {
  emit(con, fmsr(fpr32(b), a->low));
  if (size == 8) { // int to double
    emit(con, fsitod(fpr64(b), fpr32(b)));
  } else { // int to float
    emit(con, fsitos(fpr32(b), fpr32(b)));
  } // else thunked
}

void floatSqrtRR(Context* con, unsigned size, Assembler::Register* a, unsigned, Assembler::Register* b) {
  if (size == 8) {
    emit(con, fsqrtd(fpr64(b), fpr64(a)));
  } else {
    emit(con, fsqrts(fpr32(b), fpr32(a)));
  }
}

void floatAddR(Context* con, unsigned size, Assembler::Register* a, Assembler::Register* b, Assembler::Register* t) {
  if (size == 8) {
    emit(con, faddd(fpr64(t), fpr64(a), fpr64(b)));
  } else {
    emit(con, fadds(fpr32(t), fpr32(a), fpr32(b)));
  }
}

void floatSubtractR(Context* con, unsigned size, Assembler::Register* a, Assembler::Register* b, Assembler::Register* t) {
  if (size == 8) {
    emit(con, fsubd(fpr64(t), fpr64(b), fpr64(a)));
  } else {
    emit(con, fsubs(fpr32(t), fpr32(b), fpr32(a)));
  }
}

void floatMultiplyR(Context* con, unsigned size, Assembler::Register* a, Assembler::Register* b, Assembler::Register* t) {
  if (size == 8) {
    emit(con, fmuld(fpr64(t), fpr64(a), fpr64(b)));
  } else {
    emit(con, fmuls(fpr32(t), fpr32(a), fpr32(b)));
  }
}

void floatDivideR(Context* con, unsigned size, Assembler::Register* a, Assembler::Register* b, Assembler::Register* t) {
  if (size == 8) {
    emit(con, fdivd(fpr64(t), fpr64(b), fpr64(a)));
  } else {
    emit(con, fdivs(fpr32(t), fpr32(b), fpr32(a)));
  }
}
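
// normalize() folds a constant offset and/or an index scale into a
// single index register so the caller can emit a plain
// register-indexed access.  When the incoming index must be
// preserved, a temporary is acquired instead and *release tells the
// caller to free it once the access has been emitted.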

int
normalize(Context* con, int offset, int index, unsigned scale,
          bool* preserveIndex, bool* release)
{
  if (offset != 0 or scale != 1) {
    Assembler::Register normalizedIndex
      (*preserveIndex ? con->client->acquireTemporary(GPR_MASK) : index);

    if (*preserveIndex) {
      *release = true;
      *preserveIndex = false;
    } else {
      *release = false;
    }

    int scaled;

    if (scale != 1) {
      Assembler::Register unscaledIndex(index);

      ResolvedPromise scalePromise(log(scale));
      Assembler::Constant scaleConstant(&scalePromise);

      shiftLeftC(con, TargetBytesPerWord, &scaleConstant,
                 &unscaledIndex, &normalizedIndex);

      scaled = normalizedIndex.low;
    } else {
      scaled = index;
    }

    if (offset != 0) {
      Assembler::Register untranslatedIndex(scaled);

      ResolvedPromise offsetPromise(offset);
      Assembler::Constant offsetConstant(&offsetPromise);

      Assembler::Register tmp(con->client->acquireTemporary(GPR_MASK));
      moveCR(con, TargetBytesPerWord, &offsetConstant, TargetBytesPerWord, &tmp);
      addR(con, TargetBytesPerWord, &tmp, &untranslatedIndex, &normalizedIndex);
      con->client->releaseTemporary(tmp.low);
    }

    return normalizedIndex.low;
  } else {
    *release = false;
    return index;
  }
}
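
// store() picks the cheapest addressing mode available: a
// register-indexed store, an immediate-offset store when the offset
// fits the instruction's immediate field (8 bits for halfword
// transfers, 12 bits for word and byte transfers), or, failing that,
// a store through a temporary register holding the offset.  64-bit
// values are split into two 32-bit stores, and VFP registers take a
// separate path because FSTS/FSTD only accept a base register plus an
// immediate offset.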

void
store(Context* con, unsigned size, Assembler::Register* src,
      int base, int offset, int index, unsigned scale, bool preserveIndex)
{
  if (index != NoRegister) {
    bool release;
    int normalized = normalize
      (con, offset, index, scale, &preserveIndex, &release);

    if (!isFpr(src)) { // GPR store
      switch (size) {
      case 1:
        emit(con, strb(src->low, base, normalized));
        break;

      case 2:
        emit(con, strh(src->low, base, normalized));
        break;

      case 4:
        emit(con, str(src->low, base, normalized));
        break;

      case 8: { // split into 2 32-bit stores
        Assembler::Register srcHigh(src->high);
        store(con, 4, &srcHigh, base, 0, normalized, 1, preserveIndex);
        store(con, 4, src, base, 4, normalized, 1, preserveIndex);
      } break;

      default: abort(con);
      }
    } else { // FPR store
      Assembler::Register base_(base),
                          normalized_(normalized),
                          absAddr = makeTemp(con);
      // FPR stores have only bases, so we must add the index
      addR(con, TargetBytesPerWord, &base_, &normalized_, &absAddr);
      // double-precision
      if (size == 8) emit(con, fstd(fpr64(src), absAddr.low));
      // single-precision
      else emit(con, fsts(fpr32(src), absAddr.low));
      freeTemp(con, absAddr);
    }

    if (release) con->client->releaseTemporary(normalized);
  } else if (size == 8
             or abs(offset) == (abs(offset) & 0xFF)
             or (size != 2 and abs(offset) == (abs(offset) & 0xFFF)))
  {
    if (!isFpr(src)) { // GPR store
      switch (size) {
      case 1:
        emit(con, strbi(src->low, base, offset));
        break;

      case 2:
        emit(con, strhi(src->low, base, offset));
        break;

      case 4:
        emit(con, stri(src->low, base, offset));
        break;

      case 8: { // split into 2 32-bit stores
        Assembler::Register srcHigh(src->high);
        store(con, 4, &srcHigh, base, offset, NoRegister, 1, false);
        store(con, 4, src, base, offset + 4, NoRegister, 1, false);
      } break;

      default: abort(con);
      }
    } else { // FPR store
      // double-precision
      if (size == 8) emit(con, fstd(fpr64(src), base, offset));
      // single-precision
      else emit(con, fsts(fpr32(src), base, offset));
    }
  } else {
    Assembler::Register tmp(con->client->acquireTemporary(GPR_MASK));
    ResolvedPromise offsetPromise(offset);
    Assembler::Constant offsetConstant(&offsetPromise);
    moveCR(con, TargetBytesPerWord, &offsetConstant,
           TargetBytesPerWord, &tmp);

    store(con, size, src, base, 0, tmp.low, 1, false);

    con->client->releaseTemporary(tmp.low);
  }
}

void
moveRM(Context* con, unsigned srcSize, Assembler::Register* src,
       unsigned dstSize UNUSED, Assembler::Memory* dst)
{
  assert(con, srcSize == dstSize);

  store(con, srcSize, src, dst->base, dst->offset, dst->index, dst->scale, true);
}

void
load(Context* con, unsigned srcSize, int base, int offset, int index,
     unsigned scale, unsigned dstSize, Assembler::Register* dst,
     bool preserveIndex, bool signExtend)
{
  if (index != NoRegister) {
    bool release;
    int normalized = normalize
      (con, offset, index, scale, &preserveIndex, &release);

    if (!isFpr(dst)) { // GPR load
      switch (srcSize) {
      case 1:
        if (signExtend) {
          emit(con, ldrsb(dst->low, base, normalized));
        } else {
          emit(con, ldrb(dst->low, base, normalized));
        }
        break;

      case 2:
        if (signExtend) {
          emit(con, ldrsh(dst->low, base, normalized));
        } else {
          emit(con, ldrh(dst->low, base, normalized));
        }
        break;

      case 4:
      case 8: {
        if (srcSize == 4 and dstSize == 8) {
          load(con, 4, base, 0, normalized, 1, 4, dst, preserveIndex,
               false);
          moveRR(con, 4, dst, 8, dst);
        } else if (srcSize == 8 and dstSize == 8) {
          Assembler::Register dstHigh(dst->high);
          load(con, 4, base, 0, normalized, 1, 4, &dstHigh,
               preserveIndex, false);
          load(con, 4, base, 4, normalized, 1, 4, dst, preserveIndex,
               false);
        } else {
          emit(con, ldr(dst->low, base, normalized));
        }
      } break;

      default: abort(con);
      }
    } else { // FPR load
      Assembler::Register base_(base),
                          normalized_(normalized),
                          absAddr = makeTemp(con);
      // VFP loads only have bases, so we must add the index
      addR(con, TargetBytesPerWord, &base_, &normalized_, &absAddr);
      // double-precision
      if (srcSize == 8) emit(con, fldd(fpr64(dst), absAddr.low));
      // single-precision
      else emit(con, flds(fpr32(dst), absAddr.low));
      freeTemp(con, absAddr);
    }

    if (release) con->client->releaseTemporary(normalized);
  } else if ((srcSize == 8 and dstSize == 8)
             or abs(offset) == (abs(offset) & 0xFF)
             or (srcSize != 2
                 and (srcSize != 1 or not signExtend)
                 and abs(offset) == (abs(offset) & 0xFFF)))
  {
    if (!isFpr(dst)) { // GPR load
      switch (srcSize) {
      case 1:
        if (signExtend) {
          emit(con, ldrsbi(dst->low, base, offset));
        } else {
          emit(con, ldrbi(dst->low, base, offset));
        }
        break;

      case 2:
        if (signExtend) {
          emit(con, ldrshi(dst->low, base, offset));
        } else {
          emit(con, ldrhi(dst->low, base, offset));
        }
        break;

      case 4:
        emit(con, ldri(dst->low, base, offset));
        break;

      case 8: {
        if (dstSize == 8) {
          Assembler::Register dstHigh(dst->high);
          load(con, 4, base, offset, NoRegister, 1, 4, &dstHigh, false,
               false);
          load(con, 4, base, offset + 4, NoRegister, 1, 4, dst, false,
               false);
        } else {
          emit(con, ldri(dst->low, base, offset));
        }
      } break;

      default: abort(con);
      }
    } else { // FPR load
      // double-precision
      if (srcSize == 8) emit(con, fldd(fpr64(dst), base, offset));
      // single-precision
      else emit(con, flds(fpr32(dst), base, offset));
    }
  } else {
    Assembler::Register tmp(con->client->acquireTemporary(GPR_MASK));
    ResolvedPromise offsetPromise(offset);
    Assembler::Constant offsetConstant(&offsetPromise);
    moveCR(con, TargetBytesPerWord, &offsetConstant, TargetBytesPerWord,
           &tmp);

    load(con, srcSize, base, 0, tmp.low, 1, dstSize, dst, false,
         signExtend);

    con->client->releaseTemporary(tmp.low);
  }
}

void
moveMR(Context* con, unsigned srcSize, Assembler::Memory* src,
       unsigned dstSize, Assembler::Register* dst)
{
  load(con, srcSize, src->base, src->offset, src->index, src->scale,
       dstSize, dst, true, true);
}

void
moveZMR(Context* con, unsigned srcSize, Assembler::Memory* src,
        unsigned dstSize, Assembler::Register* dst)
{
  load(con, srcSize, src->base, src->offset, src->index, src->scale,
       dstSize, dst, true, false);
}

void
andR(Context* con, unsigned size, Assembler::Register* a,
     Assembler::Register* b, Assembler::Register* dst)
{
  if (size == 8) emit(con, and_(dst->high, a->high, b->high));
  emit(con, and_(dst->low, a->low, b->low));
}
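
// andC tries to stay in a single instruction: a mask with only the
// low eight bits significant becomes AND with an immediate, and a
// mask whose upper 24 bits are all ones becomes BIC (bit clear) of
// the complemented low byte.  For example, v == 0xFFFFFF0F clears
// bits 4-7 and is emitted as bici(dst, b, 0xF0).  Anything else is
// materialized into a register first.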

void
andC(Context* con, unsigned size, Assembler::Constant* a,
     Assembler::Register* b, Assembler::Register* dst)
{
  int64_t v = a->value->value();

  if (size == 8) {
    ResolvedPromise high((v >> 32) & 0xFFFFFFFF);
    Assembler::Constant ah(&high);

    ResolvedPromise low(v & 0xFFFFFFFF);
    Assembler::Constant al(&low);

    Assembler::Register bh(b->high);
    Assembler::Register dh(dst->high);

    andC(con, 4, &al, b, dst);
    andC(con, 4, &ah, &bh, &dh);
  } else {
    uint32_t v32 = static_cast<uint32_t>(v);
    if (v32 != 0xFFFFFFFF) {
      if ((v32 & 0xFFFFFF00) == 0xFFFFFF00) {
        emit(con, bici(dst->low, b->low, (~(v32 & 0xFF)) & 0xFF));
      } else if ((v32 & 0xFFFFFF00) == 0) {
        emit(con, andi(dst->low, b->low, v32 & 0xFF));
      } else {
        // todo: there are other cases we can handle in one
        // instruction

        bool useTemporary = b->low == dst->low;
        Assembler::Register tmp(dst->low);
        if (useTemporary) {
          tmp.low = con->client->acquireTemporary(GPR_MASK);
        }

        moveCR(con, 4, a, 4, &tmp);
        andR(con, 4, b, &tmp, dst);

        if (useTemporary) {
          con->client->releaseTemporary(tmp.low);
        }
      }
    } else {
      moveRR(con, size, b, size, dst);
    }
  }
}

void
orR(Context* con, unsigned size, Assembler::Register* a,
    Assembler::Register* b, Assembler::Register* dst)
{
  if (size == 8) emit(con, orr(dst->high, a->high, b->high));
  emit(con, orr(dst->low, a->low, b->low));
}

void
xorR(Context* con, unsigned size, Assembler::Register* a,
     Assembler::Register* b, Assembler::Register* dst)
{
  if (size == 8) emit(con, eor(dst->high, a->high, b->high));
  emit(con, eor(dst->low, a->low, b->low));
}

void
moveAR2(Context* con, unsigned srcSize, Assembler::Address* src,
        unsigned dstSize, Assembler::Register* dst)
{
  assert(con, srcSize == 4 and dstSize == 4);

  Assembler::Constant constant(src->address);
  moveCR(con, srcSize, &constant, dstSize, dst);

  Assembler::Memory memory(dst->low, 0, -1, 0);
  moveMR(con, dstSize, &memory, dstSize, dst);
}

void
moveAR(Context* con, unsigned srcSize, Assembler::Address* src,
       unsigned dstSize, Assembler::Register* dst)
{
  moveAR2(con, srcSize, src, dstSize, dst);
}

void
compareRR(Context* con, unsigned aSize, Assembler::Register* a,
          unsigned bSize UNUSED, Assembler::Register* b)
{
  assert(con, !(isFpr(a) ^ isFpr(b))); // regs must be of the same type

  if (!isFpr(a)) { // GPR compare
    assert(con, aSize == 4 && bSize == 4);
    /**///assert(con, b->low != a->low);
    emit(con, cmp(b->low, a->low));
  } else { // FPR compare
    assert(con, aSize == bSize);
    if (aSize == 8) emit(con, fcmpd(fpr64(b), fpr64(a))); // double
    else emit(con, fcmps(fpr32(b), fpr32(a))); // single
    emit(con, fmstat());
  }
}

void
compareCR(Context* con, unsigned aSize, Assembler::Constant* a,
          unsigned bSize, Assembler::Register* b)
{
  assert(con, aSize == 4 and bSize == 4);

  if (!isFpr(b) && a->value->resolved() &&
      isOfWidth(a->value->value(), 8)) {
    emit(con, cmpi(b->low, a->value->value()));
  } else {
    Assembler::Register tmp(con->client->acquireTemporary(GPR_MASK));
    moveCR(con, aSize, a, bSize, &tmp);
    compareRR(con, bSize, &tmp, bSize, b);
    con->client->releaseTemporary(tmp.low);
  }
}

void
compareCM(Context* con, unsigned aSize, Assembler::Constant* a,
          unsigned bSize, Assembler::Memory* b)
{
  assert(con, aSize == 4 and bSize == 4);

  Assembler::Register tmp(con->client->acquireTemporary(GPR_MASK));
  moveMR(con, bSize, b, bSize, &tmp);
  compareCR(con, aSize, a, bSize, &tmp);
  con->client->releaseTemporary(tmp.low);
}

void
compareRM(Context* con, unsigned aSize, Assembler::Register* a,
          unsigned bSize, Assembler::Memory* b)
{
  assert(con, aSize == 4 and bSize == 4);

  Assembler::Register tmp(con->client->acquireTemporary(GPR_MASK));
  moveMR(con, bSize, b, bSize, &tmp);
  compareRR(con, aSize, a, bSize, &tmp);
  con->client->releaseTemporary(tmp.low);
}
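
// branch() maps an abstract comparison to an ARM condition code.
// After FCMP/FMSTAT, the signed conditions give the expected results
// for ordered operands while unordered operands satisfy LT and LE,
// which is why the plain float variants and the *LessOrUnordered
// variants share blt/ble.  The variants that must treat unordered as
// "greater" use HI (C set, Z clear) and PL (N clear), both of which
// are also true after an unordered compare.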

int32_t
branch(Context* con, TernaryOperation op)
{
  switch (op) {
  case JumpIfEqual:
  case JumpIfFloatEqual:
    return beq(0);

  case JumpIfNotEqual:
  case JumpIfFloatNotEqual:
    return bne(0);

  case JumpIfLess:
  case JumpIfFloatLess:
  case JumpIfFloatLessOrUnordered:
    return blt(0);

  case JumpIfGreater:
  case JumpIfFloatGreater:
    return bgt(0);

  case JumpIfLessOrEqual:
  case JumpIfFloatLessOrEqual:
  case JumpIfFloatLessOrEqualOrUnordered:
    return ble(0);

  case JumpIfGreaterOrEqual:
  case JumpIfFloatGreaterOrEqual:
    return bge(0);

  case JumpIfFloatGreaterOrUnordered:
    return bhi(0);

  case JumpIfFloatGreaterOrEqualOrUnordered:
    return bpl(0);

  default:
    abort(con);
  }
}

void
conditional(Context* con, int32_t branch, Assembler::Constant* target)
{
  appendOffsetTask(con, target->value, offset(con));
  emit(con, branch);
}

void
branch(Context* con, TernaryOperation op, Assembler::Constant* target)
{
  conditional(con, branch(con, op), target);
}
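
// branchLong() compares a two-word value: first the high words with a
// signed compare, branching directly (or past the rest of the
// sequence) when they alone decide the result, then the low words
// with an unsigned compare, since the low word of a 64-bit value is
// an unsigned 32-bit quantity regardless of sign.  "next" records the
// position of the skip branch so its offset can be patched once the
// full sequence has been emitted.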

void
branchLong(Context* con, TernaryOperation op, Assembler::Operand* al,
           Assembler::Operand* ah, Assembler::Operand* bl,
           Assembler::Operand* bh, Assembler::Constant* target,
           BinaryOperationType compareSigned,
           BinaryOperationType compareUnsigned)
{
  compareSigned(con, 4, ah, 4, bh);

  unsigned next = 0;

  switch (op) {
  case JumpIfEqual:
  case JumpIfFloatEqual:
    next = con->code.length();
    emit(con, bne(0));

    compareSigned(con, 4, al, 4, bl);
    conditional(con, beq(0), target);
    break;

  case JumpIfNotEqual:
  case JumpIfFloatNotEqual:
    conditional(con, bne(0), target);

    compareSigned(con, 4, al, 4, bl);
    conditional(con, bne(0), target);
    break;

  case JumpIfLess:
  case JumpIfFloatLess:
    conditional(con, blt(0), target);

    next = con->code.length();
    emit(con, bgt(0));

    compareUnsigned(con, 4, al, 4, bl);
    conditional(con, blo(0), target);
    break;

  case JumpIfGreater:
  case JumpIfFloatGreater:
    conditional(con, bgt(0), target);

    next = con->code.length();
    emit(con, blt(0));

    compareUnsigned(con, 4, al, 4, bl);
    conditional(con, bhi(0), target);
    break;

  case JumpIfLessOrEqual:
  case JumpIfFloatLessOrEqual:
    conditional(con, blt(0), target);

    next = con->code.length();
    emit(con, bgt(0));

    compareUnsigned(con, 4, al, 4, bl);
    conditional(con, bls(0), target);
    break;

  case JumpIfGreaterOrEqual:
  case JumpIfFloatGreaterOrEqual:
    conditional(con, bgt(0), target);

    next = con->code.length();
    emit(con, blt(0));

    compareUnsigned(con, 4, al, 4, bl);
    conditional(con, bhs(0), target);
    break;

  default:
    abort(con);
  }

  if (next) {
    updateOffset
      (con->s, con->code.data + next, reinterpret_cast<intptr_t>
       (con->code.data + con->code.length()));
  }
}

void
branchRR(Context* con, TernaryOperation op, unsigned size,
         Assembler::Register* a, Assembler::Register* b,
         Assembler::Constant* target)
{
  if (!isFpr(a) && size > TargetBytesPerWord) {
    Assembler::Register ah(a->high);
    Assembler::Register bh(b->high);

    branchLong(con, op, a, &ah, b, &bh, target, CAST2(compareRR),
               CAST2(compareRR));
  } else {
    compareRR(con, size, a, size, b);
    branch(con, op, target);
  }
}

void
branchCR(Context* con, TernaryOperation op, unsigned size,
         Assembler::Constant* a, Assembler::Register* b,
         Assembler::Constant* target)
{
  assert(con, !isFloatBranch(op));

  if (size > TargetBytesPerWord) {
    int64_t v = a->value->value();

    ResolvedPromise low(v & ~static_cast<target_uintptr_t>(0));
    Assembler::Constant al(&low);

    ResolvedPromise high((v >> 32) & ~static_cast<target_uintptr_t>(0));
    Assembler::Constant ah(&high);

    Assembler::Register bh(b->high);

    branchLong(con, op, &al, &ah, b, &bh, target, CAST2(compareCR),
               CAST2(compareCR));
  } else {
    compareCR(con, size, a, size, b);
    branch(con, op, target);
  }
}

void
branchRM(Context* con, TernaryOperation op, unsigned size,
         Assembler::Register* a, Assembler::Memory* b,
         Assembler::Constant* target)
{
  assert(con, !isFloatBranch(op));
  assert(con, size <= TargetBytesPerWord);

  compareRM(con, size, a, size, b);
  branch(con, op, target);
}

void
branchCM(Context* con, TernaryOperation op, unsigned size,
         Assembler::Constant* a, Assembler::Memory* b,
         Assembler::Constant* target)
{
  assert(con, !isFloatBranch(op));
  assert(con, size <= TargetBytesPerWord);

  compareCM(con, size, a, size, b);
  branch(con, op, target);
}

ShiftMaskPromise*
shiftMaskPromise(Context* con, Promise* base, unsigned shift, int64_t mask)
{
  return new(con->zone) ShiftMaskPromise(base, shift, mask);
}

void
moveCM(Context* con, unsigned srcSize, Assembler::Constant* src,
       unsigned dstSize, Assembler::Memory* dst)
{
  switch (dstSize) {
  case 8: {
    Assembler::Constant srcHigh
      (shiftMaskPromise(con, src->value, 32, 0xFFFFFFFF));
    Assembler::Constant srcLow
      (shiftMaskPromise(con, src->value, 0, 0xFFFFFFFF));

    Assembler::Memory dstLow
      (dst->base, dst->offset + 4, dst->index, dst->scale);

    moveCM(con, 4, &srcLow, 4, &dstLow);
    moveCM(con, 4, &srcHigh, 4, dst);
  } break;

  default:
    Assembler::Register tmp(con->client->acquireTemporary(GPR_MASK));
    moveCR(con, srcSize, src, dstSize, &tmp);
    moveRM(con, dstSize, &tmp, dstSize, dst);
    con->client->releaseTemporary(tmp.low);
  }
}
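
// Two's-complement negation, open-coded as complement-then-increment:
// MVN inverts the bits and ADDS adds one while setting the carry
// flag, which ADC then propagates into the inverted high word for
// 64-bit values.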

void
negateRR(Context* con, unsigned srcSize, Assembler::Register* src,
         unsigned dstSize UNUSED, Assembler::Register* dst)
{
  assert(con, srcSize == dstSize);

  emit(con, mvn(dst->low, src->low));
  emit(con, SETS(addi(dst->low, dst->low, 1)));
  if (srcSize == 8) {
    emit(con, mvn(dst->high, src->high));
    emit(con, adci(dst->high, dst->high, 0));
  }
}

void
callR(Context* con, unsigned size UNUSED, Assembler::Register* target)
{
  assert(con, size == TargetBytesPerWord);
  emit(con, blx(target->low));
}

void
callC(Context* con, unsigned size UNUSED, Assembler::Constant* target)
{
  assert(con, size == TargetBytesPerWord);

  appendOffsetTask(con, target->value, offset(con));
  emit(con, bl(0));
}
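
// A direct BL or B only reaches +/-32MB (a signed 24-bit word
// offset), so the "long" variants below materialize the full 32-bit
// target address into a register and branch through it, which works
// at any distance.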

void
longCallC(Context* con, unsigned size UNUSED, Assembler::Constant* target)
{
  assert(con, size == TargetBytesPerWord);

  Assembler::Register tmp(4);
  moveCR2(con, TargetBytesPerWord, target, &tmp, offset(con));
  callR(con, TargetBytesPerWord, &tmp);
}

void
longJumpC(Context* con, unsigned size UNUSED, Assembler::Constant* target)
{
  assert(con, size == TargetBytesPerWord);

  Assembler::Register tmp(4); // a non-arg reg that we don't mind clobbering
  moveCR2(con, TargetBytesPerWord, target, &tmp, offset(con));
  jumpR(con, TargetBytesPerWord, &tmp);
}

void
jumpC(Context* con, unsigned size UNUSED, Assembler::Constant* target)
{
  assert(con, size == TargetBytesPerWord);

  appendOffsetTask(con, target->value, offset(con));
  emit(con, b(0));
}

void
return_(Context* con)
{
  emit(con, bx(LinkRegister));
}

void
trap(Context* con)
{
  emit(con, bkpt(0));
}

void
memoryBarrier(Context*) {}

// END OPERATION COMPILERS

unsigned
argumentFootprint(unsigned footprint)
{
  return max(pad(footprint, StackAlignmentInWords), StackAlignmentInWords);
}

void
nextFrame(ArchitectureContext* con, uint32_t* start, unsigned size UNUSED,
          unsigned footprint, void* link, bool,
          unsigned targetParameterFootprint UNUSED, void** ip, void** stack)
{
  assert(con, *ip >= start);
  assert(con, *ip <= start + (size / TargetBytesPerWord));

  uint32_t* instruction = static_cast<uint32_t*>(*ip);

  if ((*start >> 20) == 0xe59) {
    // skip stack overflow check
    start += 3;
  }

  if (instruction <= start) {
    *ip = link;
    return;
  }

  unsigned offset = footprint + FrameHeaderSize;

  if (instruction <= start + 2) {
    *ip = link;
    *stack = static_cast<void**>(*stack) + offset;
    return;
  }

  if (*instruction == 0xe12fff1e) { // return
    *ip = link;
    return;
  }

  if (TailCalls) {
    if (argumentFootprint(targetParameterFootprint) > StackAlignmentInWords) {
      offset += argumentFootprint(targetParameterFootprint)
        - StackAlignmentInWords;
    }

    // check for post-non-tail-call stack adjustment of the form "sub
    // sp, sp, #offset" (0xe24dd is an immediate SUB with sp as both
    // source and destination):
    if ((*instruction >> 12) == 0xe24dd) {
      unsigned value = *instruction & 0xff;
      unsigned rotation = (*instruction >> 8) & 0xf;
      switch (rotation) {
      case 0: offset -= value / TargetBytesPerWord; break;
      case 15: offset -= value; break;
      default: abort(con);
      }
    }

    // todo: check for and handle tail calls
  }

  *ip = static_cast<void**>(*stack)[offset - 1];
  *stack = static_cast<void**>(*stack) + offset;
}
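
// populateTables() wires each abstract operation/operand-type
// combination to the function that compiles it; index() and
// branchIndex() compute the table slot for a signature, and the CASTn
// macros erase the parameter types so entries share a common table
// type.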

void
populateTables(ArchitectureContext* con)
{
  const OperandType C = ConstantOperand;
  const OperandType A = AddressOperand;
  const OperandType R = RegisterOperand;
  const OperandType M = MemoryOperand;

  OperationType* zo = con->operations;
  UnaryOperationType* uo = con->unaryOperations;
  BinaryOperationType* bo = con->binaryOperations;
  TernaryOperationType* to = con->ternaryOperations;
  BranchOperationType* bro = con->branchOperations;

  zo[Return] = return_;
  zo[LoadBarrier] = memoryBarrier;
  zo[StoreStoreBarrier] = memoryBarrier;
  zo[StoreLoadBarrier] = memoryBarrier;
  zo[Trap] = trap;

  uo[index(con, LongCall, C)] = CAST1(longCallC);
  uo[index(con, AlignedLongCall, C)] = CAST1(longCallC);
  uo[index(con, LongJump, C)] = CAST1(longJumpC);
  uo[index(con, AlignedLongJump, C)] = CAST1(longJumpC);

  uo[index(con, Jump, R)] = CAST1(jumpR);
  uo[index(con, Jump, C)] = CAST1(jumpC);

  uo[index(con, AlignedJump, R)] = CAST1(jumpR);
  uo[index(con, AlignedJump, C)] = CAST1(jumpC);

  uo[index(con, Call, C)] = CAST1(callC);
  uo[index(con, Call, R)] = CAST1(callR);

  uo[index(con, AlignedCall, C)] = CAST1(callC);
  uo[index(con, AlignedCall, R)] = CAST1(callR);

  bo[index(con, Move, R, R)] = CAST2(moveRR);
  bo[index(con, Move, C, R)] = CAST2(moveCR);
  bo[index(con, Move, C, M)] = CAST2(moveCM);
  bo[index(con, Move, M, R)] = CAST2(moveMR);
  bo[index(con, Move, R, M)] = CAST2(moveRM);
  bo[index(con, Move, A, R)] = CAST2(moveAR);

  bo[index(con, MoveZ, R, R)] = CAST2(moveZRR);
  bo[index(con, MoveZ, M, R)] = CAST2(moveZMR);
  bo[index(con, MoveZ, C, R)] = CAST2(moveCR);

  bo[index(con, Negate, R, R)] = CAST2(negateRR);

  bo[index(con, FloatAbsolute, R, R)] = CAST2(floatAbsoluteRR);
  bo[index(con, FloatNegate, R, R)] = CAST2(floatNegateRR);
  bo[index(con, Float2Float, R, R)] = CAST2(float2FloatRR);
  bo[index(con, Float2Int, R, R)] = CAST2(float2IntRR);
  bo[index(con, Int2Float, R, R)] = CAST2(int2FloatRR);
  bo[index(con, FloatSquareRoot, R, R)] = CAST2(floatSqrtRR);

  to[index(con, Add, R)] = CAST3(addR);
  to[index(con, Subtract, R)] = CAST3(subR);
  to[index(con, Multiply, R)] = CAST3(multiplyR);

  to[index(con, FloatAdd, R)] = CAST3(floatAddR);
  to[index(con, FloatSubtract, R)] = CAST3(floatSubtractR);
  to[index(con, FloatMultiply, R)] = CAST3(floatMultiplyR);
  to[index(con, FloatDivide, R)] = CAST3(floatDivideR);

  to[index(con, ShiftLeft, R)] = CAST3(shiftLeftR);
  to[index(con, ShiftLeft, C)] = CAST3(shiftLeftC);

  to[index(con, ShiftRight, R)] = CAST3(shiftRightR);
  to[index(con, ShiftRight, C)] = CAST3(shiftRightC);

  to[index(con, UnsignedShiftRight, R)] = CAST3(unsignedShiftRightR);
  to[index(con, UnsignedShiftRight, C)] = CAST3(unsignedShiftRightC);

  to[index(con, And, R)] = CAST3(andR);
  to[index(con, And, C)] = CAST3(andC);

  to[index(con, Or, R)] = CAST3(orR);

  to[index(con, Xor, R)] = CAST3(xorR);

  bro[branchIndex(con, R, R)] = CAST_BRANCH(branchRR);
  bro[branchIndex(con, C, R)] = CAST_BRANCH(branchCR);
  bro[branchIndex(con, C, M)] = CAST_BRANCH(branchCM);
  bro[branchIndex(con, R, M)] = CAST_BRANCH(branchRM);
}
|
|
|
|
|
|
|
|
class MyArchitecture: public Assembler::Architecture {
|
|
|
|
public:
|
2012-06-12 17:00:45 -06:00
|
|
|
MyArchitecture(System* system): con(system), referenceCount(0) {
|
|
|
|
populateTables(&con);
|
2009-08-06 11:52:36 -06:00
|
|
|
}
|
|
|
|
|
2010-07-12 14:18:36 -06:00
|
|
|
virtual unsigned floatRegisterSize() {
|
2012-06-12 17:00:45 -06:00
|
|
|
return vfpSupported() ? 8 : 0;
|
2010-07-12 14:18:36 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
virtual uint32_t generalRegisterMask() {
|
2011-12-06 09:24:30 -07:00
|
|
|
return GPR_MASK;
|
2010-07-12 14:18:36 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
virtual uint32_t floatRegisterMask() {
|
2011-12-06 09:24:30 -07:00
|
|
|
return vfpSupported() ? FPR_MASK : 0;
|
2009-08-06 11:52:36 -06:00
|
|
|
}
|
|
|
|
|
2011-09-23 22:21:54 -06:00
|
|
|
virtual int scratch() {
|
|
|
|
return 5;
|
|
|
|
}
|
|
|
|
|
2009-08-06 11:52:36 -06:00
|
|
|
virtual int stack() {
|
|
|
|
return StackRegister;
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual int thread() {
|
|
|
|
return ThreadRegister;
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual int returnLow() {
|
2010-07-12 14:18:36 -06:00
|
|
|
return 0;
|
2009-08-06 11:52:36 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
virtual int returnHigh() {
|
2010-07-12 14:18:36 -06:00
|
|
|
return 1;
|
2009-08-06 11:52:36 -06:00
|
|
|
}
|
|
|
|
|
2009-10-29 10:12:30 -06:00
|
|
|
virtual int virtualCallTarget() {
|
|
|
|
return 4;
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual int virtualCallIndex() {
|
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
|
2010-07-12 14:18:36 -06:00
|
|
|
virtual bool bigEndian() {
|
2009-08-06 11:52:36 -06:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2010-07-12 14:18:36 -06:00
|
|
|
virtual uintptr_t maximumImmediateJump() {
|
2010-08-24 17:59:01 -06:00
|
|
|
return 0x1FFFFFF;
|
2009-08-06 11:52:36 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
virtual bool reserved(int register_) {
|
|
|
|
switch (register_) {
|
2010-11-08 00:41:44 +00:00
|
|
|
case LinkRegister:
|
2009-08-06 11:52:36 -06:00
|
|
|
case StackRegister:
|
|
|
|
case ThreadRegister:
|
2010-08-24 17:59:01 -06:00
|
|
|
case ProgramCounter:
|
2009-08-06 11:52:36 -06:00
|
|
|
return true;
|
|
|
|
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-10-29 10:12:30 -06:00
|
|
|
virtual unsigned frameFootprint(unsigned footprint) {
|
|
|
|
return max(footprint, StackAlignmentInWords);
|
|
|
|
}
|
|
|
|
|
2009-08-06 11:52:36 -06:00
|
|
|
virtual unsigned argumentFootprint(unsigned footprint) {
|
2011-01-29 11:10:54 -07:00
|
|
|
return ::argumentFootprint(footprint);
|
2009-08-06 11:52:36 -06:00
|
|
|
}

  virtual bool argumentAlignment() {
#ifdef __APPLE__
    return false;
#else
    return true;
#endif
  }

  virtual bool argumentRegisterAlignment() {
#ifdef __APPLE__
    return false;
#else
    return true;
#endif
  }

  virtual unsigned argumentRegisterCount() {
    return 4;
  }

  virtual int argumentRegister(unsigned index) {
    assert(&con, index < argumentRegisterCount());

    return index;
  }

  virtual bool hasLinkRegister() {
    return true;
  }

  virtual unsigned stackAlignmentInWords() {
    return StackAlignmentInWords;
  }
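
  // To recognize a call to a given target, re-encode the BL
  // instruction such a call would have produced and compare it with
  // the word just before the return address (on ARM, a BL at address i
  // targeting t encodes the word offset ((t - i - 8) >> 2) in its low
  // 24 bits).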
  virtual bool matchCall(void* returnAddress, void* target) {
    uint32_t* instruction = static_cast<uint32_t*>(returnAddress) - 1;

    return *instruction == static_cast<uint32_t>
      (bl(static_cast<uint8_t*>(target)
          - reinterpret_cast<uint8_t*>(instruction)));
  }
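
  // Immediate calls and jumps are retargeted by patching the 24-bit
  // offset in the branch instruction itself; the long forms load their
  // target from the constant pool, so we patch the pool entry instead.
  // The "+ 8" below reflects the ARM convention that a PC-relative
  // load's base is the address of the instruction plus 8.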
  virtual void updateCall(UnaryOperation op UNUSED,
                          void* returnAddress,
                          void* newTarget)
  {
    switch (op) {
    case Call:
    case Jump:
    case AlignedCall:
    case AlignedJump: {
      updateOffset(con.s, static_cast<uint8_t*>(returnAddress) - 4,
                   reinterpret_cast<intptr_t>(newTarget));
    } break;

    case LongCall:
    case LongJump:
    case AlignedLongCall:
    case AlignedLongJump: {
      uint32_t* p = static_cast<uint32_t*>(returnAddress) - 2;
      *reinterpret_cast<void**>(p + (((*p & PoolOffsetMask) + 8) / 4))
        = newTarget;
    } break;

    default: abort(&con);
    }
  }

  virtual unsigned constantCallSize() {
    return 4;
  }

  virtual void setConstant(void* dst, uint64_t constant) {
    *static_cast<target_uintptr_t*>(dst) = constant;
  }

  virtual unsigned alignFrameSize(unsigned sizeInWords) {
    return pad(sizeInWords + FrameHeaderSize, StackAlignmentInWords)
      - FrameHeaderSize;
  }
2011-01-28 17:16:08 -07:00
|
|
|
virtual void nextFrame(void* start, unsigned size, unsigned footprint,
|
fix a couple of subtle Thread.getStackTrace bugs
The first problem was that, on x86, we failed to properly keep track
of whether to expect the return address to be on the stack or not when
unwinding through a frame. We were relying on a "stackLimit" pointer
to tell us whether we were looking at the most recently-called frame
by comparing it with the stack pointer for that frame. That was
inaccurate in the case of a thread executing at the beginning of a
method before a new frame is allocated, in which case the most recent
two frames share a stack pointer, confusing the unwinder. The
solution involves keeping track of how many frames we've looked at
while walking the stack.
The other problem was that compareIpToMethodBounds assumed every
method was followed by at least one byte of padding before the next
method started. That assumption was usually valid because we were
storing the size following method code prior to the code itself.
However, the last method of an AOT-compiled code image is not followed
by any such method header and may instead be followed directly by
native code with no intervening padding. In that case, we risk
interpreting that native code as part of the preceding method, with
potentially bizarre results.
The reason for the compareIpToMethodBounds assumption was that methods
which throw exceptions as their last instruction generate a
non-returning call, which nonetheless push a return address on the
stack which points past the end of the method, and the unwinder needs
to know that return address belongs to that method. A better solution
is to add an extra trap instruction to the end of such methods, which
is what this patch does.
2012-05-04 18:35:13 -06:00
|
|
|
void* link, bool mostRecent,
|
2011-01-28 17:16:08 -07:00
|
|
|
unsigned targetParameterFootprint, void** ip,
|
|
|
|
void** stack)
|
|
|
|
{
|
2012-06-12 17:00:45 -06:00
|
|
|
::nextFrame(&con, static_cast<uint32_t*>(start), size, footprint, link,
|
fix a couple of subtle Thread.getStackTrace bugs
The first problem was that, on x86, we failed to properly keep track
of whether to expect the return address to be on the stack or not when
unwinding through a frame. We were relying on a "stackLimit" pointer
to tell us whether we were looking at the most recently-called frame
by comparing it with the stack pointer for that frame. That was
inaccurate in the case of a thread executing at the beginning of a
method before a new frame is allocated, in which case the most recent
two frames share a stack pointer, confusing the unwinder. The
solution involves keeping track of how many frames we've looked at
while walking the stack.
The other problem was that compareIpToMethodBounds assumed every
method was followed by at least one byte of padding before the next
method started. That assumption was usually valid because we were
storing the size following method code prior to the code itself.
However, the last method of an AOT-compiled code image is not followed
by any such method header and may instead be followed directly by
native code with no intervening padding. In that case, we risk
interpreting that native code as part of the preceding method, with
potentially bizarre results.
The reason for the compareIpToMethodBounds assumption was that methods
which throw exceptions as their last instruction generate a
non-returning call, which nonetheless push a return address on the
stack which points past the end of the method, and the unwinder needs
to know that return address belongs to that method. A better solution
is to add an extra trap instruction to the end of such methods, which
is what this patch does.
2012-05-04 18:35:13 -06:00
|
|
|
mostRecent, targetParameterFootprint, ip, stack);
|
2011-01-28 17:16:08 -07:00
|
|
|
}
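
  // There is no frame pointer on this target: unwinding consults a map
  // of instruction offsets to frame sizes, and the return address
  // lives in the word just below the recorded stack pointer, hence the
  // returnAddressOffset() of -1 and frameIp reading stack[-1].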
  virtual void* frameIp(void* stack) {
    return stack ? static_cast<void**>(stack)[returnAddressOffset()] : 0;
  }

  virtual unsigned frameHeaderSize() {
    return FrameHeaderSize;
  }

  virtual unsigned frameReturnAddressSize() {
    return 0;
  }

  virtual unsigned frameFooterSize() {
    return 0;
  }

  virtual int returnAddressOffset() {
    return -1;
  }

  virtual int framePointerOffset() {
    return 0;
  }

  virtual BinaryOperation hasBinaryIntrinsic(Thread*, object) {
    return NoBinaryOperation;
  }

  virtual TernaryOperation hasTernaryIntrinsic(Thread*, object) {
    return NoTernaryOperation;
  }

  virtual bool alwaysCondensed(BinaryOperation) {
    return false;
  }

  virtual bool alwaysCondensed(TernaryOperation) {
    return false;
  }
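
  // The plan* methods describe, for each operation, which operand
  // types and registers the instruction selector may use; setting
  // *thunk tells the compiler to call a runtime helper instead of
  // emitting inline code.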
  virtual void plan
  (UnaryOperation,
   unsigned, uint8_t* aTypeMask, uint64_t* aRegisterMask,
   bool* thunk)
  {
    *aTypeMask = (1 << RegisterOperand) | (1 << ConstantOperand);
    *aRegisterMask = ~static_cast<uint64_t>(0);
    *thunk = false;
  }

  virtual void planSource
  (BinaryOperation op,
   unsigned aSize, uint8_t* aTypeMask, uint64_t* aRegisterMask,
   unsigned bSize, bool* thunk)
  {
    *thunk = false;
    *aTypeMask = ~0;
    *aRegisterMask = GPR_MASK64;

    switch (op) {
    case Negate:
      *aTypeMask = (1 << RegisterOperand);
      *aRegisterMask = GPR_MASK64;
      break;

    case Absolute:
      *thunk = true;
      break;

    case FloatAbsolute:
    case FloatSquareRoot:
    case FloatNegate:
    case Float2Float:
      if (vfpSupported()) {
        *aTypeMask = (1 << RegisterOperand);
        *aRegisterMask = FPR_MASK64;
      } else {
        *thunk = true;
      }
      break;

    case Float2Int:
      // todo: Java requires different semantics than SSE for
      // converting floats to integers, so we need to either use
      // thunks or produce inline machine code which handles edge
      // cases properly.
      if (false && vfpSupported() && bSize == 4) {
        *aTypeMask = (1 << RegisterOperand);
        *aRegisterMask = FPR_MASK64;
      } else {
        *thunk = true;
      }
      break;

    case Int2Float:
      if (vfpSupported() && aSize == 4) {
        *aTypeMask = (1 << RegisterOperand);
        *aRegisterMask = GPR_MASK64;
      } else {
        *thunk = true;
      }
      break;

    default:
      break;
    }
  }

  virtual void planDestination
  (BinaryOperation op,
   unsigned, uint8_t aTypeMask, uint64_t,
   unsigned, uint8_t* bTypeMask, uint64_t* bRegisterMask)
  {
    *bTypeMask = (1 << RegisterOperand) | (1 << MemoryOperand);
    *bRegisterMask = GPR_MASK64;

    switch (op) {
    case Negate:
      *bTypeMask = (1 << RegisterOperand);
      *bRegisterMask = GPR_MASK64;
      break;

    case FloatAbsolute:
    case FloatSquareRoot:
    case FloatNegate:
    case Float2Float:
    case Int2Float:
      *bTypeMask = (1 << RegisterOperand);
      *bRegisterMask = FPR_MASK64;
      break;

    case Float2Int:
      *bTypeMask = (1 << RegisterOperand);
      *bRegisterMask = GPR_MASK64;
      break;

    case Move:
      if (!(aTypeMask & 1 << RegisterOperand)) {
        *bTypeMask = 1 << RegisterOperand;
      }
      break;

    default:
      break;
    }
  }

  virtual void planMove
  (unsigned, uint8_t* srcTypeMask, uint64_t* srcRegisterMask,
   uint8_t* tmpTypeMask, uint64_t* tmpRegisterMask,
   uint8_t dstTypeMask, uint64_t dstRegisterMask)
  {
    *srcTypeMask = ~0;
    *srcRegisterMask = ~static_cast<uint64_t>(0);

    *tmpTypeMask = 0;
    *tmpRegisterMask = 0;

    if (dstTypeMask & (1 << MemoryOperand)) {
      // can't move directly from memory or constant to memory
      *srcTypeMask = 1 << RegisterOperand;
      *tmpTypeMask = 1 << RegisterOperand;
      *tmpRegisterMask = GPR_MASK64;
    } else if (vfpSupported() &&
               dstTypeMask & 1 << RegisterOperand &&
               dstRegisterMask & FPR_MASK) {
      *srcTypeMask = *tmpTypeMask = 1 << RegisterOperand |
        1 << MemoryOperand;
      *tmpRegisterMask = ~static_cast<uint64_t>(0);
    }
  }

  virtual void planSource
  (TernaryOperation op,
   unsigned, uint8_t* aTypeMask, uint64_t* aRegisterMask,
   unsigned bSize, uint8_t* bTypeMask, uint64_t* bRegisterMask,
   unsigned, bool* thunk)
  {
    *aTypeMask = (1 << RegisterOperand) | (1 << ConstantOperand);
    *aRegisterMask = GPR_MASK64;

    *bTypeMask = (1 << RegisterOperand);
    *bRegisterMask = GPR_MASK64;

    *thunk = false;

    switch (op) {
    case ShiftLeft:
    case ShiftRight:
    case UnsignedShiftRight:
      if (bSize == 8) *aTypeMask = *bTypeMask = (1 << RegisterOperand);
      break;

    case Add:
    case Subtract:
    case Or:
    case Xor:
    case Multiply:
      *aTypeMask = *bTypeMask = (1 << RegisterOperand);
      break;

    case Divide:
    case Remainder:
    case FloatRemainder:
      *thunk = true;
      break;

    case FloatAdd:
    case FloatSubtract:
    case FloatMultiply:
    case FloatDivide:
      if (vfpSupported()) {
        *aTypeMask = *bTypeMask = (1 << RegisterOperand);
        *aRegisterMask = *bRegisterMask = FPR_MASK64;
      } else {
        *thunk = true;
      }
      break;

    case JumpIfFloatEqual:
    case JumpIfFloatNotEqual:
    case JumpIfFloatLess:
    case JumpIfFloatGreater:
    case JumpIfFloatLessOrEqual:
    case JumpIfFloatGreaterOrEqual:
    case JumpIfFloatLessOrUnordered:
    case JumpIfFloatGreaterOrUnordered:
    case JumpIfFloatLessOrEqualOrUnordered:
    case JumpIfFloatGreaterOrEqualOrUnordered:
      if (vfpSupported()) {
        *aTypeMask = *bTypeMask = (1 << RegisterOperand);
        *aRegisterMask = *bRegisterMask = FPR_MASK64;
      } else {
        *thunk = true;
      }
      break;

    default:
      break;
    }
  }

  virtual void planDestination
  (TernaryOperation op,
   unsigned, uint8_t, uint64_t,
   unsigned, uint8_t, const uint64_t bRegisterMask,
   unsigned, uint8_t* cTypeMask, uint64_t* cRegisterMask)
  {
    if (isBranch(op)) {
      *cTypeMask = (1 << ConstantOperand);
      *cRegisterMask = 0;
    } else {
      *cTypeMask = (1 << RegisterOperand);
      *cRegisterMask = bRegisterMask;
    }
  }

  virtual void acquire() {
    ++ referenceCount;
  }

  virtual void release() {
    if (-- referenceCount == 0) {
      con.s->free(this);
    }
  }

  ArchitectureContext con;
  unsigned referenceCount;
};

class MyAssembler: public Assembler {
 public:
  MyAssembler(System* s, Allocator* a, Zone* zone, MyArchitecture* arch):
    con(s, a, zone), arch_(arch)
  { }

  virtual void setClient(Client* client) {
    assert(&con, con.client == 0);
    con.client = client;
  }

  virtual Architecture* arch() {
    return arch_;
  }
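
  // Emit a comparison of the stack pointer against the limit stored in
  // the thread structure, branching to the given handler when the
  // check fails.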
  virtual void checkStackOverflow(uintptr_t handler,
                                  unsigned stackLimitOffsetFromThread)
  {
    Register stack(StackRegister);
    Memory stackLimit(ThreadRegister, stackLimitOffsetFromThread);
    Constant handlerConstant(new(con.zone) ResolvedPromise(handler));
    branchRM(&con, JumpIfGreaterOrEqual, TargetBytesPerWord, &stack,
             &stackLimit, &handlerConstant);
  }

  virtual void saveFrame(unsigned stackOffset, unsigned ipOffset) {
    Register link(LinkRegister);
    Memory linkDst(ThreadRegister, ipOffset);
    moveRM(&con, TargetBytesPerWord, &link, TargetBytesPerWord, &linkDst);

    Register stack(StackRegister);
    Memory stackDst(ThreadRegister, stackOffset);
    moveRM(&con, TargetBytesPerWord, &stack, TargetBytesPerWord, &stackDst);
  }
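
  // Marshal arguments into place for a call: the first
  // argumentRegisterCount() arguments go in registers, and the rest
  // are stored to stack slots of the frame allocated below.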
  virtual void pushFrame(unsigned argumentCount, ...) {
    struct {
      unsigned size;
      OperandType type;
      Operand* operand;
    } arguments[argumentCount];

    va_list a; va_start(a, argumentCount);
    unsigned footprint = 0;
    for (unsigned i = 0; i < argumentCount; ++i) {
      arguments[i].size = va_arg(a, unsigned);
      arguments[i].type = static_cast<OperandType>(va_arg(a, int));
      arguments[i].operand = va_arg(a, Operand*);
      footprint += ceiling(arguments[i].size, TargetBytesPerWord);
    }
    va_end(a);

    allocateFrame(arch_->alignFrameSize(footprint));

    unsigned offset = 0;
    for (unsigned i = 0; i < argumentCount; ++i) {
      if (i < arch_->argumentRegisterCount()) {
        Register dst(arch_->argumentRegister(i));

        apply(Move,
              arguments[i].size, arguments[i].type, arguments[i].operand,
              pad(arguments[i].size, TargetBytesPerWord), RegisterOperand,
              &dst);

        offset += ceiling(arguments[i].size, TargetBytesPerWord);
      } else {
        Memory dst(StackRegister, offset * TargetBytesPerWord);

        apply(Move,
              arguments[i].size, arguments[i].type, arguments[i].operand,
              pad(arguments[i].size, TargetBytesPerWord), MemoryOperand, &dst);

        offset += ceiling(arguments[i].size, TargetBytesPerWord);
      }
    }
  }
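
  // A frame is FrameHeaderSize words of header plus the requested
  // footprint; the return address is saved in the frame's top word so
  // popFrame can restore it into the link register.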
  virtual void allocateFrame(unsigned footprint) {
    footprint += FrameHeaderSize;

    // larger frames may require multiple subtract/add instructions
    // to allocate/deallocate, and nextFrame will need to be taught
    // how to handle them:
    assert(&con, footprint < 256);

    Register stack(StackRegister);
    ResolvedPromise footprintPromise(footprint * TargetBytesPerWord);
    Constant footprintConstant(&footprintPromise);
    subC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack);

    Register returnAddress(LinkRegister);
    Memory returnAddressDst
      (StackRegister, (footprint - 1) * TargetBytesPerWord);
    moveRM(&con, TargetBytesPerWord, &returnAddress, TargetBytesPerWord,
           &returnAddressDst);
  }

  virtual void adjustFrame(unsigned difference) {
    Register stack(StackRegister);
    ResolvedPromise differencePromise(difference * TargetBytesPerWord);
    Constant differenceConstant(&differencePromise);
    subC(&con, TargetBytesPerWord, &differenceConstant, &stack, &stack);
  }

  virtual void popFrame(unsigned footprint) {
    footprint += FrameHeaderSize;

    Register returnAddress(LinkRegister);
    Memory returnAddressSrc
      (StackRegister, (footprint - 1) * TargetBytesPerWord);
    moveMR(&con, TargetBytesPerWord, &returnAddressSrc, TargetBytesPerWord,
           &returnAddress);

    Register stack(StackRegister);
    ResolvedPromise footprintPromise(footprint * TargetBytesPerWord);
    Constant footprintConstant(&footprintPromise);
    addC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack);
  }
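
  // For a tail call whose argument footprint differs from the
  // caller's, reload the saved return address into the link register,
  // shrink the frame so the new arguments overlay the old ones, and,
  // if the return address was supplied in a surrogate register, store
  // it where the callee will expect to find it.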
  virtual void popFrameForTailCall(unsigned footprint,
                                   int offset,
                                   int returnAddressSurrogate,
                                   int framePointerSurrogate UNUSED)
  {
    assert(&con, framePointerSurrogate == NoRegister);

    if (TailCalls) {
      if (offset) {
        footprint += FrameHeaderSize;

        Register link(LinkRegister);
        Memory returnAddressSrc
          (StackRegister, (footprint - 1) * TargetBytesPerWord);
        moveMR(&con, TargetBytesPerWord, &returnAddressSrc, TargetBytesPerWord,
               &link);

        Register stack(StackRegister);
        ResolvedPromise footprintPromise
          ((footprint - offset) * TargetBytesPerWord);
        Constant footprintConstant(&footprintPromise);
        addC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack);

        if (returnAddressSurrogate != NoRegister) {
          assert(&con, offset > 0);

          Register ras(returnAddressSurrogate);
          Memory dst(StackRegister, (offset - 1) * TargetBytesPerWord);
          moveRM(&con, TargetBytesPerWord, &ras, TargetBytesPerWord, &dst);
        }
      } else {
        popFrame(footprint);
      }
    } else {
      abort(&con);
    }
  }

  virtual void popFrameAndPopArgumentsAndReturn(unsigned frameFootprint,
                                                unsigned argumentFootprint)
  {
    popFrame(frameFootprint);

    assert(&con, argumentFootprint >= StackAlignmentInWords);
    assert(&con, (argumentFootprint % StackAlignmentInWords) == 0);

    unsigned offset;
    if (TailCalls and argumentFootprint > StackAlignmentInWords) {
      offset = argumentFootprint - StackAlignmentInWords;

      Register stack(StackRegister);
      ResolvedPromise adjustmentPromise(offset * TargetBytesPerWord);
      Constant adjustment(&adjustmentPromise);
      addC(&con, TargetBytesPerWord, &adjustment, &stack, &stack);
    } else {
      offset = 0;
    }

    return_(&con);
  }

  virtual void popFrameAndUpdateStackAndReturn(unsigned frameFootprint,
                                               unsigned stackOffsetFromThread)
  {
    popFrame(frameFootprint);

    Register stack(StackRegister);
    Memory newStackSrc(ThreadRegister, stackOffsetFromThread);
    moveMR(&con, TargetBytesPerWord, &newStackSrc, TargetBytesPerWord, &stack);

    return_(&con);
  }
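
  // The apply overloads dispatch through the operation tables built by
  // populateTables, indexed by operation and operand type(s).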
  virtual void apply(Operation op) {
    arch_->con.operations[op](&con);
  }

  virtual void apply(UnaryOperation op,
                     unsigned aSize, OperandType aType, Operand* aOperand)
  {
    arch_->con.unaryOperations[index(&(arch_->con), op, aType)]
      (&con, aSize, aOperand);
  }

  virtual void apply(BinaryOperation op,
                     unsigned aSize, OperandType aType, Operand* aOperand,
                     unsigned bSize, OperandType bType, Operand* bOperand)
  {
    arch_->con.binaryOperations[index(&(arch_->con), op, aType, bType)]
      (&con, aSize, aOperand, bSize, bOperand);
  }

  virtual void apply(TernaryOperation op,
                     unsigned aSize, OperandType aType, Operand* aOperand,
                     unsigned bSize, OperandType bType UNUSED,
                     Operand* bOperand,
                     unsigned cSize UNUSED, OperandType cType UNUSED,
                     Operand* cOperand)
  {
    if (isBranch(op)) {
      assert(&con, aSize == bSize);
      assert(&con, cSize == TargetBytesPerWord);
      assert(&con, cType == ConstantOperand);

      arch_->con.branchOperations[branchIndex(&(arch_->con), aType, bType)]
        (&con, op, aSize, aOperand, bOperand, cOperand);
    } else {
      assert(&con, bSize == cSize);
      assert(&con, bType == RegisterOperand);
      assert(&con, cType == RegisterOperand);

      arch_->con.ternaryOperations[index(&(arch_->con), op, aType)]
        (&con, bSize, aOperand, bOperand, cOperand);
    }
  }

  virtual void setDestination(uint8_t* dst) {
    con.result = dst;
  }
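
  // Copy each block of generated code into place, flushing pending
  // constant pools at the recorded pool events.  Each literal load is
  // patched with the PC-relative offset of its pool entry (which must
  // fit in PoolOffsetMask); the "- 8" terms reflect the ARM convention
  // that the PC reads as the current instruction's address plus 8.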
  virtual void write() {
    uint8_t* dst = con.result;
    unsigned dstOffset = 0;
    for (MyBlock* b = con.firstBlock; b; b = b->next) {
      if (DebugPool) {
        fprintf(stderr, "write block %p\n", b);
      }

      unsigned blockOffset = 0;
      for (PoolEvent* e = b->poolEventHead; e; e = e->next) {
        unsigned size = e->offset - blockOffset;
        memcpy(dst + dstOffset, con.code.data + b->offset + blockOffset, size);
        blockOffset = e->offset;
        dstOffset += size;

        unsigned poolSize = 0;
        for (PoolOffset* o = e->poolOffsetHead; o; o = o->next) {
          if (DebugPool) {
            fprintf(stderr, "visit pool offset %p %d in block %p\n",
                    o, o->offset, b);
          }

          unsigned entry = dstOffset + poolSize;

          if (needJump(b)) {
            entry += TargetBytesPerWord;
          }

          o->entry->address = dst + entry;

          unsigned instruction = o->block->start
            + padding(o->block, o->offset) + o->offset;

          int32_t v = (entry - 8) - instruction;
          expect(&con, v == (v & PoolOffsetMask));

          int32_t* p = reinterpret_cast<int32_t*>(dst + instruction);
          *p = (v & PoolOffsetMask) | ((~PoolOffsetMask) & *p);

          poolSize += TargetBytesPerWord;
        }

        bool jump = needJump(b);
        if (jump) {
          write4
            (dst + dstOffset, ::b((poolSize + TargetBytesPerWord - 8) >> 2));
        }

        dstOffset += poolSize + (jump ? TargetBytesPerWord : 0);
      }

      unsigned size = b->size - blockOffset;

      memcpy(dst + dstOffset,
             con.code.data + b->offset + blockOffset,
             size);

      dstOffset += size;
    }

    for (Task* t = con.tasks; t; t = t->next) {
      t->run(&con);
    }

    for (ConstantPoolEntry* e = con.constantPool; e; e = e->next) {
      if (e->constant->resolved()) {
        *static_cast<target_uintptr_t*>(e->address) = e->constant->value();
      } else {
        new (e->constant->listen(sizeof(ConstantPoolListener)))
          ConstantPoolListener(con.s, static_cast<target_uintptr_t*>(e->address),
                               e->callOffset
                               ? dst + e->callOffset->value() + 8
                               : 0);
      }
      // fprintf(stderr, "constant %p at %p\n", reinterpret_cast<void*>(e->constant->value()), e->address);
    }
  }

  virtual Promise* offset(bool forTrace) {
    return ::offset(&con, forTrace);
  }

  virtual Block* endBlock(bool startNew) {
    MyBlock* b = con.lastBlock;
    b->size = con.code.length() - b->offset;
    if (startNew) {
      con.lastBlock = new (con.zone) MyBlock(&con, con.code.length());
    } else {
      con.lastBlock = 0;
    }
    return b;
  }
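
  // At each event boundary, check whether the oldest pending
  // constant-pool reference is about to drift out of the range a
  // PC-relative load can reach; if so, schedule a pool flush at the
  // previous event's offset.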
  virtual void endEvent() {
    MyBlock* b = con.lastBlock;
    unsigned thisEventOffset = con.code.length() - b->offset;
    if (b->poolOffsetHead) {
      int32_t v = (thisEventOffset + TargetBytesPerWord - 8)
        - b->poolOffsetHead->offset;

      if (v > 0 and v != (v & PoolOffsetMask)) {
        appendPoolEvent
          (&con, b, b->lastEventOffset, b->poolOffsetHead,
           b->lastPoolOffsetTail);

        if (DebugPool) {
          for (PoolOffset* o = b->poolOffsetHead;
               o != b->lastPoolOffsetTail->next; o = o->next)
          {
            fprintf(stderr,
                    "in endEvent, include %p %d in pool event %p at offset %d "
                    "in block %p\n",
                    o, o->offset, b->poolEventTail, b->lastEventOffset, b);
          }
        }

        b->poolOffsetHead = b->lastPoolOffsetTail->next;
        b->lastPoolOffsetTail->next = 0;
        if (b->poolOffsetHead == 0) {
          b->poolOffsetTail = 0;
        }
      }
    }
    b->lastEventOffset = thisEventOffset;
    b->lastPoolOffsetTail = b->poolOffsetTail;
  }

  virtual unsigned length() {
    return con.code.length();
  }

  virtual unsigned footerSize() {
    return 0;
  }

  virtual void dispose() {
    con.code.dispose();
  }

  Context con;
  MyArchitecture* arch_;
};

} // namespace

namespace vm {

Assembler::Architecture*
makeArchitecture(System* system, bool)
{
  return new (allocate(system, sizeof(MyArchitecture))) MyArchitecture(system);
}

Assembler*
makeAssembler(System* system, Allocator* allocator, Zone* zone,
              Assembler::Architecture* architecture)
{
  return new(zone) MyAssembler(system, allocator, zone,
                               static_cast<MyArchitecture*>(architecture));
}

} // namespace vm