From 76bfcaa8c099ff2f6987304e96008062eced817a Mon Sep 17 00:00:00 2001
From: Joel Dice
Date: Mon, 29 Dec 2014 18:11:54 -0700
Subject: [PATCH] fix ARM64 bootimage=true build

This fixes a problem with atomically updating JIT-compiled static
calls to AOT-compiled code.  It turns out there was a problem with
the 32-bit ARM code as well, but we never hit it because it is
extremely unlikely that a code address can be loaded with a single
immediate load instruction on 32-bit ARM, since such an instruction
can only encode values with 8 significant bits.  I've fixed that as
well.
---
 src/codegen/target/arm/assembler.cpp    |  5 +++++
 src/codegen/target/arm/operations32.cpp | 13 ++++++++++++-
 src/codegen/target/arm/operations64.cpp |  2 +-
 3 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/src/codegen/target/arm/assembler.cpp b/src/codegen/target/arm/assembler.cpp
index 5d36fdf2f6..831be22fe4 100644
--- a/src/codegen/target/arm/assembler.cpp
+++ b/src/codegen/target/arm/assembler.cpp
@@ -320,8 +320,13 @@ class MyArchitecture : public Architecture {
     case lir::AlignedLongCall:
     case lir::AlignedLongJump: {
       uint32_t* p = static_cast<uint32_t*>(returnAddress) - 2;
+#if AVIAN_TARGET_ARCH == AVIAN_ARCH_ARM64
+      const int32_t mask = (PoolOffsetMask >> 2) << 5;
+      *reinterpret_cast<void**>(p + ((*p & mask) >> 5)) = newTarget;
+#else
       *reinterpret_cast<void**>(p + (((*p & PoolOffsetMask) + 8) / 4))
           = newTarget;
+#endif
     } break;
 
     default:
diff --git a/src/codegen/target/arm/operations32.cpp b/src/codegen/target/arm/operations32.cpp
index 77b5f2c6f9..e9cd601fe3 100644
--- a/src/codegen/target/arm/operations32.cpp
+++ b/src/codegen/target/arm/operations32.cpp
@@ -317,7 +317,8 @@ void moveCR2(Context* con,
     lir::RegisterPair dstHi(dst->high);
     moveCR(con, 4, &srcLo, 4, dst);
     moveCR(con, 4, &srcHi, 4, &dstHi);
-  } else if (src->value->resolved() and isOfWidth(getValue(src), 8)) {
+  } else if (callOffset == 0 and src->value->resolved()
+             and isOfWidth(getValue(src), 8)) {
     emit(con, movi(dst->low, lo8(getValue(src))));  // fits in immediate
   } else {
     appendConstantPoolEntry(con, src->value, callOffset);
@@ -1385,6 +1386,11 @@ void longCallC(Context* con, unsigned size UNUSED, lir::Constant* target)
   callR(con, vm::TargetBytesPerWord, &tmp);
 }
 
+void alignedLongCallC(Context* con, unsigned size, lir::Constant* target)
+{
+  longCallC(con, size, target);
+}
+
 void longJumpC(Context* con, unsigned size UNUSED, lir::Constant* target)
 {
   assertT(con, size == vm::TargetBytesPerWord);
@@ -1395,6 +1401,11 @@ void longJumpC(Context* con, unsigned size UNUSED, lir::Constant* target)
   jumpR(con, vm::TargetBytesPerWord, &tmp);
 }
 
+void alignedLongJumpC(Context* con, unsigned size, lir::Constant* target)
+{
+  longJumpC(con, size, target);
+}
+
 void jumpC(Context* con, unsigned size UNUSED, lir::Constant* target)
 {
   assertT(con, size == vm::TargetBytesPerWord);
diff --git a/src/codegen/target/arm/operations64.cpp b/src/codegen/target/arm/operations64.cpp
index 72291ea69c..5f3fc74df4 100644
--- a/src/codegen/target/arm/operations64.cpp
+++ b/src/codegen/target/arm/operations64.cpp
@@ -652,7 +652,7 @@ void moveCR2(Context* c,
     moveCR(c, size, src, size, &tmp);
     moveRR(c, size, &tmp, size, dst);
     c->client->releaseTemporary(tmp.low);
-  } else if (src->value->resolved()) {
+  } else if (callOffset == 0 and src->value->resolved()) {
     int64_t value = src->value->value();
     if (value >= 0) {
       append(c, movz(dst->low, value & 0xFFFF, 0, size));
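
A note on the ARM64 hunk in assembler.cpp: an aligned long call there is
an LDR (literal) that pulls the target address out of a constant pool,
so retargeting the call means rewriting the pool slot the LDR points at
rather than the instruction itself.  LDR (literal) encodes a signed
19-bit word offset in bits [23:5] of the instruction, which is why the
patch masks those bits and shifts right by 5.  Below is a minimal
standalone sketch of that decode, assuming (as the mask arithmetic
implies) that PoolOffsetMask spans the 21-bit byte-offset range of the
instruction; poolSlot is a hypothetical helper name, not code from the
tree.

    #include <cstdint>

    const int32_t PoolOffsetMask = 0x1FFFFF;  // assumed: 21-bit byte-offset range

    // Given p pointing at an ldr (literal) instruction, return the address
    // of the constant-pool word it loads from.  Bits [23:5] hold a word
    // offset relative to the instruction; Avian emits pools after the code,
    // so the offsets seen here are forward and need no sign extension.
    uint32_t* poolSlot(uint32_t* p)
    {
      const int32_t mask = (PoolOffsetMask >> 2) << 5;  // select bits [23:5]
      return p + ((*p & mask) >> 5);                    // word offset from p
    }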
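
The moveCR2 changes are the other half of the fix: a call target that
may later be patched arrives with a nonzero callOffset, and updateCall
can only retarget it by rewriting a pool entry, so moveCR2 must not fold
such a value into a single immediate load even when it happens to fit.
Here is a sketch of the selection logic after the fix, with isOfWidth
modeled as an unsigned range check (an assumption about the helper's
shape; the other names follow the patch).

    #include <cstdint>

    // Assumed shape of Avian's helper: true if v fits in `bits` unsigned bits.
    bool isOfWidth(int64_t v, int bits)
    {
      return static_cast<uint64_t>(v) >> bits == 0;
    }

    // A patchable call target (callOffset != 0) must take the constant-pool
    // path so its pool entry can be rewritten atomically later; only plain
    // resolved values narrow enough for one immediate load may skip the pool.
    bool canUseImmediate(int64_t value, unsigned callOffset, bool resolved)
    {
      return callOffset == 0 and resolved and isOfWidth(value, 8);
    }

The new alignedLongCallC/alignedLongJumpC entry points then simply reuse
the long-call emitters, which, given the guard above, always materialize
the target through the pool, so an AlignedLongCall on 32-bit ARM is
guaranteed a pool entry for updateCall to patch.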