author    buzbee <buzbee@google.com>    2012-03-05 11:19:57 -0800
committer buzbee <buzbee@google.com>    2012-03-05 11:22:32 -0800
commit    e88dfbf138bc204b1ce21911f1c34098ea74af7c (patch)
tree      c7ca47856bf4b47563cb31b455f299871f7b7a6f
parent    9831bfac0d2ef54a996ae02e0755600ecb5f66ea (diff)
x86 codegen source file skeletons
Add source files for x86 code generation. These are basically just the
MIPS versions with all bodies ifdef'd out and replaced with
UNIMPLEMENTED(WARNING).

There is also one real change in this CL related to the way the assembler
computes instruction size. We now set the initial instruction size into
the LIR during flag generation. For x86, this size may be changed during
assembly when we detect the ability to substitute a general instruction
for an equivalent one with a shorter encoding.

NOTE: the new files are not yet ready for compilation. That will be the
next CL.

Change-Id: Id94fbeeb7c3e36ba4a8d93c3807351ea20c74133
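[Editor's note] The sketch below is not part of the commit; it is a minimal,
self-contained illustration of the instruction-size pattern the message
describes: the target-independent flag-generation pass records each LIR's
initial template size (via the oatGetInsnSize() helper this CL introduces),
and a target assembler may later shrink that size when a more compact
encoding can be substituted. The x86 "shorten" step and its byte counts are
assumed for illustration only; the real x86 assembler is just a skeleton in
this CL.

    // Illustrative C++ sketch, not code from this commit.
    #include <cstdio>

    struct Lir {
      int opcode;
      struct { int size; } flags;   // byte size of the chosen encoding
      long operands[4];
    };

    struct Encoding { int size; };                 // per-opcode template, cf. EncodingMap
    static const Encoding kEncodingMap[] = {{5}};  // one dummy opcode with a 5-byte template

    static int oatGetInsnSize(const Lir* lir) {    // same shape as the helper added by this CL
      return kEncodingMap[lir->opcode].size;
    }

    static void setupResourceMasks(Lir* lir) {
      // ... resource-mask generation elided ...
      lir->flags.size = oatGetInsnSize(lir);       // initial size recorded during flag generation
    }

    // Hypothetical x86 assembly-time adjustment: pick a shorter form when the
    // immediate operand fits in a signed byte (imm8 instead of imm32, say).
    static void maybeShortenEncoding(Lir* lir) {
      if (lir->operands[1] >= -128 && lir->operands[1] <= 127) {
        lir->flags.size -= 3;                      // assumed saving, purely illustrative
      }
    }

    int main() {
      Lir add = {0, {0}, {0, 42, 0, 0}};
      setupResourceMasks(&add);                    // size starts at the template value
      maybeShortenEncoding(&add);                  // assembler later shrinks it
      std::printf("final size: %d bytes\n", add.flags.size);
    }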
-rw-r--r--  src/compiler/Compiler.h                        2
-rw-r--r--  src/compiler/codegen/CodegenUtil.cc            3
-rw-r--r--  src/compiler/codegen/CompilerCodegen.h         2
-rw-r--r--  src/compiler/codegen/arm/Assemble.cc           8
-rw-r--r--  src/compiler/codegen/mips/Assemble.cc          6
-rw-r--r--  src/compiler/codegen/x86/ArchFactory.cc      304
-rw-r--r--  src/compiler/codegen/x86/ArchUtility.cc      185
-rw-r--r--  src/compiler/codegen/x86/Assemble.cc         324
-rw-r--r--  src/compiler/codegen/x86/Codegen.h           104
-rw-r--r--  src/compiler/codegen/x86/FP/X86FP.cc         241
-rw-r--r--  src/compiler/codegen/x86/X86/Factory.cc      801
-rw-r--r--  src/compiler/codegen/x86/X86/Gen.cc          541
-rw-r--r--  src/compiler/codegen/x86/X86/Ralloc.cc       134
-rw-r--r--  src/compiler/codegen/x86/X86RallocUtil.cc    226
-rw-r--r--  src/compiler/codegen/x86/x86/ArchVariant.cc   62
-rw-r--r--  src/compiler/codegen/x86/x86/Codegen.cc       55
16 files changed, 2992 insertions, 6 deletions
diff --git a/src/compiler/Compiler.h b/src/compiler/Compiler.h
index c7a1ac3..1d89c66 100644
--- a/src/compiler/Compiler.h
+++ b/src/compiler/Compiler.h
@@ -43,7 +43,7 @@ typedef enum OatInstructionSetType {
DALVIK_OAT_NONE = 0,
DALVIK_OAT_ARM,
DALVIK_OAT_THUMB2,
- DALVIK_OAT_IA32,
+ DALVIK_OAT_X86,
DALVIK_OAT_MIPS32
} OatInstructionSetType;
diff --git a/src/compiler/codegen/CodegenUtil.cc b/src/compiler/codegen/CodegenUtil.cc
index 07eb672..7f80311 100644
--- a/src/compiler/codegen/CodegenUtil.cc
+++ b/src/compiler/codegen/CodegenUtil.cc
@@ -115,6 +115,9 @@ void setupResourceMasks(LIR* lir)
lir->flags.pcRelFixup = true;
}
+ /* Get the starting size of the instruction's template */
+ lir->flags.size = oatGetInsnSize(lir);
+
/* Set up the mask for resources that are updated */
if (flags & (IS_LOAD | IS_STORE)) {
/* Default to heap - will catch specialized classes later */
diff --git a/src/compiler/codegen/CompilerCodegen.h b/src/compiler/codegen/CompilerCodegen.h
index fdcb555..1b54be9 100644
--- a/src/compiler/codegen/CompilerCodegen.h
+++ b/src/compiler/codegen/CompilerCodegen.h
@@ -24,6 +24,8 @@ namespace art {
LIR* rawLIR(CompilationUnit* cUnit, int dalvikOffset, int opcode, int op0 = 0,
int op1 = 0, int op2 = 0, int op3 = 0, LIR* target = NULL);
+int oatGetInsnSize(LIR* lir);
+
/* Lower middle-level IR to low-level IR for the whole method */
void oatMethodMIR2LIR(CompilationUnit* cUnit);
diff --git a/src/compiler/codegen/arm/Assemble.cc b/src/compiler/codegen/arm/Assemble.cc
index acd2af5..a1f7bd7 100644
--- a/src/compiler/codegen/arm/Assemble.cc
+++ b/src/compiler/codegen/arm/Assemble.cc
@@ -1360,10 +1360,13 @@ AssemblerStatus oatAssembleInstructions(CompilationUnit* cUnit,
return res;
}
+int oatGetInsnSize(LIR* lir)
+{
+ return EncodingMap[lir->opcode].size;
+}
+
/*
* Target-dependent offset assignment.
- * TODO: normalize usage of flags.size and make this target
- * independent.
*/
int oatAssignInsnOffsets(CompilationUnit* cUnit)
{
@@ -1376,7 +1379,6 @@ int oatAssignInsnOffsets(CompilationUnit* cUnit)
armLIR->offset = offset;
if (armLIR->opcode >= 0) {
if (!armLIR->flags.isNop) {
- armLIR->flags.size = EncodingMap[armLIR->opcode].size;
offset += armLIR->flags.size;
}
} else if (armLIR->opcode == kPseudoPseudoAlign4) {
diff --git a/src/compiler/codegen/mips/Assemble.cc b/src/compiler/codegen/mips/Assemble.cc
index a70d9da..b487602 100644
--- a/src/compiler/codegen/mips/Assemble.cc
+++ b/src/compiler/codegen/mips/Assemble.cc
@@ -706,9 +706,12 @@ AssemblerStatus oatAssembleInstructions(CompilationUnit *cUnit,
return res;
}
+int oatGetInsnSize(LIR* lir)
+{
+ return EncodingMap[lir->opcode].size;
+}
/*
* Target-dependent offset assignment.
- * TODO: normalize usage of flags.size and make this target
* independent.
*/
int oatAssignInsnOffsets(CompilationUnit* cUnit)
@@ -722,7 +725,6 @@ int oatAssignInsnOffsets(CompilationUnit* cUnit)
mipsLIR->offset = offset;
if (mipsLIR->opcode >= 0) {
if (!mipsLIR->flags.isNop) {
- mipsLIR->flags.size = EncodingMap[mipsLIR->opcode].size;
offset += mipsLIR->flags.size;
}
} else if (mipsLIR->opcode == kPseudoPseudoAlign4) {
diff --git a/src/compiler/codegen/x86/ArchFactory.cc b/src/compiler/codegen/x86/ArchFactory.cc
new file mode 100644
index 0000000..76f7c4a
--- /dev/null
+++ b/src/compiler/codegen/x86/ArchFactory.cc
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This file contains x86-specific codegen factory support.
+ * It is included by
+ *
+ * Codegen-$(TARGET_ARCH_VARIANT).c
+ *
+ */
+
+namespace art {
+
+bool genAddLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
+ RegLocation rlSrc1, RegLocation rlSrc2)
+{
+ UNIMPLEMENTED(WARNING) << "genAddLong";
+#if 0
+ rlSrc1 = loadValueWide(cUnit, rlSrc1, kCoreReg);
+ rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ /*
+ * [v1 v0] = [a1 a0] + [a3 a2];
+ * addu v0,a2,a0
+ * addu t1,a3,a1
+ * sltu v1,v0,a2
+ * addu v1,v1,t1
+ */
+
+ opRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc2.lowReg, rlSrc1.lowReg);
+ int tReg = oatAllocTemp(cUnit);
+ opRegRegReg(cUnit, kOpAdd, tReg, rlSrc2.highReg, rlSrc1.highReg);
+ newLIR3(cUnit, kMipsSltu, rlResult.highReg, rlResult.lowReg, rlSrc2.lowReg);
+ opRegRegReg(cUnit, kOpAdd, rlResult.highReg, rlResult.highReg, tReg);
+ oatFreeTemp(cUnit, tReg);
+ storeValueWide(cUnit, rlDest, rlResult);
+#endif
+ return false;
+}
+
+bool genSubLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
+ RegLocation rlSrc1, RegLocation rlSrc2)
+{
+ UNIMPLEMENTED(WARNING) << "genSubLong";
+#if 0
+ rlSrc1 = loadValueWide(cUnit, rlSrc1, kCoreReg);
+ rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ /*
+ * [v1 v0] = [a1 a0] - [a3 a2];
+ * subu v0,a0,a2
+ * subu v1,a1,a3
+ * sltu t1,a0,v0
+ * subu v1,v1,t1
+ */
+
+ opRegRegReg(cUnit, kOpSub, rlResult.lowReg, rlSrc1.lowReg, rlSrc2.lowReg);
+ opRegRegReg(cUnit, kOpSub, rlResult.highReg, rlSrc1.highReg, rlSrc2.highReg);
+ int tReg = oatAllocTemp(cUnit);
+ newLIR3(cUnit, kMipsSltu, tReg, rlSrc1.lowReg, rlResult.lowReg);
+ opRegRegReg(cUnit, kOpSub, rlResult.highReg, rlResult.highReg, tReg);
+ oatFreeTemp(cUnit, tReg);
+ storeValueWide(cUnit, rlDest, rlResult);
+#endif
+ return false;
+}
+
+bool genNegLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
+ RegLocation rlSrc)
+{
+ UNIMPLEMENTED(WARNING) << "genNegLong";
+#if 0
+ rlSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ /*
+ * [v1 v0] = -[a1 a0]
+ * negu v0,a0
+ * negu v1,a1
+ * sltu t1,r_zero
+ * subu v1,v1,t1
+ */
+
+ opRegReg(cUnit, kOpNeg, rlResult.lowReg, rlSrc.lowReg);
+ opRegReg(cUnit, kOpNeg, rlResult.highReg, rlSrc.highReg);
+ int tReg = oatAllocTemp(cUnit);
+ newLIR3(cUnit, kMipsSltu, tReg, r_ZERO, rlResult.lowReg);
+ opRegRegReg(cUnit, kOpSub, rlResult.highReg, rlResult.highReg, tReg);
+ oatFreeTemp(cUnit, tReg);
+ storeValueWide(cUnit, rlDest, rlResult);
+#endif
+ return false;
+}
+
+void genDebuggerUpdate(CompilationUnit* cUnit, int32_t offset);
+
+/*
+ * In the Arm code a it is typical to use the link register
+ * to hold the target address. However, for Mips we must
+ * ensure that all branch instructions can be restarted if
+ * there is a trap in the shadow. Allocate a temp register.
+ */
+int loadHelper(CompilationUnit* cUnit, int offset)
+{
+ int tReg = oatAllocTemp(cUnit);
+ loadWordDisp(cUnit, rSELF, offset, tReg);
+ return tReg;
+}
+
+void spillCoreRegs(CompilationUnit* cUnit)
+{
+ if (cUnit->numCoreSpills == 0) {
+ return;
+ }
+ UNIMPLEMENTED(WARNING) << "spillCoreRegs";
+#if 0
+ uint32_t mask = cUnit->coreSpillMask;
+ int offset = cUnit->numCoreSpills * 4;
+ opRegImm(cUnit, kOpSub, rSP, offset);
+ for (int reg = 0; mask; mask >>= 1, reg++) {
+ if (mask & 0x1) {
+ offset -= 4;
+ storeWordDisp(cUnit, rSP, offset, reg);
+ }
+ }
+#endif
+}
+
+void unSpillCoreRegs(CompilationUnit* cUnit)
+{
+ if (cUnit->numCoreSpills == 0) {
+ return;
+ }
+ UNIMPLEMENTED(WARNING) << "unSpillCoreRegs";
+#if 0
+ uint32_t mask = cUnit->coreSpillMask;
+ int offset = cUnit->frameSize;
+ for (int reg = 0; mask; mask >>= 1, reg++) {
+ if (mask & 0x1) {
+ offset -= 4;
+ loadWordDisp(cUnit, rSP, offset, reg);
+ }
+ }
+ opRegImm(cUnit, kOpAdd, rSP, cUnit->frameSize);
+#endif
+}
+
+void genEntrySequence(CompilationUnit* cUnit, BasicBlock* bb)
+{
+ UNIMPLEMENTED(WARNING) << "genEntrySequence";
+#if 0
+ int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
+ /*
+ * On entry, rARG0, rARG1, rARG2 & rARG3 are live. Let the register
+ * allocation mechanism know so it doesn't try to use any of them when
+ * expanding the frame or flushing. This leaves the utility
+ * code with a single temp: r12. This should be enough.
+ */
+ oatLockTemp(cUnit, rARG0);
+ oatLockTemp(cUnit, rARG1);
+ oatLockTemp(cUnit, rARG2);
+ oatLockTemp(cUnit, rARG3);
+
+ /*
+ * We can safely skip the stack overflow check if we're
+ * a leaf *and* our frame size < fudge factor.
+ */
+ bool skipOverflowCheck = ((cUnit->attrs & METHOD_IS_LEAF) &&
+ ((size_t)cUnit->frameSize <
+ Thread::kStackOverflowReservedBytes));
+ newLIR0(cUnit, kPseudoMethodEntry);
+ int checkReg = oatAllocTemp(cUnit);
+ int newSP = oatAllocTemp(cUnit);
+ if (!skipOverflowCheck) {
+ /* Load stack limit */
+ loadWordDisp(cUnit, rSELF,
+ Thread::StackEndOffset().Int32Value(), checkReg);
+ }
+ /* Spill core callee saves */
+ spillCoreRegs(cUnit);
+ /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
+ DCHECK_EQ(cUnit->numFPSpills, 0);
+ if (!skipOverflowCheck) {
+ opRegRegImm(cUnit, kOpSub, newSP, rSP,
+ cUnit->frameSize - (spillCount * 4));
+ genRegRegCheck(cUnit, kCondCc, newSP, checkReg, NULL,
+ kThrowStackOverflow);
+ opRegCopy(cUnit, rSP, newSP); // Establish stack
+ } else {
+ opRegImm(cUnit, kOpSub, rSP,
+ cUnit->frameSize - (spillCount * 4));
+ }
+ storeBaseDisp(cUnit, rSP, 0, rARG0, kWord);
+ flushIns(cUnit);
+
+ if (cUnit->genDebugger) {
+ // Refresh update debugger callout
+ loadWordDisp(cUnit, rSELF,
+ OFFSETOF_MEMBER(Thread, pUpdateDebuggerFromCode), rSUSPEND);
+ genDebuggerUpdate(cUnit, DEBUGGER_METHOD_ENTRY);
+ }
+
+ oatFreeTemp(cUnit, rARG0);
+ oatFreeTemp(cUnit, rARG1);
+ oatFreeTemp(cUnit, rARG2);
+ oatFreeTemp(cUnit, rARG3);
+#endif
+}
+
+void genExitSequence(CompilationUnit* cUnit, BasicBlock* bb)
+{
+ UNIMPLEMENTED(WARNING) << "genExitSequence";
+#if 0
+ /*
+ * In the exit path, rRET0/rRET1 are live - make sure they aren't
+ * allocated by the register utilities as temps.
+ */
+ oatLockTemp(cUnit, rRET0);
+ oatLockTemp(cUnit, rRET1);
+
+ newLIR0(cUnit, kPseudoMethodExit);
+ /* If we're compiling for the debugger, generate an update callout */
+ if (cUnit->genDebugger) {
+ genDebuggerUpdate(cUnit, DEBUGGER_METHOD_EXIT);
+ }
+ unSpillCoreRegs(cUnit);
+ opReg(cUnit, kOpBx, r_RA);
+#endif
+}
+
+/*
+ * Nop any unconditional branches that go to the next instruction.
+ * Note: new redundant branches may be inserted later, and we'll
+ * use a check in final instruction assembly to nop those out.
+ */
+void removeRedundantBranches(CompilationUnit* cUnit)
+{
+ UNIMPLEMENTED(WARNING) << "removeRedundantBranches";
+#if 0
+ LIR* thisLIR;
+
+ for (thisLIR = (LIR*) cUnit->firstLIRInsn;
+ thisLIR != (LIR*) cUnit->lastLIRInsn;
+ thisLIR = NEXT_LIR(thisLIR)) {
+
+ /* Branch to the next instruction */
+ if (thisLIR->opcode == kMipsB) {
+ LIR* nextLIR = thisLIR;
+
+ while (true) {
+ nextLIR = NEXT_LIR(nextLIR);
+
+ /*
+ * Is the branch target the next instruction?
+ */
+ if (nextLIR == (LIR*) thisLIR->target) {
+ thisLIR->flags.isNop = true;
+ break;
+ }
+
+ /*
+ * Found real useful stuff between the branch and the target.
+ * Need to explicitly check the lastLIRInsn here because it
+ * might be the last real instruction.
+ */
+ if (!isPseudoOpcode(nextLIR->opcode) ||
+ (nextLIR = (LIR*) cUnit->lastLIRInsn))
+ break;
+ }
+ }
+ }
+#endif
+}
+
+
+/* Common initialization routine for an architecture family */
+bool oatArchInit()
+{
+ int i;
+
+ for (i = 0; i < kX86Last; i++) {
+ if (EncodingMap[i].opcode != i) {
+ LOG(FATAL) << "Encoding order for " << EncodingMap[i].name <<
+ " is wrong: expecting " << i << ", seeing " <<
+ (int)EncodingMap[i].opcode;
+ }
+ }
+
+ return oatArchVariantInit();
+}
+
+} // namespace art
diff --git a/src/compiler/codegen/x86/ArchUtility.cc b/src/compiler/codegen/x86/ArchUtility.cc
new file mode 100644
index 0000000..18aa9f4
--- /dev/null
+++ b/src/compiler/codegen/x86/ArchUtility.cc
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "../../CompilerInternals.h"
+#include "X86LIR.h"
+#include "../Ralloc.h"
+
+#include <string>
+
+namespace art {
+
+/* For dumping instructions */
+#define X86_REG_COUNT 16
+static const char *x86RegName[X86_REG_COUNT] = {
+ "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+};
+
+/*
+ * Interpret a format string and build a string no longer than size
+ * See format key in Assemble.c.
+ */
+std::string buildInsnString(const char *fmt, LIR *lir, unsigned char* baseAddr)
+{
+ UNIMPLEMENTED(WARNING) << "buildInsnString";
+ return NULL;
+#if 0
+ std::string buf;
+ int i;
+ const char *fmtEnd = &fmt[strlen(fmt)];
+ char tbuf[256];
+ char nc;
+ while (fmt < fmtEnd) {
+ int operand;
+ if (*fmt == '!') {
+ fmt++;
+ DCHECK_LT(fmt, fmtEnd);
+ nc = *fmt++;
+ if (nc=='!') {
+ strcpy(tbuf, "!");
+ } else {
+ DCHECK_LT(fmt, fmtEnd);
+ DCHECK_LT((unsigned)(nc-'0'), 4u);
+ operand = lir->operands[nc-'0'];
+ switch(*fmt++) {
+ case 'b':
+ strcpy(tbuf,"0000");
+ for (i=3; i>= 0; i--) {
+ tbuf[i] += operand & 1;
+ operand >>= 1;
+ }
+ break;
+ case 's':
+ sprintf(tbuf,"$f%d",operand & FP_REG_MASK);
+ break;
+ case 'S':
+ DCHECK_EQ(((operand & FP_REG_MASK) & 1), 0);
+ sprintf(tbuf,"$f%d",operand & FP_REG_MASK);
+ break;
+ case 'h':
+ sprintf(tbuf,"%04x", operand);
+ break;
+ case 'M':
+ case 'd':
+ sprintf(tbuf,"%d", operand);
+ break;
+ case 'D':
+ sprintf(tbuf,"%d", operand+1);
+ break;
+ case 'E':
+ sprintf(tbuf,"%d", operand*4);
+ break;
+ case 'F':
+ sprintf(tbuf,"%d", operand*2);
+ break;
+ case 't':
+ sprintf(tbuf,"0x%08x (L%p)",
+ (int) baseAddr + lir->offset + 4 +
+ (operand << 2),
+ lir->target);
+ break;
+ case 'T':
+ sprintf(tbuf,"0x%08x",
+ (int) (operand << 2));
+ break;
+ case 'u': {
+ int offset_1 = lir->operands[0];
+ int offset_2 = NEXT_LIR(lir)->operands[0];
+ intptr_t target =
+ ((((intptr_t) baseAddr + lir->offset + 4) &
+ ~3) + (offset_1 << 21 >> 9) + (offset_2 << 1)) &
+ 0xfffffffc;
+ sprintf(tbuf, "%p", (void *) target);
+ break;
+ }
+
+ /* Nothing to print for BLX_2 */
+ case 'v':
+ strcpy(tbuf, "see above");
+ break;
+ case 'r':
+ DCHECK(operand >= 0 && operand < MIPS_REG_COUNT);
+ strcpy(tbuf, mipsRegName[operand]);
+ break;
+ case 'N':
+ // Placeholder for delay slot handling
+ strcpy(tbuf, "; nop");
+ break;
+ default:
+ strcpy(tbuf,"DecodeError");
+ break;
+ }
+ buf += tbuf;
+ }
+ } else {
+ buf += *fmt++;
+ }
+ }
+ return buf;
+#endif
+}
+
+void oatDumpResourceMask(LIR *lir, u8 mask, const char *prefix)
+{
+ UNIMPLEMENTED(WARNING) << "oatDumpResourceMasks";
+#if 0
+ char buf[256];
+ buf[0] = 0;
+ LIR *mipsLIR = (LIR *) lir;
+
+ if (mask == ENCODE_ALL) {
+ strcpy(buf, "all");
+ } else {
+ char num[8];
+ int i;
+
+ for (i = 0; i < kRegEnd; i++) {
+ if (mask & (1ULL << i)) {
+ sprintf(num, "%d ", i);
+ strcat(buf, num);
+ }
+ }
+
+ if (mask & ENCODE_CCODE) {
+ strcat(buf, "cc ");
+ }
+ if (mask & ENCODE_FP_STATUS) {
+ strcat(buf, "fpcc ");
+ }
+ /* Memory bits */
+ if (mipsLIR && (mask & ENCODE_DALVIK_REG)) {
+ sprintf(buf + strlen(buf), "dr%d%s", mipsLIR->aliasInfo & 0xffff,
+ (mipsLIR->aliasInfo & 0x80000000) ? "(+1)" : "");
+ }
+ if (mask & ENCODE_LITERAL) {
+ strcat(buf, "lit ");
+ }
+
+ if (mask & ENCODE_HEAP_REF) {
+ strcat(buf, "heap ");
+ }
+ if (mask & ENCODE_MUST_NOT_ALIAS) {
+ strcat(buf, "noalias ");
+ }
+ }
+ if (buf[0]) {
+ LOG(INFO) << prefix << ": " << buf;
+ }
+#endif
+}
+
+} // namespace art
diff --git a/src/compiler/codegen/x86/Assemble.cc b/src/compiler/codegen/x86/Assemble.cc
new file mode 100644
index 0000000..3614dce
--- /dev/null
+++ b/src/compiler/codegen/x86/Assemble.cc
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "../../Dalvik.h"
+#include "../../CompilerInternals.h"
+#include "X86LIR.h"
+#include "Codegen.h"
+#include <sys/mman.h> /* for protection change */
+
+namespace art {
+
+#define MAX_ASSEMBLER_RETRIES 50
+
+/*
+ * opcode: MipsOpCode enum
+ * skeleton: pre-designated bit-pattern for this opcode
+ * k0: key to applying ds/de
+ * ds: dest start bit position
+ * de: dest end bit position
+ * k1: key to applying s1s/s1e
+ * s1s: src1 start bit position
+ * s1e: src1 end bit position
+ * k2: key to applying s2s/s2e
+ * s2s: src2 start bit position
+ * s2e: src2 end bit position
+ * operands: number of operands (for sanity check purposes)
+ * name: mnemonic name
+ * fmt: for pretty-printing
+ */
+#define ENCODING_MAP(opcode, skeleton, k0, ds, de, k1, s1s, s1e, k2, s2s, s2e, \
+ k3, k3s, k3e, flags, name, fmt, size) \
+ {skeleton, {{k0, ds, de}, {k1, s1s, s1e}, {k2, s2s, s2e}, \
+ {k3, k3s, k3e}}, opcode, flags, name, fmt, size}
+
+/* Instruction dump string format keys: !pf, where "!" is the start
+ * of the key, "p" is which numeric operand to use and "f" is the
+ * print format.
+ *
+ * [p]ositions:
+ * 0 -> operands[0] (dest)
+ * 1 -> operands[1] (src1)
+ * 2 -> operands[2] (src2)
+ * 3 -> operands[3] (extra)
+ *
+ * [f]ormats:
+ * h -> 4-digit hex
+ * d -> decimal
+ * E -> decimal*4
+ * F -> decimal*2
+ * c -> branch condition (beq, bne, etc.)
+ * t -> pc-relative target
+ * T -> pc-region target
+ * u -> 1st half of bl[x] target
+ * v -> 2nd half ob bl[x] target
+ * R -> register list
+ * s -> single precision floating point register
+ * S -> double precision floating point register
+ * m -> Thumb2 modified immediate
+ * n -> complimented Thumb2 modified immediate
+ * M -> Thumb2 16-bit zero-extended immediate
+ * b -> 4-digit binary
+ * N -> append a NOP
+ *
+ * [!] escape. To insert "!", use "!!"
+ */
+/* NOTE: must be kept in sync with enum MipsOpcode from LIR.h */
+/*
+ * TUNING: We're currently punting on the branch delay slots. All branch
+ * instructions in this map are given a size of 8, which during assembly
+ * is expanded to include a nop. This scheme should be replaced with
+ * an assembler pass to fill those slots when possible.
+ */
+MipsEncodingMap EncodingMap[kX86Last] = {
+};
+
+
+/*
+ * Assemble the LIR into binary instruction format. Note that we may
+ * discover that pc-relative displacements may not fit the selected
+ * instruction. In those cases we will try to substitute a new code
+ * sequence or request that the trace be shortened and retried.
+ */
+AssemblerStatus oatAssembleInstructions(CompilationUnit *cUnit,
+ intptr_t startAddr)
+{
+ UNIMPLEMENTED(WARNING) << "oatAssembleInstructions";
+ return kSuccess;
+#if 0
+ LIR *lir;
+ AssemblerStatus res = kSuccess; // Assume success
+
+ for (lir = (LIR *) cUnit->firstLIRInsn; lir; lir = NEXT_LIR(lir)) {
+ if (lir->opcode < 0) {
+ continue;
+ }
+
+
+ if (lir->flags.isNop) {
+ continue;
+ }
+
+ if (lir->flags.pcRelFixup) {
+ if (lir->opcode == kMipsDelta) {
+ /*
+ * The "Delta" pseudo-ops load the difference between
+ * two pc-relative locations into a the target register
+ * found in operands[0]. The delta is determined by
+ * (label2 - label1), where label1 is a standard
+ * kPseudoTargetLabel and is stored in operands[2].
+ * If operands[3] is null, then label2 is a kPseudoTargetLabel
+ * and is found in lir->target. If operands[3] is non-NULL,
+ * then it is a Switch/Data table.
+ */
+ int offset1 = ((LIR*)lir->operands[2])->offset;
+ SwitchTable *tabRec = (SwitchTable*)lir->operands[3];
+ int offset2 = tabRec ? tabRec->offset : lir->target->offset;
+ int delta = offset2 - offset1;
+ if ((delta & 0xffff) == delta) {
+ // Fits
+ lir->operands[1] = delta;
+ } else {
+ // Doesn't fit - must expand to kMipsDelta[Hi|Lo] pair
+ LIR *newDeltaHi =
+ rawLIR(cUnit, lir->dalvikOffset, kMipsDeltaHi,
+ lir->operands[0], 0, lir->operands[2],
+ lir->operands[3], lir->target);
+ oatInsertLIRBefore((LIR*)lir, (LIR*)newDeltaHi);
+ LIR *newDeltaLo =
+ rawLIR(cUnit, lir->dalvikOffset, kMipsDeltaLo,
+ lir->operands[0], 0, lir->operands[2],
+ lir->operands[3], lir->target);
+ oatInsertLIRBefore((LIR*)lir, (LIR*)newDeltaLo);
+ lir->flags.isNop = true;
+ res = kRetryAll;
+ }
+ } else if (lir->opcode == kMipsDeltaLo) {
+ int offset1 = ((LIR*)lir->operands[2])->offset;
+ SwitchTable *tabRec = (SwitchTable*)lir->operands[3];
+ int offset2 = tabRec ? tabRec->offset : lir->target->offset;
+ int delta = offset2 - offset1;
+ lir->operands[1] = delta & 0xffff;
+ } else if (lir->opcode == kMipsDeltaHi) {
+ int offset1 = ((LIR*)lir->operands[2])->offset;
+ SwitchTable *tabRec = (SwitchTable*)lir->operands[3];
+ int offset2 = tabRec ? tabRec->offset : lir->target->offset;
+ int delta = offset2 - offset1;
+ lir->operands[1] = (delta >> 16) & 0xffff;
+ } else if (lir->opcode == kMipsB || lir->opcode == kMipsBal) {
+ LIR *targetLIR = (LIR *) lir->target;
+ intptr_t pc = lir->offset + 4;
+ intptr_t target = targetLIR->offset;
+ int delta = target - pc;
+ if (delta & 0x3) {
+ LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+ }
+ if (delta > 131068 || delta < -131069) {
+ res = kRetryAll;
+ convertShortToLongBranch(cUnit, lir);
+ } else {
+ lir->operands[0] = delta >> 2;
+ }
+ } else if (lir->opcode >= kMipsBeqz && lir->opcode <= kMipsBnez) {
+ LIR *targetLIR = (LIR *) lir->target;
+ intptr_t pc = lir->offset + 4;
+ intptr_t target = targetLIR->offset;
+ int delta = target - pc;
+ if (delta & 0x3) {
+ LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+ }
+ if (delta > 131068 || delta < -131069) {
+ res = kRetryAll;
+ convertShortToLongBranch(cUnit, lir);
+ } else {
+ lir->operands[1] = delta >> 2;
+ }
+ } else if (lir->opcode == kMipsBeq || lir->opcode == kMipsBne) {
+ LIR *targetLIR = (LIR *) lir->target;
+ intptr_t pc = lir->offset + 4;
+ intptr_t target = targetLIR->offset;
+ int delta = target - pc;
+ if (delta & 0x3) {
+ LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+ }
+ if (delta > 131068 || delta < -131069) {
+ res = kRetryAll;
+ convertShortToLongBranch(cUnit, lir);
+ } else {
+ lir->operands[2] = delta >> 2;
+ }
+ } else if (lir->opcode == kMipsJal) {
+ intptr_t curPC = (startAddr + lir->offset + 4) & ~3;
+ intptr_t target = lir->operands[0];
+ /* ensure PC-region branch can be used */
+ DCHECK_EQ((curPC & 0xF0000000), (target & 0xF0000000));
+ if (target & 0x3) {
+ LOG(FATAL) << "Jump target not multiple of 4: " << target;
+ }
+ lir->operands[0] = target >> 2;
+ } else if (lir->opcode == kMipsLahi) { /* ld address hi (via lui) */
+ LIR *targetLIR = (LIR *) lir->target;
+ intptr_t target = startAddr + targetLIR->offset;
+ lir->operands[1] = target >> 16;
+ } else if (lir->opcode == kMipsLalo) { /* ld address lo (via ori) */
+ LIR *targetLIR = (LIR *) lir->target;
+ intptr_t target = startAddr + targetLIR->offset;
+ lir->operands[2] = lir->operands[2] + target;
+ }
+ }
+
+ /*
+ * If one of the pc-relative instructions expanded we'll have
+ * to make another pass. Don't bother to fully assemble the
+ * instruction.
+ */
+ if (res != kSuccess) {
+ continue;
+ }
+ const MipsEncodingMap *encoder = &EncodingMap[lir->opcode];
+ u4 bits = encoder->skeleton;
+ int i;
+ for (i = 0; i < 4; i++) {
+ u4 operand;
+ u4 value;
+ operand = lir->operands[i];
+ switch(encoder->fieldLoc[i].kind) {
+ case kFmtUnused:
+ break;
+ case kFmtBitBlt:
+ if (encoder->fieldLoc[i].start == 0 && encoder->fieldLoc[i].end == 31) {
+ value = operand;
+ } else {
+ value = (operand << encoder->fieldLoc[i].start) &
+ ((1 << (encoder->fieldLoc[i].end + 1)) - 1);
+ }
+ bits |= value;
+ break;
+ case kFmtBlt5_2:
+ value = (operand & 0x1f);
+ bits |= (value << encoder->fieldLoc[i].start);
+ bits |= (value << encoder->fieldLoc[i].end);
+ break;
+ case kFmtDfp: {
+ DCHECK(DOUBLEREG(operand));
+ DCHECK((operand & 0x1) == 0);
+ value = ((operand & FP_REG_MASK) << encoder->fieldLoc[i].start) &
+ ((1 << (encoder->fieldLoc[i].end + 1)) - 1);
+ bits |= value;
+ break;
+ }
+ case kFmtSfp:
+ DCHECK(SINGLEREG(operand));
+ value = ((operand & FP_REG_MASK) << encoder->fieldLoc[i].start) &
+ ((1 << (encoder->fieldLoc[i].end + 1)) - 1);
+ bits |= value;
+ break;
+ default:
+ LOG(FATAL) << "Bad encoder format: "
+ << (int)encoder->fieldLoc[i].kind;
+ }
+ }
+ // FIXME: need multi-endian handling here
+ cUnit->codeBuffer.push_back((bits >> 16) & 0xffff);
+ cUnit->codeBuffer.push_back(bits & 0xffff);
+ // TUNING: replace with proper delay slot handling
+ if (encoder->size == 8) {
+ const MipsEncodingMap *encoder = &EncodingMap[kMipsNop];
+ u4 bits = encoder->skeleton;
+ cUnit->codeBuffer.push_back((bits >> 16) & 0xffff);
+ cUnit->codeBuffer.push_back(bits & 0xffff);
+ }
+ }
+ return res;
+#endif
+}
+
+int oatGetInsnSize(LIR* lir)
+{
+ return EncodingMap[lir->opcode].size;
+}
+/*
+ * Target-dependent offset assignment.
+ * independent.
+ */
+int oatAssignInsnOffsets(CompilationUnit* cUnit)
+{
+ LIR* x86LIR;
+ int offset = 0;
+
+ for (x86LIR = (LIR *) cUnit->firstLIRInsn;
+ x86LIR;
+ x86LIR = NEXT_LIR(x86LIR)) {
+ x86LIR->offset = offset;
+ if (x86LIR->opcode >= 0) {
+ if (!x86LIR->flags.isNop) {
+ offset += x86LIR->flags.size;
+ }
+ } else if (x86LIR->opcode == kPseudoPseudoAlign4) {
+ if (offset & 0x2) {
+ offset += 2;
+ x86LIR->operands[0] = 1;
+ } else {
+ x86LIR->operands[0] = 0;
+ }
+ }
+ /* Pseudo opcodes don't consume space */
+ }
+
+ return offset;
+}
+
+} // namespace art
diff --git a/src/compiler/codegen/x86/Codegen.h b/src/compiler/codegen/x86/Codegen.h
new file mode 100644
index 0000000..520d638
--- /dev/null
+++ b/src/compiler/codegen/x86/Codegen.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This file contains register alloction support and is intended to be
+ * included by:
+ *
+ * Codegen-$(TARGET_ARCH_VARIANT).c
+ *
+ */
+
+#include "../../CompilerIR.h"
+
+namespace art {
+
+#if defined(_CODEGEN_C)
+bool genAddLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
+ RegLocation rlSrc1, RegLocation rlSrc2);
+bool genSubLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
+ RegLocation rlSrc1, RegLocation rlSrc2);
+bool genNegLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
+ RegLocation rlSrc);
+LIR *opRegImm(CompilationUnit* cUnit, OpKind op, int rDestSrc1, int value);
+LIR *opRegReg(CompilationUnit* cUnit, OpKind op, int rDestSrc1, int rSrc2);
+LIR* opCmpBranch(CompilationUnit* cUnit, ConditionCode cond, int src1,
+ int src2, LIR* target);
+LIR* opCmpImmBranch(CompilationUnit* cUnit, ConditionCode cond, int reg,
+ int checkValue, LIR* target);
+
+/* Forward declaraton the portable versions due to circular dependency */
+bool genArithOpFloatPortable(CompilationUnit* cUnit, MIR* mir,
+ RegLocation rlDest, RegLocation rlSrc1,
+ RegLocation rlSrc2);
+
+bool genArithOpDoublePortable(CompilationUnit* cUnit, MIR* mir,
+ RegLocation rlDest, RegLocation rlSrc1,
+ RegLocation rlSrc2);
+
+bool genConversionPortable(CompilationUnit* cUnit, MIR* mir);
+
+int loadHelper(CompilationUnit* cUnit, int offset);
+LIR* callRuntimeHelper(CompilationUnit* cUnit, int reg);
+RegLocation getRetLoc(CompilationUnit* cUnit);
+LIR* loadConstant(CompilationUnit* cUnit, int reg, int immVal);
+void opRegCopyWide(CompilationUnit* cUnit, int destLo, int destHi,
+ int srcLo, int srcHi);
+LIR* opRegCopy(CompilationUnit* cUnit, int rDest, int rSrc);
+void freeRegLocTemps(CompilationUnit* cUnit, RegLocation rlKeep,
+ RegLocation rlFree);
+
+
+/*
+ * Return most flexible allowed register class based on size.
+ * Bug: 2813841
+ * Must use a core register for data types narrower than word (due
+ * to possible unaligned load/store.
+ */
+inline RegisterClass oatRegClassBySize(OpSize size)
+{
+ return (size == kUnsignedHalf ||
+ size == kSignedHalf ||
+ size == kUnsignedByte ||
+ size == kSignedByte ) ? kCoreReg : kAnyReg;
+}
+
+/*
+ * Construct an s4 from two consecutive half-words of switch data.
+ * This needs to check endianness because the DEX optimizer only swaps
+ * half-words in instruction stream.
+ *
+ * "switchData" must be 32-bit aligned.
+ */
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+inline s4 s4FromSwitchData(const void* switchData) {
+ return *(s4*) switchData;
+}
+#else
+inline s4 s4FromSwitchData(const void* switchData) {
+ u2* data = switchData;
+ return data[0] | (((s4) data[1]) << 16);
+}
+#endif
+
+#endif
+
+extern void oatSetupResourceMasks(LIR* lir);
+
+extern LIR* oatRegCopyNoInsert(CompilationUnit* cUnit, int rDest,
+ int rSrc);
+
+} // namespace art
diff --git a/src/compiler/codegen/x86/FP/X86FP.cc b/src/compiler/codegen/x86/FP/X86FP.cc
new file mode 100644
index 0000000..db3b928
--- /dev/null
+++ b/src/compiler/codegen/x86/FP/X86FP.cc
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace art {
+
+bool genArithOpFloat(CompilationUnit *cUnit, MIR *mir, RegLocation rlDest,
+ RegLocation rlSrc1, RegLocation rlSrc2)
+{
+ UNIMPLEMENTED(WARNING) << "genArithOpFloat";
+ return false;
+#if 0
+#ifdef __mips_hard_float
+ int op = kMipsNop;
+ RegLocation rlResult;
+
+ /*
+ * Don't attempt to optimize register usage since these opcodes call out to
+ * the handlers.
+ */
+ switch (mir->dalvikInsn.opcode) {
+ case OP_ADD_FLOAT_2ADDR:
+ case OP_ADD_FLOAT:
+ op = kMipsFadds;
+ break;
+ case OP_SUB_FLOAT_2ADDR:
+ case OP_SUB_FLOAT:
+ op = kMipsFsubs;
+ break;
+ case OP_DIV_FLOAT_2ADDR:
+ case OP_DIV_FLOAT:
+ op = kMipsFdivs;
+ break;
+ case OP_MUL_FLOAT_2ADDR:
+ case OP_MUL_FLOAT:
+ op = kMipsFmuls;
+ break;
+ case OP_REM_FLOAT_2ADDR:
+ case OP_REM_FLOAT:
+ case OP_NEG_FLOAT: {
+ return genArithOpFloatPortable(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+ }
+ default:
+ return true;
+ }
+ rlSrc1 = loadValue(cUnit, rlSrc1, kFPReg);
+ rlSrc2 = loadValue(cUnit, rlSrc2, kFPReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
+ newLIR3(cUnit, (MipsOpCode)op, rlResult.lowReg, rlSrc1.lowReg,
+ rlSrc2.lowReg);
+ storeValue(cUnit, rlDest, rlResult);
+
+ return false;
+#else
+ return genArithOpFloatPortable(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+#endif
+#endif
+}
+
+static bool genArithOpDouble(CompilationUnit *cUnit, MIR *mir,
+ RegLocation rlDest, RegLocation rlSrc1,
+ RegLocation rlSrc2)
+{
+ UNIMPLEMENTED(WARNING) << "genArithOpDouble";
+ return false;
+#if 0
+#ifdef __mips_hard_float
+ int op = kMipsNop;
+ RegLocation rlResult;
+
+ switch (mir->dalvikInsn.opcode) {
+ case OP_ADD_DOUBLE_2ADDR:
+ case OP_ADD_DOUBLE:
+ op = kMipsFaddd;
+ break;
+ case OP_SUB_DOUBLE_2ADDR:
+ case OP_SUB_DOUBLE:
+ op = kMipsFsubd;
+ break;
+ case OP_DIV_DOUBLE_2ADDR:
+ case OP_DIV_DOUBLE:
+ op = kMipsFdivd;
+ break;
+ case OP_MUL_DOUBLE_2ADDR:
+ case OP_MUL_DOUBLE:
+ op = kMipsFmuld;
+ break;
+ case OP_REM_DOUBLE_2ADDR:
+ case OP_REM_DOUBLE:
+ case OP_NEG_DOUBLE: {
+ return genArithOpDoublePortable(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+ }
+ default:
+ return true;
+ }
+ rlSrc1 = loadValueWide(cUnit, rlSrc1, kFPReg);
+ DCHECK(rlSrc1.wide);
+ rlSrc2 = loadValueWide(cUnit, rlSrc2, kFPReg);
+ DCHECK(rlSrc2.wide);
+ rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
+ DCHECK(rlDest.wide);
+ DCHECK(rlResult.wide);
+ newLIR3(cUnit, (MipsOpCode)op, S2D(rlResult.lowReg, rlResult.highReg),
+ S2D(rlSrc1.lowReg, rlSrc1.highReg),
+ S2D(rlSrc2.lowReg, rlSrc2.highReg));
+ storeValueWide(cUnit, rlDest, rlResult);
+ return false;
+#else
+ return genArithOpDoublePortable(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+#endif
+#endif
+}
+
+static bool genConversion(CompilationUnit *cUnit, MIR *mir)
+{
+ UNIMPLEMENTED(WARNING) << "genConversion";
+ return false;
+#if 0
+#ifdef __mips_hard_float
+ Opcode opcode = mir->dalvikInsn.opcode;
+ bool longSrc = false;
+ bool longDest = false;
+ RegLocation rlSrc;
+ RegLocation rlDest;
+ int op = kMipsNop;
+ int srcReg;
+ RegLocation rlResult;
+ switch (opcode) {
+ case OP_INT_TO_FLOAT:
+ longSrc = false;
+ longDest = false;
+ op = kMipsFcvtsw;
+ break;
+ case OP_DOUBLE_TO_FLOAT:
+ longSrc = true;
+ longDest = false;
+ op = kMipsFcvtsd;
+ break;
+ case OP_FLOAT_TO_DOUBLE:
+ longSrc = false;
+ longDest = true;
+ op = kMipsFcvtds;
+ break;
+ case OP_INT_TO_DOUBLE:
+ longSrc = false;
+ longDest = true;
+ op = kMipsFcvtdw;
+ break;
+ case OP_FLOAT_TO_INT:
+ case OP_DOUBLE_TO_INT:
+ case OP_LONG_TO_DOUBLE:
+ case OP_FLOAT_TO_LONG:
+ case OP_LONG_TO_FLOAT:
+ case OP_DOUBLE_TO_LONG:
+ return genConversionPortable(cUnit, mir);
+ default:
+ return true;
+ }
+ if (longSrc) {
+ rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
+ rlSrc = loadValueWide(cUnit, rlSrc, kFPReg);
+ srcReg = S2D(rlSrc.lowReg, rlSrc.highReg);
+ } else {
+ rlSrc = oatGetSrc(cUnit, mir, 0);
+ rlSrc = loadValue(cUnit, rlSrc, kFPReg);
+ srcReg = rlSrc.lowReg;
+ }
+ if (longDest) {
+ rlDest = oatGetDestWide(cUnit, mir, 0, 1);
+ rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
+ newLIR2(cUnit, (MipsOpCode)op, S2D(rlResult.lowReg, rlResult.highReg), srcReg);
+ storeValueWide(cUnit, rlDest, rlResult);
+ } else {
+ rlDest = oatGetDest(cUnit, mir, 0);
+ rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
+ newLIR2(cUnit, (MipsOpCode)op, rlResult.lowReg, srcReg);
+ storeValue(cUnit, rlDest, rlResult);
+ }
+ return false;
+#else
+ return genConversionPortable(cUnit, mir);
+#endif
+#endif
+}
+
+static bool genCmpFP(CompilationUnit *cUnit, MIR *mir, RegLocation rlDest,
+ RegLocation rlSrc1, RegLocation rlSrc2)
+{
+ UNIMPLEMENTED(WARNING) << "genCmpFP";
+#if 0
+ bool wide = true;
+ int offset;
+
+ switch(mir->dalvikInsn.opcode) {
+ case OP_CMPL_FLOAT:
+ offset = OFFSETOF_MEMBER(Thread, pCmplFloat);
+ wide = false;
+ break;
+ case OP_CMPG_FLOAT:
+ offset = OFFSETOF_MEMBER(Thread, pCmpgFloat);
+ wide = false;
+ break;
+ case OP_CMPL_DOUBLE:
+ offset = OFFSETOF_MEMBER(Thread, pCmplDouble);
+ break;
+ case OP_CMPG_DOUBLE:
+ offset = OFFSETOF_MEMBER(Thread, pCmpgDouble);
+ break;
+ default:
+ return true;
+ }
+ oatFlushAllRegs(cUnit);
+ oatLockCallTemps(cUnit);
+ if (wide) {
+ loadValueDirectWideFixed(cUnit, rlSrc1, rARG0, rARG1);
+ loadValueDirectWideFixed(cUnit, rlSrc2, rARG2, rARG3);
+ } else {
+ loadValueDirectFixed(cUnit, rlSrc1, rARG0);
+ loadValueDirectFixed(cUnit, rlSrc2, rARG1);
+ }
+ int rTgt = loadHelper(cUnit, offset);
+ opReg(cUnit, kOpBlx, rTgt);
+ RegLocation rlResult = oatGetReturn(cUnit);
+ storeValue(cUnit, rlDest, rlResult);
+#endif
+ return false;
+}
+
+} // namespace art
diff --git a/src/compiler/codegen/x86/X86/Factory.cc b/src/compiler/codegen/x86/X86/Factory.cc
new file mode 100644
index 0000000..9330021
--- /dev/null
+++ b/src/compiler/codegen/x86/X86/Factory.cc
@@ -0,0 +1,801 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace art {
+
+/*
+ * This file contains codegen for the MIPS32 ISA and is intended to be
+ * includes by:
+ *
+ * Codegen-$(TARGET_ARCH_VARIANT).c
+ *
+ */
+
+static int coreRegs[] = {rAX, rCX, rDX, rBX, rSP, rBP, rSI, rDI}
+static int reservedRegs[] = {rSP};
+static int coreTemps[] = {rAX, rCX, rDX}
+static int fpRegs[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7, fr8, fr9,
+ fr10, fr11, fr12, fr13, fr14, fr15}
+static int fpTemps[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7, fr8, fr9,
+ fr10, fr11, fr12, fr13, fr14, fr15}
+
+void genBarrier(CompilationUnit *cUnit);
+void storePair(CompilationUnit *cUnit, int base, int lowReg,
+ int highReg);
+void loadPair(CompilationUnit *cUnit, int base, int lowReg, int highReg);
+LIR *loadWordDisp(CompilationUnit *cUnit, int rBase, int displacement,
+ int rDest);
+LIR *storeWordDisp(CompilationUnit *cUnit, int rBase,
+ int displacement, int rSrc);
+LIR *loadConstant(CompilationUnit *cUnit, int rDest, int value);
+
+LIR *fpRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
+{
+ UNIMPLEMENTED(WARNING) << "fpRegCopy";
+ return NULL;
+#if 0
+ int opcode;
+ /* must be both DOUBLE or both not DOUBLE */
+ DCHECK_EQ(DOUBLEREG(rDest),DOUBLEREG(rSrc));
+ if (DOUBLEREG(rDest)) {
+ opcode = kMipsFmovd;
+ } else {
+ if (SINGLEREG(rDest)) {
+ if (SINGLEREG(rSrc)) {
+ opcode = kMipsFmovs;
+ } else {
+ /* note the operands are swapped for the mtc1 instr */
+ int tOpnd = rSrc;
+ rSrc = rDest;
+ rDest = tOpnd;
+ opcode = kMipsMtc1;
+ }
+ } else {
+ DCHECK(SINGLEREG(rSrc));
+ opcode = kMipsMfc1;
+ }
+ }
+ LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, rSrc, rDest);
+ if (rDest == rSrc) {
+ res->flags.isNop = true;
+ }
+ return res;
+#endif
+}
+
+/*
+ * Load a immediate using a shortcut if possible; otherwise
+ * grab from the per-translation literal pool. If target is
+ * a high register, build constant into a low register and copy.
+ *
+ * No additional register clobbering operation performed. Use this version when
+ * 1) rDest is freshly returned from oatAllocTemp or
+ * 2) The codegen is under fixed register usage
+ */
+LIR *loadConstantNoClobber(CompilationUnit *cUnit, int rDest,
+ int value)
+{
+ UNIMPLEMENTED(WARNING) << "loadConstantNoClobber";
+ return NULL;
+#if 0
+ LIR *res;
+
+ int rDestSave = rDest;
+ int isFpReg = FPREG(rDest);
+ if (isFpReg) {
+ DCHECK(SINGLEREG(rDest));
+ rDest = oatAllocTemp(cUnit);
+ }
+
+ /* See if the value can be constructed cheaply */
+ if (value == 0) {
+ res = newLIR2(cUnit, kMipsMove, rDest, r_ZERO);
+ } else if ((value > 0) && (value <= 65535)) {
+ res = newLIR3(cUnit, kMipsOri, rDest, r_ZERO, value);
+ } else if ((value < 0) && (value >= -32768)) {
+ res = newLIR3(cUnit, kMipsAddiu, rDest, r_ZERO, value);
+ } else {
+ res = newLIR2(cUnit, kMipsLui, rDest, value>>16);
+ if (value & 0xffff)
+ newLIR3(cUnit, kMipsOri, rDest, rDest, value);
+ }
+
+ if (isFpReg) {
+ newLIR2(cUnit, kMipsMtc1, rDest, rDestSave);
+ oatFreeTemp(cUnit, rDest);
+ }
+
+ return res;
+#endif
+}
+
+LIR *opNone(CompilationUnit *cUnit, OpKind op)
+{
+ UNIMPLEMENTED(WARNING) << "opNone";
+ return NULL;
+#if 0
+ LIR *res;
+ MipsOpCode opcode = kMipsNop;
+ switch (op) {
+ case kOpUncondBr:
+ opcode = kMipsB;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in opNone";
+ }
+ res = newLIR0(cUnit, opcode);
+ return res;
+#endif
+}
+
+LIR *loadMultiple(CompilationUnit *cUnit, int rBase, int rMask);
+
+LIR *opReg(CompilationUnit *cUnit, OpKind op, int rDestSrc)
+{
+ UNIMPLEMENTED(WARNING) << "opReg";
+ return NULL;
+#if 0
+ MipsOpCode opcode = kMipsNop;
+ switch (op) {
+ case kOpBlx:
+ opcode = kMipsJalr;
+ break;
+ case kOpBx:
+ return newLIR1(cUnit, kMipsJr, rDestSrc);
+ break;
+ default:
+ LOG(FATAL) << "Bad case in opReg";
+ }
+ return newLIR2(cUnit, opcode, r_RA, rDestSrc);
+#endif
+}
+
+LIR *opRegRegImm(CompilationUnit *cUnit, OpKind op, int rDest,
+ int rSrc1, int value);
+LIR *opRegImm(CompilationUnit *cUnit, OpKind op, int rDestSrc1,
+ int value)
+{
+ UNIMPLEMENTED(WARNING) << "opRegImm";
+ return NULL;
+#if 0
+ LIR *res;
+ bool neg = (value < 0);
+ int absValue = (neg) ? -value : value;
+ bool shortForm = (absValue & 0xff) == absValue;
+ MipsOpCode opcode = kMipsNop;
+ switch (op) {
+ case kOpAdd:
+ return opRegRegImm(cUnit, op, rDestSrc1, rDestSrc1, value);
+ break;
+ case kOpSub:
+ return opRegRegImm(cUnit, op, rDestSrc1, rDestSrc1, value);
+ break;
+ default:
+ LOG(FATAL) << "Bad case in opRegImm";
+ break;
+ }
+ if (shortForm)
+ res = newLIR2(cUnit, opcode, rDestSrc1, absValue);
+ else {
+ int rScratch = oatAllocTemp(cUnit);
+ res = loadConstant(cUnit, rScratch, value);
+ if (op == kOpCmp)
+ newLIR2(cUnit, opcode, rDestSrc1, rScratch);
+ else
+ newLIR3(cUnit, opcode, rDestSrc1, rDestSrc1, rScratch);
+ }
+ return res;
+#endif
+}
+
+LIR *opRegRegReg(CompilationUnit *cUnit, OpKind op, int rDest,
+ int rSrc1, int rSrc2)
+{
+ UNIMPLEMENTED(WARNING) << "opRegRegReg";
+ return NULL;
+#if 0
+ MipsOpCode opcode = kMipsNop;
+ switch (op) {
+ case kOpAdd:
+ opcode = kMipsAddu;
+ break;
+ case kOpSub:
+ opcode = kMipsSubu;
+ break;
+ case kOpAnd:
+ opcode = kMipsAnd;
+ break;
+ case kOpMul:
+ opcode = kMipsMul;
+ break;
+ case kOpOr:
+ opcode = kMipsOr;
+ break;
+ case kOpXor:
+ opcode = kMipsXor;
+ break;
+ case kOpLsl:
+ opcode = kMipsSllv;
+ break;
+ case kOpLsr:
+ opcode = kMipsSrlv;
+ break;
+ case kOpAsr:
+ opcode = kMipsSrav;
+ break;
+ case kOpAdc:
+ case kOpSbc:
+ LOG(FATAL) << "No carry bit on MIPS";
+ break;
+ default:
+ LOG(FATAL) << "bad case in opRegRegReg";
+ break;
+ }
+ return newLIR3(cUnit, opcode, rDest, rSrc1, rSrc2);
+#endif
+}
+
+LIR *opRegRegImm(CompilationUnit *cUnit, OpKind op, int rDest,
+ int rSrc1, int value)
+{
+ UNIMPLEMENTED(WARNING) << "opRegRegImm";
+ return NULL;
+#if 0
+ LIR *res;
+ MipsOpCode opcode = kMipsNop;
+ bool shortForm = true;
+
+ switch(op) {
+ case kOpAdd:
+ if (IS_SIMM16(value)) {
+ opcode = kMipsAddiu;
+ }
+ else {
+ shortForm = false;
+ opcode = kMipsAddu;
+ }
+ break;
+ case kOpSub:
+ if (IS_SIMM16((-value))) {
+ value = -value;
+ opcode = kMipsAddiu;
+ }
+ else {
+ shortForm = false;
+ opcode = kMipsSubu;
+ }
+ break;
+ case kOpLsl:
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMipsSll;
+ break;
+ case kOpLsr:
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMipsSrl;
+ break;
+ case kOpAsr:
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMipsSra;
+ break;
+ case kOpAnd:
+ if (IS_UIMM16((value))) {
+ opcode = kMipsAndi;
+ }
+ else {
+ shortForm = false;
+ opcode = kMipsAnd;
+ }
+ break;
+ case kOpOr:
+ if (IS_UIMM16((value))) {
+ opcode = kMipsOri;
+ }
+ else {
+ shortForm = false;
+ opcode = kMipsOr;
+ }
+ break;
+ case kOpXor:
+ if (IS_UIMM16((value))) {
+ opcode = kMipsXori;
+ }
+ else {
+ shortForm = false;
+ opcode = kMipsXor;
+ }
+ break;
+ case kOpMul:
+ shortForm = false;
+ opcode = kMipsMul;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in opRegRegImm";
+ break;
+ }
+
+ if (shortForm)
+ res = newLIR3(cUnit, opcode, rDest, rSrc1, value);
+ else {
+ if (rDest != rSrc1) {
+ res = loadConstant(cUnit, rDest, value);
+ newLIR3(cUnit, opcode, rDest, rSrc1, rDest);
+ } else {
+ int rScratch = oatAllocTemp(cUnit);
+ res = loadConstant(cUnit, rScratch, value);
+ newLIR3(cUnit, opcode, rDest, rSrc1, rScratch);
+ }
+ }
+ return res;
+#endif
+}
+
+LIR *opRegReg(CompilationUnit *cUnit, OpKind op, int rDestSrc1,
+ int rSrc2)
+{
+ UNIMPLEMENTED(WARNING) << "opRegReg";
+ return NULL;
+#if 0
+ MipsOpCode opcode = kMipsNop;
+ LIR *res;
+ switch (op) {
+ case kOpMov:
+ opcode = kMipsMove;
+ break;
+ case kOpMvn:
+ return newLIR3(cUnit, kMipsNor, rDestSrc1, rSrc2, r_ZERO);
+ case kOpNeg:
+ return newLIR3(cUnit, kMipsSubu, rDestSrc1, r_ZERO, rSrc2);
+ case kOpAdd:
+ case kOpAnd:
+ case kOpMul:
+ case kOpOr:
+ case kOpSub:
+ case kOpXor:
+ return opRegRegReg(cUnit, op, rDestSrc1, rDestSrc1, rSrc2);
+ case kOp2Byte:
+#if __mips_isa_rev>=2
+ res = newLIR2(cUnit, kMipsSeb, rDestSrc1, rSrc2);
+#else
+ res = opRegRegImm(cUnit, kOpLsl, rDestSrc1, rSrc2, 24);
+ opRegRegImm(cUnit, kOpAsr, rDestSrc1, rDestSrc1, 24);
+#endif
+ return res;
+ case kOp2Short:
+#if __mips_isa_rev>=2
+ res = newLIR2(cUnit, kMipsSeh, rDestSrc1, rSrc2);
+#else
+ res = opRegRegImm(cUnit, kOpLsl, rDestSrc1, rSrc2, 16);
+ opRegRegImm(cUnit, kOpAsr, rDestSrc1, rDestSrc1, 16);
+#endif
+ return res;
+ case kOp2Char:
+ return newLIR3(cUnit, kMipsAndi, rDestSrc1, rSrc2, 0xFFFF);
+ default:
+ LOG(FATAL) << "Bad case in opRegReg";
+ break;
+ }
+ return newLIR2(cUnit, opcode, rDestSrc1, rSrc2);
+#endif
+}
+
+LIR *loadConstantValueWide(CompilationUnit *cUnit, int rDestLo,
+ int rDestHi, int valLo, int valHi)
+{
+ LIR *res;
+ res = loadConstantNoClobber(cUnit, rDestLo, valLo);
+ loadConstantNoClobber(cUnit, rDestHi, valHi);
+ return res;
+}
+
+/* Load value from base + scaled index. */
+LIR *loadBaseIndexed(CompilationUnit *cUnit, int rBase,
+ int rIndex, int rDest, int scale, OpSize size)
+{
+ UNIMPLEMENTED(WARNING) << "loadBaseIndexed";
+ return NULL;
+#if 0
+ LIR *first = NULL;
+ LIR *res;
+ MipsOpCode opcode = kMipsNop;
+ int tReg = oatAllocTemp(cUnit);
+
+ if (FPREG(rDest)) {
+ DCHECK(SINGLEREG(rDest));
+ DCHECK((size == kWord) || (size == kSingle));
+ size = kSingle;
+ } else {
+ if (size == kSingle)
+ size = kWord;
+ }
+
+ if (!scale) {
+ first = newLIR3(cUnit, kMipsAddu, tReg , rBase, rIndex);
+ } else {
+ first = opRegRegImm(cUnit, kOpLsl, tReg, rIndex, scale);
+ newLIR3(cUnit, kMipsAddu, tReg , rBase, tReg);
+ }
+
+ switch (size) {
+ case kSingle:
+ opcode = kMipsFlwc1;
+ break;
+ case kWord:
+ opcode = kMipsLw;
+ break;
+ case kUnsignedHalf:
+ opcode = kMipsLhu;
+ break;
+ case kSignedHalf:
+ opcode = kMipsLh;
+ break;
+ case kUnsignedByte:
+ opcode = kMipsLbu;
+ break;
+ case kSignedByte:
+ opcode = kMipsLb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in loadBaseIndexed";
+ }
+
+ res = newLIR3(cUnit, opcode, rDest, 0, tReg);
+ oatFreeTemp(cUnit, tReg);
+ return (first) ? first : res;
+#endif
+}
+
+/* store value base base + scaled index. */
+LIR *storeBaseIndexed(CompilationUnit *cUnit, int rBase,
+ int rIndex, int rSrc, int scale, OpSize size)
+{
+ UNIMPLEMENTED(WARNING) << "storeBaseIndexed";
+ return NULL;
+#if 0
+ LIR *first = NULL;
+ LIR *res;
+ MipsOpCode opcode = kMipsNop;
+ int rNewIndex = rIndex;
+ int tReg = oatAllocTemp(cUnit);
+
+#ifdef __mips_hard_float
+ if (FPREG(rSrc)) {
+ DCHECK(SINGLEREG(rSrc));
+ DCHECK((size == kWord) || (size == kSingle));
+ size = kSingle;
+ } else {
+ if (size == kSingle)
+ size = kWord;
+ }
+#endif
+
+ if (!scale) {
+ first = newLIR3(cUnit, kMipsAddu, tReg , rBase, rIndex);
+ } else {
+ first = opRegRegImm(cUnit, kOpLsl, tReg, rIndex, scale);
+ newLIR3(cUnit, kMipsAddu, tReg , rBase, tReg);
+ }
+
+ switch (size) {
+#ifdef __mips_hard_float
+ case kSingle:
+ opcode = kMipsFswc1;
+ break;
+#endif
+ case kWord:
+ opcode = kMipsSw;
+ break;
+ case kUnsignedHalf:
+ case kSignedHalf:
+ opcode = kMipsSh;
+ break;
+ case kUnsignedByte:
+ case kSignedByte:
+ opcode = kMipsSb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in storeBaseIndexed";
+ }
+ res = newLIR3(cUnit, opcode, rSrc, 0, tReg);
+ oatFreeTemp(cUnit, rNewIndex);
+ return first;
+#endif
+}
+
+LIR *loadMultiple(CompilationUnit *cUnit, int rBase, int rMask)
+{
+ UNIMPLEMENTED(WARNING) << "loadMultiple";
+ return NULL;
+#if 0
+ int i;
+ int loadCnt = 0;
+ LIR *res = NULL ;
+ genBarrier(cUnit);
+
+ for (i = 0; i < 8; i++, rMask >>= 1) {
+ if (rMask & 0x1) { /* map r0 to MIPS r_A0 */
+ newLIR3(cUnit, kMipsLw, i+r_A0, loadCnt*4, rBase);
+ loadCnt++;
+ }
+ }
+
+ if (loadCnt) {/* increment after */
+ newLIR3(cUnit, kMipsAddiu, rBase, rBase, loadCnt*4);
+ }
+
+ genBarrier(cUnit);
+ return res; /* NULL always returned which should be ok since no callers use it */
+#endif
+}
+
+LIR *storeMultiple(CompilationUnit *cUnit, int rBase, int rMask)
+{
+ UNIMPLEMENTED(WARNING) << "storeMultiple";
+ return NULL;
+ int i;
+ int storeCnt = 0;
+ LIR *res = NULL ;
+ genBarrier(cUnit);
+
+ for (i = 0; i < 8; i++, rMask >>= 1) {
+ if (rMask & 0x1) { /* map r0 to MIPS r_A0 */
+ newLIR3(cUnit, kMipsSw, i+r_A0, storeCnt*4, rBase);
+ storeCnt++;
+ }
+ }
+
+ if (storeCnt) { /* increment after */
+ newLIR3(cUnit, kMipsAddiu, rBase, rBase, storeCnt*4);
+ }
+
+ genBarrier(cUnit);
+ return res; /* NULL always returned which should be ok since no callers use it */
+#endif
+}
+
+LIR *loadBaseDispBody(CompilationUnit *cUnit, MIR *mir, int rBase,
+ int displacement, int rDest, int rDestHi,
+ OpSize size, int sReg)
+/*
+ * Load value from base + displacement. Optionally perform null check
+ * on base (which must have an associated sReg and MIR). If not
+ * performing null check, incoming MIR can be null. IMPORTANT: this
+ * code must not allocate any new temps. If a new register is needed
+ * and base and dest are the same, spill some other register to
+ * rlp and then restore.
+ */
+{
+ UNIMPLEMENTED(WARNING) << "loadBaseDispBody";
+ return NULL;
+#if 0
+ LIR *res;
+ LIR *load = NULL;
+ LIR *load2 = NULL;
+ MipsOpCode opcode = kMipsNop;
+ bool shortForm = IS_SIMM16(displacement);
+ bool pair = false;
+
+ switch (size) {
+ case kLong:
+ case kDouble:
+ pair = true;
+ opcode = kMipsLw;
+ if (FPREG(rDest)) {
+ opcode = kMipsFlwc1;
+ if (DOUBLEREG(rDest)) {
+ rDest = rDest - FP_DOUBLE;
+ } else {
+ DCHECK(FPREG(rDestHi));
+ DCHECK(rDest == (rDestHi - 1));
+ }
+ rDestHi = rDest + 1;
+ }
+ shortForm = IS_SIMM16_2WORD(displacement);
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case kWord:
+ case kSingle:
+ opcode = kMipsLw;
+ if (FPREG(rDest)) {
+ opcode = kMipsFlwc1;
+ DCHECK(SINGLEREG(rDest));
+ }
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case kUnsignedHalf:
+ opcode = kMipsLhu;
+ DCHECK_EQ((displacement & 0x1), 0);
+ break;
+ case kSignedHalf:
+ opcode = kMipsLh;
+ DCHECK_EQ((displacement & 0x1), 0);
+ break;
+ case kUnsignedByte:
+ opcode = kMipsLbu;
+ break;
+ case kSignedByte:
+ opcode = kMipsLb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in loadBaseIndexedBody";
+ }
+
+ if (shortForm) {
+ if (!pair) {
+ load = res = newLIR3(cUnit, opcode, rDest, displacement, rBase);
+ } else {
+ load = res = newLIR3(cUnit, opcode, rDest, displacement + LOWORD_OFFSET, rBase);
+ load2 = newLIR3(cUnit, opcode, rDestHi, displacement + HIWORD_OFFSET, rBase);
+ }
+ } else {
+ if (pair) {
+ int rTmp = oatAllocFreeTemp(cUnit);
+ res = opRegRegImm(cUnit, kOpAdd, rTmp, rBase, displacement);
+ load = newLIR3(cUnit, opcode, rDest, LOWORD_OFFSET, rTmp);
+ load2 = newLIR3(cUnit, opcode, rDestHi, HIWORD_OFFSET, rTmp);
+ oatFreeTemp(cUnit, rTmp);
+ } else {
+ int rTmp = (rBase == rDest) ? oatAllocFreeTemp(cUnit)
+ : rDest;
+ res = loadConstant(cUnit, rTmp, displacement);
+ load = newLIR3(cUnit, opcode, rDest, rBase, rTmp);
+ if (rTmp != rDest)
+ oatFreeTemp(cUnit, rTmp);
+ }
+ }
+
+ if (rBase == rSP) {
+ if (load != NULL)
+ annotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+ true /* isLoad */);
+ if (load2 != NULL)
+ annotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
+ true /* isLoad */);
+ }
+ return load;
+#endif
+}
+
+LIR *loadBaseDisp(CompilationUnit *cUnit, MIR *mir, int rBase,
+ int displacement, int rDest, OpSize size,
+ int sReg)
+{
+ return loadBaseDispBody(cUnit, mir, rBase, displacement, rDest, -1,
+ size, sReg);
+}
+
+LIR *loadBaseDispWide(CompilationUnit *cUnit, MIR *mir, int rBase,
+ int displacement, int rDestLo, int rDestHi,
+ int sReg)
+{
+ return loadBaseDispBody(cUnit, mir, rBase, displacement, rDestLo, rDestHi,
+ kLong, sReg);
+}
+
+LIR *storeBaseDispBody(CompilationUnit *cUnit, int rBase,
+ int displacement, int rSrc, int rSrcHi,
+ OpSize size)
+{
+ UNIMPLEMENTED(WARNING) << "storeBaseDispBody";
+ return NULL;
+#if 0
+ LIR *res;
+ LIR *store = NULL;
+ LIR *store2 = NULL;
+ MipsOpCode opcode = kMipsNop;
+ bool shortForm = IS_SIMM16(displacement);
+ bool pair = false;
+
+ switch (size) {
+ case kLong:
+ case kDouble:
+ pair = true;
+ opcode = kMipsSw;
+#ifdef __mips_hard_float
+ if (FPREG(rSrc)) {
+ opcode = kMipsFswc1;
+ if (DOUBLEREG(rSrc)) {
+ rSrc = rSrc - FP_DOUBLE;
+ } else {
+ DCHECK(FPREG(rSrcHi));
+ DCHECK_EQ(rSrc, (rSrcHi - 1));
+ }
+ rSrcHi = rSrc + 1;
+ }
+#endif
+ shortForm = IS_SIMM16_2WORD(displacement);
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case kWord:
+ case kSingle:
+ opcode = kMipsSw;
+#ifdef __mips_hard_float
+ if (FPREG(rSrc)) {
+ opcode = kMipsFswc1;
+ DCHECK(SINGLEREG(rSrc));
+ }
+#endif
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case kUnsignedHalf:
+ case kSignedHalf:
+ opcode = kMipsSh;
+ DCHECK_EQ((displacement & 0x1), 0);
+ break;
+ case kUnsignedByte:
+ case kSignedByte:
+ opcode = kMipsSb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in storeBaseIndexedBody";
+ }
+
+ if (shortForm) {
+ if (!pair) {
+ store = res = newLIR3(cUnit, opcode, rSrc, displacement, rBase);
+ } else {
+ store = res = newLIR3(cUnit, opcode, rSrc, displacement + LOWORD_OFFSET, rBase);
+ store2 = newLIR3(cUnit, opcode, rSrcHi, displacement + HIWORD_OFFSET, rBase);
+ }
+ } else {
+ int rScratch = oatAllocTemp(cUnit);
+ res = opRegRegImm(cUnit, kOpAdd, rScratch, rBase, displacement);
+ if (!pair) {
+ store = newLIR3(cUnit, opcode, rSrc, 0, rScratch);
+ } else {
+ store = newLIR3(cUnit, opcode, rSrc, LOWORD_OFFSET, rScratch);
+ store2 = newLIR3(cUnit, opcode, rSrcHi, HIWORD_OFFSET, rScratch);
+ }
+ oatFreeTemp(cUnit, rScratch);
+ }
+
+ if (rBase == rSP) {
+ if (store != NULL)
+ annotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+ false /* isLoad */);
+ if (store2 != NULL)
+ annotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
+ false /* isLoad */);
+ }
+
+ return res;
+#endif
+}
+
+LIR *storeBaseDisp(CompilationUnit *cUnit, int rBase,
+ int displacement, int rSrc, OpSize size)
+{
+ return storeBaseDispBody(cUnit, rBase, displacement, rSrc, -1, size);
+}
+
+LIR *storeBaseDispWide(CompilationUnit *cUnit, int rBase,
+ int displacement, int rSrcLo, int rSrcHi)
+{
+ return storeBaseDispBody(cUnit, rBase, displacement, rSrcLo, rSrcHi, kLong);
+}
+
+void storePair(CompilationUnit *cUnit, int base, int lowReg, int highReg)
+{
+ storeWordDisp(cUnit, base, LOWORD_OFFSET, lowReg);
+ storeWordDisp(cUnit, base, HIWORD_OFFSET, highReg);
+}
+
+void loadPair(CompilationUnit *cUnit, int base, int lowReg, int highReg)
+{
+    loadWordDisp(cUnit, base, LOWORD_OFFSET, lowReg);
+    loadWordDisp(cUnit, base, HIWORD_OFFSET, highReg);
+}
+
+} // namespace art
diff --git a/src/compiler/codegen/x86/X86/Gen.cc b/src/compiler/codegen/x86/X86/Gen.cc
new file mode 100644
index 0000000..1ea94c9
--- /dev/null
+++ b/src/compiler/codegen/x86/X86/Gen.cc
@@ -0,0 +1,541 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This file contains codegen for the X86 ISA and is intended to be
+ * included by:
+ *
+ * Codegen-$(TARGET_ARCH_VARIANT).c
+ *
+ */
+
+namespace art {
+
+/*
+ * The lack of pc-relative loads on Mips presents somewhat of a challenge
+ * for our PIC switch table strategy. To materialize the current location
+ * we'll do a dummy JAL and reference our tables using r_RA as the
+ * base register. Note that r_RA will be used both as the base to
+ * locate the switch table data and as the reference base for the switch
+ * target offsets stored in the table. We'll use a special pseudo-instruction
+ * to represent the jal and trigger the construction of the
+ * switch table offsets (which will happen after final assembly and all
+ * labels are fixed).
+ *
+ * The test loop will look something like:
+ *
+ * ori rEnd, r_ZERO, #tableSize ; size in bytes
+ * jal BaseLabel ; stores "return address" (BaseLabel) in r_RA
+ * nop ; opportunistically fill
+ * BaseLabel:
+ * addiu rBase, r_RA, <table> - <BaseLabel> ; table relative to BaseLabel
+ *   addu  rEnd, rEnd, rBase           ; end of table
+ * lw rVal, [rSP, vRegOff] ; Test Value
+ * loop:
+ * beq rBase, rEnd, done
+ * lw rKey, 0(rBase)
+ * addu rBase, 8
+ * bne rVal, rKey, loop
+ * lw rDisp, -4(rBase)
+ * addu r_RA, rDisp
+ * jr r_RA
+ * done:
+ *
+ */
+void genSparseSwitch(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
+{
+ UNIMPLEMENTED(WARNING) << "genSparseSwitch";
+#if 0
+ const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
+ if (cUnit->printMe) {
+ dumpSparseSwitchTable(table);
+ }
+ // Add the table to the list - we'll process it later
+ SwitchTable *tabRec = (SwitchTable *)oatNew(cUnit, sizeof(SwitchTable),
+ true, kAllocData);
+ tabRec->table = table;
+ tabRec->vaddr = mir->offset;
+ int elements = table[1];
+ tabRec->targets = (LIR* *)oatNew(cUnit, elements * sizeof(LIR*), true,
+ kAllocLIR);
+ oatInsertGrowableList(cUnit, &cUnit->switchTables, (intptr_t)tabRec);
+
+ // The table is composed of 8-byte key/disp pairs
+ int byteSize = elements * 8;
+
+ int sizeHi = byteSize >> 16;
+ int sizeLo = byteSize & 0xffff;
+
+ int rEnd = oatAllocTemp(cUnit);
+ if (sizeHi) {
+ newLIR2(cUnit, kMipsLui, rEnd, sizeHi);
+ }
+ // Must prevent code motion for the curr pc pair
+ genBarrier(cUnit); // Scheduling barrier
+ newLIR0(cUnit, kMipsCurrPC); // Really a jal to .+8
+ // Now, fill the branch delay slot
+ if (sizeHi) {
+ newLIR3(cUnit, kMipsOri, rEnd, rEnd, sizeLo);
+ } else {
+ newLIR3(cUnit, kMipsOri, rEnd, r_ZERO, sizeLo);
+ }
+ genBarrier(cUnit); // Scheduling barrier
+
+ // Construct BaseLabel and set up table base register
+ LIR* baseLabel = newLIR0(cUnit, kPseudoTargetLabel);
+ // Remember base label so offsets can be computed later
+ tabRec->anchor = baseLabel;
+ int rBase = oatAllocTemp(cUnit);
+ newLIR4(cUnit, kMipsDelta, rBase, 0, (intptr_t)baseLabel, (intptr_t)tabRec);
+ opRegRegReg(cUnit, kOpAdd, rEnd, rEnd, rBase);
+
+ // Grab switch test value
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+
+ // Test loop
+ int rKey = oatAllocTemp(cUnit);
+ LIR* loopLabel = newLIR0(cUnit, kPseudoTargetLabel);
+    LIR* exitBranch = opCmpBranch(cUnit, kCondEq, rBase, rEnd, NULL);
+ loadWordDisp(cUnit, rBase, 0, rKey);
+ opRegImm(cUnit, kOpAdd, rBase, 8);
+ opCmpBranch(cUnit, kCondNe, rlSrc.lowReg, rKey, loopLabel);
+ int rDisp = oatAllocTemp(cUnit);
+ loadWordDisp(cUnit, rBase, -4, rDisp);
+ opRegRegReg(cUnit, kOpAdd, r_RA, r_RA, rDisp);
+ opReg(cUnit, kOpBx, r_RA);
+
+ // Loop exit
+ LIR* exitLabel = newLIR0(cUnit, kPseudoTargetLabel);
+ exitBranch->target = exitLabel;
+#endif
+}
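+
+// Editor's note: illustrative sketch only (not part of this change) of what
+// the key/displacement scan in the comment above computes, expressed in plain
+// C++. It assumes the payload is 'elements' consecutive {key, displacement}
+// pairs of 32-bit words.
+static inline int sparseSwitchLookup(const int* table, int elements, int testVal)
+{
+    for (int i = 0; i < elements; i++) {
+        int key  = table[2 * i];         // lw rKey, 0(rBase)
+        int disp = table[2 * i + 1];     // lw rDisp, -4(rBase)
+        if (key == testVal) {
+            return disp;                 // branch target is BaseLabel + disp
+        }
+    }
+    return 0;                            // no match: fall through to done:
+}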
+
+/*
+ * Code pattern will look something like:
+ *
+ * lw rVal
+ * jal BaseLabel ; stores "return address" (BaseLabel) in r_RA
+ * nop ; opportunistically fill
+ * [subiu rVal, bias] ; Remove bias if lowVal != 0
+ * bound check -> done
+ * lw rDisp, [r_RA, rVal]
+ * addu r_RA, rDisp
+ * jr r_RA
+ * done:
+ */
+void genPackedSwitch(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
+{
+ UNIMPLEMENTED(WARNING) << "genPackedSwitch";
+#if 0
+ const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
+ if (cUnit->printMe) {
+ dumpPackedSwitchTable(table);
+ }
+ // Add the table to the list - we'll process it later
+ SwitchTable *tabRec = (SwitchTable *)oatNew(cUnit, sizeof(SwitchTable),
+ true, kAllocData);
+ tabRec->table = table;
+ tabRec->vaddr = mir->offset;
+ int size = table[1];
+ tabRec->targets = (LIR* *)oatNew(cUnit, size * sizeof(LIR*), true,
+ kAllocLIR);
+ oatInsertGrowableList(cUnit, &cUnit->switchTables, (intptr_t)tabRec);
+
+ // Get the switch value
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+
+ // Prepare the bias. If too big, handle 1st stage here
+ int lowKey = s4FromSwitchData(&table[2]);
+ bool largeBias = false;
+ int rKey;
+ if (lowKey == 0) {
+ rKey = rlSrc.lowReg;
+ } else if ((lowKey & 0xffff) != lowKey) {
+ rKey = oatAllocTemp(cUnit);
+ loadConstant(cUnit, rKey, lowKey);
+ largeBias = true;
+ } else {
+ rKey = oatAllocTemp(cUnit);
+ }
+
+ // Must prevent code motion for the curr pc pair
+ genBarrier(cUnit);
+ newLIR0(cUnit, kMipsCurrPC); // Really a jal to .+8
+ // Now, fill the branch delay slot with bias strip
+ if (lowKey == 0) {
+ newLIR0(cUnit, kMipsNop);
+ } else {
+ if (largeBias) {
+ opRegRegReg(cUnit, kOpSub, rKey, rlSrc.lowReg, rKey);
+ } else {
+ opRegRegImm(cUnit, kOpSub, rKey, rlSrc.lowReg, lowKey);
+ }
+ }
+ genBarrier(cUnit); // Scheduling barrier
+
+ // Construct BaseLabel and set up table base register
+ LIR* baseLabel = newLIR0(cUnit, kPseudoTargetLabel);
+ // Remember base label so offsets can be computed later
+ tabRec->anchor = baseLabel;
+
+ // Bounds check - if < 0 or >= size continue following switch
+ LIR* branchOver = opCmpImmBranch(cUnit, kCondHi, rKey, size-1, NULL);
+
+ // Materialize the table base pointer
+ int rBase = oatAllocTemp(cUnit);
+ newLIR4(cUnit, kMipsDelta, rBase, 0, (intptr_t)baseLabel, (intptr_t)tabRec);
+
+ // Load the displacement from the switch table
+ int rDisp = oatAllocTemp(cUnit);
+ loadBaseIndexed(cUnit, rBase, rKey, rDisp, 2, kWord);
+
+ // Add to r_AP and go
+ opRegRegReg(cUnit, kOpAdd, r_RA, r_RA, rDisp);
+ opReg(cUnit, kOpBx, r_RA);
+
+ /* branchOver target here */
+ LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
+ branchOver->target = (LIR*)target;
+#endif
+}
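+
+// Editor's note: illustrative sketch only (not part of this change) of the
+// packed-switch lookup generated above: strip the bias, do a single unsigned
+// bounds check, then index the displacement table directly.
+static inline bool packedSwitchLookup(const int* dispTable, int size,
+                                      int lowKey, int testVal, int* dispOut)
+{
+    unsigned index = (unsigned)(testVal - lowKey);
+    if (index >= (unsigned)size) {
+        return false;                    // out of range: fall past the switch
+    }
+    *dispOut = dispTable[index];         // displacement relative to BaseLabel
+    return true;
+}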
+
+/*
+ * Array data table format:
+ * ushort ident = 0x0300 magic value
+ * ushort width width of each element in the table
+ * uint size number of elements in the table
+ * ubyte data[size*width] table of data values (may contain a single-byte
+ * padding at the end)
+ *
+ * Total size is 4+(width * size + 1)/2 16-bit code units.
+ */
+void genFillArrayData(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
+{
+ UNIMPLEMENTED(WARNING) << "genFillArrayData";
+#if 0
+ const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
+ // Add the table to the list - we'll process it later
+ FillArrayData *tabRec = (FillArrayData *)
+ oatNew(cUnit, sizeof(FillArrayData), true, kAllocData);
+ tabRec->table = table;
+ tabRec->vaddr = mir->offset;
+ u2 width = tabRec->table[1];
+ u4 size = tabRec->table[2] | (((u4)tabRec->table[3]) << 16);
+ tabRec->size = (size * width) + 8;
+
+ oatInsertGrowableList(cUnit, &cUnit->fillArrayData, (intptr_t)tabRec);
+
+ // Making a call - use explicit registers
+ oatFlushAllRegs(cUnit); /* Everything to home location */
+ oatLockCallTemps(cUnit);
+ loadValueDirectFixed(cUnit, rlSrc, rARG0);
+
+ // Must prevent code motion for the curr pc pair
+ genBarrier(cUnit);
+ newLIR0(cUnit, kMipsCurrPC); // Really a jal to .+8
+ // Now, fill the branch delay slot with the helper load
+ int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
+ pHandleFillArrayDataFromCode));
+ genBarrier(cUnit); // Scheduling barrier
+
+ // Construct BaseLabel and set up table base register
+ LIR* baseLabel = newLIR0(cUnit, kPseudoTargetLabel);
+
+ // Materialize a pointer to the fill data image
+ newLIR4(cUnit, kMipsDelta, rARG1, 0, (intptr_t)baseLabel, (intptr_t)tabRec);
+
+ // And go...
+ callRuntimeHelper(cUnit, rTgt); // ( array*, fill_data* )
+#endif
+}
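+
+// Editor's note: hedged sketch (not part of this change) of the payload layout
+// documented above, written as the equivalent struct. The name is purely
+// illustrative.
+struct FillArrayDataPayload {
+    u2 ident;    // 0x0300 magic value
+    u2 width;    // width of each element, in bytes
+    u4 size;     // number of elements in the table
+    // followed by size * width bytes of data, padded to a 16-bit boundary
+};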
+
+void genNegFloat(CompilationUnit *cUnit, RegLocation rlDest, RegLocation rlSrc)
+{
+    UNIMPLEMENTED(WARNING) << "genNegFloat";
+#if 0
+    RegLocation rlResult;
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ opRegRegImm(cUnit, kOpAdd, rlResult.lowReg,
+ rlSrc.lowReg, 0x80000000);
+ storeValue(cUnit, rlDest, rlResult);
+#endif
+}
+
+void genNegDouble(CompilationUnit *cUnit, RegLocation rlDest, RegLocation rlSrc)
+{
+ UNIMPLEMENTED(WARNING) << "genNegDouble";
+#if 0
+ RegLocation rlResult;
+ rlSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ opRegRegImm(cUnit, kOpAdd, rlResult.highReg, rlSrc.highReg,
+ 0x80000000);
+ opRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
+ storeValueWide(cUnit, rlDest, rlResult);
+#endif
+}
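+
+// Editor's note: illustrative aside (not part of this change): the kOpAdd of
+// 0x80000000 in the disabled bodies above simply flips the IEEE sign bit of
+// the (high) word, i.e. it is equivalent to an exclusive-or with 0x80000000.
+static inline u4 negateFloatBits(u4 bits)
+{
+    return bits ^ 0x80000000;            // flip the sign bit only
+}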
+
+/*
+ * TODO: implement fast path to short-circuit thin-lock case
+ */
+void genMonitorEnter(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
+{
+ UNIMPLEMENTED(WARNING) << "genMonitorEnter";
+#if 0
+ oatFlushAllRegs(cUnit);
+ loadValueDirectFixed(cUnit, rlSrc, rARG0); // Get obj
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ genNullCheck(cUnit, rlSrc.sRegLow, rARG0, mir);
+ // Go expensive route - artLockObjectFromCode(self, obj);
+ int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread, pLockObjectFromCode));
+ callRuntimeHelper(cUnit, rTgt);
+#endif
+}
+
+/*
+ * TODO: implement fast path to short-circuit thin-lock case
+ */
+void genMonitorExit(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
+{
+    UNIMPLEMENTED(WARNING) << "genMonitorExit";
+#if 0
+ oatFlushAllRegs(cUnit);
+ loadValueDirectFixed(cUnit, rlSrc, rARG0); // Get obj
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ genNullCheck(cUnit, rlSrc.sRegLow, rARG0, mir);
+ // Go expensive route - UnlockObjectFromCode(obj);
+ int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread, pUnlockObjectFromCode));
+ callRuntimeHelper(cUnit, rTgt);
+#endif
+}
+
+/*
+ * Compare two 64-bit values
+ * x = y return 0
+ * x < y return -1
+ * x > y return 1
+ *
+ * slt t0, x.hi, y.hi; # (x.hi < y.hi) ? 1:0
+ * sgt t1, x.hi, y.hi; # (y.hi > x.hi) ? 1:0
+ * subu res, t0, t1 # res = -1:1:0 for [ < > = ]
+ * bnez res, finish
+ * sltu t0, x.lo, y.lo
+ * sgtu r1, x.lo, y.lo
+ * subu res, t0, t1
+ * finish:
+ *
+ */
+void genCmpLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
+ RegLocation rlSrc1, RegLocation rlSrc2)
+{
+ UNIMPLEMENTED(WARNING) << "genCmpLong";
+#if 0
+ rlSrc1 = loadValueWide(cUnit, rlSrc1, kCoreReg);
+ rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
+ int t0 = oatAllocTemp(cUnit);
+ int t1 = oatAllocTemp(cUnit);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ newLIR3(cUnit, kMipsSlt, t0, rlSrc1.highReg, rlSrc2.highReg);
+ newLIR3(cUnit, kMipsSlt, t1, rlSrc2.highReg, rlSrc1.highReg);
+ newLIR3(cUnit, kMipsSubu, rlResult.lowReg, t1, t0);
+ LIR* branch = opCmpImmBranch(cUnit, kCondNe, rlResult.lowReg, 0, NULL);
+ newLIR3(cUnit, kMipsSltu, t0, rlSrc1.lowReg, rlSrc2.lowReg);
+ newLIR3(cUnit, kMipsSltu, t1, rlSrc2.lowReg, rlSrc1.lowReg);
+ newLIR3(cUnit, kMipsSubu, rlResult.lowReg, t1, t0);
+ oatFreeTemp(cUnit, t0);
+ oatFreeTemp(cUnit, t1);
+ LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
+ branch->target = (LIR*)target;
+ storeValue(cUnit, rlDest, rlResult);
+#endif
+}
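+
+// Editor's note: illustrative sketch only (not part of this change) of the
+// cmp-long result the disabled sequence above computes: compare the high words
+// as signed values, then the low words as unsigned, yielding -1, 0 or 1.
+static inline int cmpLongValues(s4 xHi, u4 xLo, s4 yHi, u4 yLo)
+{
+    if (xHi != yHi) {
+        return (xHi < yHi) ? -1 : 1;     // slt / sgt on the high words
+    }
+    if (xLo == yLo) {
+        return 0;
+    }
+    return (xLo < yLo) ? -1 : 1;         // sltu / sgtu on the low words
+}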
+
+LIR* opCmpBranch(CompilationUnit* cUnit, ConditionCode cond, int src1,
+ int src2, LIR* target)
+{
+ UNIMPLEMENTED(WARNING) << "opCmpBranch";
+ return NULL;
+#if 0
+ LIR* branch;
+ MipsOpCode sltOp;
+ MipsOpCode brOp;
+ bool cmpZero = false;
+ bool swapped = false;
+ switch(cond) {
+ case kCondEq:
+ brOp = kMipsBeq;
+ cmpZero = true;
+ break;
+ case kCondNe:
+ brOp = kMipsBne;
+ cmpZero = true;
+ break;
+ case kCondCc:
+ sltOp = kMipsSltu;
+ brOp = kMipsBnez;
+ break;
+ case kCondCs:
+ sltOp = kMipsSltu;
+ brOp = kMipsBeqz;
+ break;
+ case kCondGe:
+ sltOp = kMipsSlt;
+ brOp = kMipsBeqz;
+ break;
+ case kCondGt:
+ sltOp = kMipsSlt;
+ brOp = kMipsBnez;
+ swapped = true;
+ break;
+ case kCondLe:
+ sltOp = kMipsSlt;
+ brOp = kMipsBeqz;
+ swapped = true;
+ break;
+ case kCondLt:
+ sltOp = kMipsSlt;
+ brOp = kMipsBnez;
+ break;
+ case kCondHi: // Gtu
+ sltOp = kMipsSltu;
+ brOp = kMipsBnez;
+ swapped = true;
+ break;
+ default:
+ LOG(FATAL) << "No support for ConditionCode: " << (int) cond;
+ return NULL;
+ }
+ if (cmpZero) {
+ branch = newLIR2(cUnit, brOp, src1, src2);
+ } else {
+ int tReg = oatAllocTemp(cUnit);
+ if (swapped) {
+ newLIR3(cUnit, sltOp, tReg, src2, src1);
+ } else {
+ newLIR3(cUnit, sltOp, tReg, src1, src2);
+ }
+ branch = newLIR1(cUnit, brOp, tReg);
+ oatFreeTemp(cUnit, tReg);
+ }
+ branch->target = target;
+ return branch;
+#endif
+}
+
+LIR* opCmpImmBranch(CompilationUnit* cUnit, ConditionCode cond, int reg,
+ int checkValue, LIR* target)
+{
+    UNIMPLEMENTED(WARNING) << "opCmpImmBranch";
+ return NULL;
+#if 0
+ LIR* branch;
+ if (checkValue != 0) {
+ // TUNING: handle s16 & kCondLt/Mi case using slti
+ int tReg = oatAllocTemp(cUnit);
+ loadConstant(cUnit, tReg, checkValue);
+ branch = opCmpBranch(cUnit, cond, reg, tReg, target);
+ oatFreeTemp(cUnit, tReg);
+ return branch;
+ }
+ MipsOpCode opc;
+ switch(cond) {
+ case kCondEq: opc = kMipsBeqz; break;
+ case kCondGe: opc = kMipsBgez; break;
+ case kCondGt: opc = kMipsBgtz; break;
+ case kCondLe: opc = kMipsBlez; break;
+ //case KCondMi:
+ case kCondLt: opc = kMipsBltz; break;
+ case kCondNe: opc = kMipsBnez; break;
+ default:
+ // Tuning: use slti when applicable
+ int tReg = oatAllocTemp(cUnit);
+ loadConstant(cUnit, tReg, checkValue);
+ branch = opCmpBranch(cUnit, cond, reg, tReg, target);
+ oatFreeTemp(cUnit, tReg);
+ return branch;
+ }
+ branch = newLIR1(cUnit, opc, reg);
+ branch->target = target;
+ return branch;
+#endif
+}
+
+LIR* opRegCopyNoInsert(CompilationUnit *cUnit, int rDest, int rSrc)
+{
+ UNIMPLEMENTED(WARNING) << "opRegCopyNoInsert";
+ return NULL;
+#if 0
+ if (FPREG(rDest) || FPREG(rSrc))
+ return fpRegCopy(cUnit, rDest, rSrc);
+ LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, kMipsMove,
+ rDest, rSrc);
+ if (rDest == rSrc) {
+ res->flags.isNop = true;
+ }
+ return res;
+#endif
+}
+
+LIR* opRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
+{
+ LIR *res = opRegCopyNoInsert(cUnit, rDest, rSrc);
+ oatAppendLIR(cUnit, (LIR*)res);
+ return res;
+}
+
+void opRegCopyWide(CompilationUnit *cUnit, int destLo, int destHi,
+ int srcLo, int srcHi)
+{
+ UNIMPLEMENTED(WARNING) << "opRegCopyWide";
+#if 0
+ bool destFP = FPREG(destLo) && FPREG(destHi);
+ bool srcFP = FPREG(srcLo) && FPREG(srcHi);
+ assert(FPREG(srcLo) == FPREG(srcHi));
+ assert(FPREG(destLo) == FPREG(destHi));
+ if (destFP) {
+ if (srcFP) {
+ opRegCopy(cUnit, S2D(destLo, destHi), S2D(srcLo, srcHi));
+ } else {
+ /* note the operands are swapped for the mtc1 instr */
+ newLIR2(cUnit, kMipsMtc1, srcLo, destLo);
+ newLIR2(cUnit, kMipsMtc1, srcHi, destHi);
+ }
+ } else {
+ if (srcFP) {
+ newLIR2(cUnit, kMipsMfc1, destLo, srcLo);
+ newLIR2(cUnit, kMipsMfc1, destHi, srcHi);
+ } else {
+ // Handle overlap
+ if (srcHi == destLo) {
+ opRegCopy(cUnit, destHi, srcHi);
+ opRegCopy(cUnit, destLo, srcLo);
+ } else {
+ opRegCopy(cUnit, destLo, srcLo);
+ opRegCopy(cUnit, destHi, srcHi);
+ }
+ }
+ }
+#endif
+}
+
+} // namespace art
diff --git a/src/compiler/codegen/x86/X86/Ralloc.cc b/src/compiler/codegen/x86/X86/Ralloc.cc
new file mode 100644
index 0000000..2e0bb94
--- /dev/null
+++ b/src/compiler/codegen/x86/X86/Ralloc.cc
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace art {
+
+/*
+ * This file contains codegen for the X86 ISA and is intended to be
+ * included by:
+ *
+ * Codegen-$(TARGET_ARCH_VARIANT).c
+ *
+ */
+
+/*
+ * Alloc a pair of core registers, or a double. Low reg in low byte,
+ * high reg in next byte.
+ */
+int oatAllocTypedTempPair(CompilationUnit *cUnit, bool fpHint,
+ int regClass)
+{
+    UNIMPLEMENTED(WARNING) << "oatAllocTypedTempPair";
+ return 0;
+#if 0
+ int highReg;
+ int lowReg;
+ int res = 0;
+
+ if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg)) {
+ lowReg = oatAllocTempDouble(cUnit);
+ highReg = lowReg + 1;
+ res = (lowReg & 0xff) | ((highReg & 0xff) << 8);
+ return res;
+ }
+
+ lowReg = oatAllocTemp(cUnit);
+ highReg = oatAllocTemp(cUnit);
+ res = (lowReg & 0xff) | ((highReg & 0xff) << 8);
+ return res;
+#endif
+}
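+
+// Editor's note: small illustrative helpers (not part of this change) showing
+// the pair encoding described above: low register number in bits [7:0], high
+// register number in bits [15:8].
+static inline int encodeRegPair(int lowReg, int highReg)
+{
+    return (lowReg & 0xff) | ((highReg & 0xff) << 8);
+}
+static inline int pairLowReg(int pair)  { return pair & 0xff; }
+static inline int pairHighReg(int pair) { return (pair >> 8) & 0xff; }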
+
+int oatAllocTypedTemp(CompilationUnit *cUnit, bool fpHint, int regClass)
+{
+    if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg)) {
+        return oatAllocTempFloat(cUnit);
+    }
+    return oatAllocTemp(cUnit);
+}
+
+void oatInitializeRegAlloc(CompilationUnit* cUnit)
+{
+ UNIMPLEMENTED(WARNING) << "oatInitializeRegAlloc";
+#if 0
+ int numRegs = sizeof(coreRegs)/sizeof(*coreRegs);
+ int numReserved = sizeof(reservedRegs)/sizeof(*reservedRegs);
+ int numTemps = sizeof(coreTemps)/sizeof(*coreTemps);
+ int numFPRegs = sizeof(fpRegs)/sizeof(*fpRegs);
+ int numFPTemps = sizeof(fpTemps)/sizeof(*fpTemps);
+ RegisterPool *pool = (RegisterPool *)oatNew(cUnit, sizeof(*pool), true,
+ kAllocRegAlloc);
+ cUnit->regPool = pool;
+ pool->numCoreRegs = numRegs;
+ pool->coreRegs = (RegisterInfo *)
+ oatNew(cUnit, numRegs * sizeof(*cUnit->regPool->coreRegs),
+ true, kAllocRegAlloc);
+ pool->numFPRegs = numFPRegs;
+ pool->FPRegs = (RegisterInfo *)
+ oatNew(cUnit, numFPRegs * sizeof(*cUnit->regPool->FPRegs), true,
+ kAllocRegAlloc);
+ oatInitPool(pool->coreRegs, coreRegs, pool->numCoreRegs);
+ oatInitPool(pool->FPRegs, fpRegs, pool->numFPRegs);
+ // Keep special registers from being allocated
+ for (int i = 0; i < numReserved; i++) {
+ if (NO_SUSPEND && !cUnit->genDebugger &&
+ (reservedRegs[i] == rSUSPEND)) {
+ //To measure cost of suspend check
+ continue;
+ }
+ oatMarkInUse(cUnit, reservedRegs[i]);
+ }
+ // Mark temp regs - all others not in use can be used for promotion
+ for (int i = 0; i < numTemps; i++) {
+ oatMarkTemp(cUnit, coreTemps[i]);
+ }
+ for (int i = 0; i < numFPTemps; i++) {
+ oatMarkTemp(cUnit, fpTemps[i]);
+ }
+ // Construct the alias map.
+ cUnit->phiAliasMap = (int*)oatNew(cUnit, cUnit->numSSARegs *
+ sizeof(cUnit->phiAliasMap[0]), false,
+ kAllocDFInfo);
+ for (int i = 0; i < cUnit->numSSARegs; i++) {
+ cUnit->phiAliasMap[i] = i;
+ }
+ for (MIR* phi = cUnit->phiList; phi; phi = phi->meta.phiNext) {
+ int defReg = phi->ssaRep->defs[0];
+ for (int i = 0; i < phi->ssaRep->numUses; i++) {
+ for (int j = 0; j < cUnit->numSSARegs; j++) {
+ if (cUnit->phiAliasMap[j] == phi->ssaRep->uses[i]) {
+ cUnit->phiAliasMap[j] = defReg;
+ }
+ }
+ }
+ }
+#endif
+}
+
+void freeRegLocTemps(CompilationUnit* cUnit, RegLocation rlKeep,
+ RegLocation rlFree)
+{
+ if ((rlFree.lowReg != rlKeep.lowReg) && (rlFree.lowReg != rlKeep.highReg) &&
+ (rlFree.highReg != rlKeep.lowReg) && (rlFree.highReg != rlKeep.highReg)) {
+ // No overlap, free both
+ oatFreeTemp(cUnit, rlFree.lowReg);
+ oatFreeTemp(cUnit, rlFree.highReg);
+ }
+}
+
+} // namespace art
diff --git a/src/compiler/codegen/x86/X86RallocUtil.cc b/src/compiler/codegen/x86/X86RallocUtil.cc
new file mode 100644
index 0000000..3073258
--- /dev/null
+++ b/src/compiler/codegen/x86/X86RallocUtil.cc
@@ -0,0 +1,226 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This file contains X86-specific register allocation support.
+ */
+
+#include "../../CompilerUtility.h"
+#include "../../CompilerIR.h"
+#include "../..//Dataflow.h"
+#include "MipsLIR.h"
+#include "Codegen.h"
+#include "../Ralloc.h"
+
+namespace art {
+
+/*
+ * TUNING: is leaf? Can't just use "hasInvoke" to determine as some
+ * instructions might call out to C/assembly helper functions. Until
+ * machinery is in place, always spill lr.
+ */
+
+void oatAdjustSpillMask(CompilationUnit* cUnit)
+{
+ UNIMPLEMENTED(WARNING) << "oatAdjustSpillMask";
+#if 0
+ cUnit->coreSpillMask |= (1 << r_RA);
+ cUnit->numCoreSpills++;
+#endif
+}
+
+/*
+ * Mark a callee-save fp register as promoted. Note that
+ * vpush/vpop uses contiguous register lists so we must
+ * include any holes in the mask. Associate holes with
+ * Dalvik register INVALID_VREG (0xFFFFU).
+ */
+void oatMarkPreservedSingle(CompilationUnit* cUnit, int sReg, int reg)
+{
+ UNIMPLEMENTED(WARNING) << "oatMarkPreservedSingle";
+#if 0
+ LOG(FATAL) << "No support yet for promoted FP regs";
+#endif
+}
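+
+// Editor's note: hedged sketch (not part of this change) of the hole-filling
+// idea described above: the saved FP range must be contiguous, so every
+// register between the lowest and highest promoted one is included and the
+// holes are associated with INVALID_VREG. Relies on GCC/Clang builtins.
+static inline u4 contiguousFpSpillMask(u4 promotedMask)
+{
+    if (promotedMask == 0) {
+        return 0;
+    }
+    int lo = __builtin_ctz(promotedMask);       // lowest promoted register
+    int hi = 31 - __builtin_clz(promotedMask);  // highest promoted register
+    return (~0u >> (31 - hi)) & ~((1u << lo) - 1);
+}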
+
+void oatFlushRegWide(CompilationUnit* cUnit, int reg1, int reg2)
+{
+ RegisterInfo* info1 = oatGetRegInfo(cUnit, reg1);
+ RegisterInfo* info2 = oatGetRegInfo(cUnit, reg2);
+ DCHECK(info1 && info2 && info1->pair && info2->pair &&
+ (info1->partner == info2->reg) &&
+ (info2->partner == info1->reg));
+ if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
+ if (!(info1->isTemp && info2->isTemp)) {
+ /* Should not happen. If it does, there's a problem in evalLoc */
+ LOG(FATAL) << "Long half-temp, half-promoted";
+ }
+
+ info1->dirty = false;
+ info2->dirty = false;
+ if (oatS2VReg(cUnit, info2->sReg) <
+ oatS2VReg(cUnit, info1->sReg))
+ info1 = info2;
+ int vReg = oatS2VReg(cUnit, info1->sReg);
+ oatFlushRegWideImpl(cUnit, rSP,
+ oatVRegOffset(cUnit, vReg),
+ info1->reg, info1->partner);
+ }
+}
+
+void oatFlushReg(CompilationUnit* cUnit, int reg)
+{
+ RegisterInfo* info = oatGetRegInfo(cUnit, reg);
+ if (info->live && info->dirty) {
+ info->dirty = false;
+ int vReg = oatS2VReg(cUnit, info->sReg);
+ oatFlushRegImpl(cUnit, rSP,
+ oatVRegOffset(cUnit, vReg),
+ reg, kWord);
+ }
+}
+
+/* Give access to the target-dependent FP register encoding to common code */
+bool oatIsFpReg(int reg) {
+ return FPREG(reg);
+}
+
+uint32_t oatFpRegMask() {
+ return FP_REG_MASK;
+}
+
+/* Clobber all regs that might be used by an external C call */
+extern void oatClobberCalleeSave(CompilationUnit *cUnit)
+{
+ UNIMPLEMENTED(WARNING) << "oatClobberCalleeSave";
+#if 0
+ oatClobber(cUnit, r_ZERO);
+ oatClobber(cUnit, r_AT);
+ oatClobber(cUnit, r_V0);
+ oatClobber(cUnit, r_V1);
+ oatClobber(cUnit, r_A0);
+ oatClobber(cUnit, r_A1);
+ oatClobber(cUnit, r_A2);
+ oatClobber(cUnit, r_A3);
+ oatClobber(cUnit, r_T0);
+ oatClobber(cUnit, r_T1);
+ oatClobber(cUnit, r_T2);
+ oatClobber(cUnit, r_T3);
+ oatClobber(cUnit, r_T4);
+ oatClobber(cUnit, r_T5);
+ oatClobber(cUnit, r_T6);
+ oatClobber(cUnit, r_T7);
+ oatClobber(cUnit, r_T8);
+ oatClobber(cUnit, r_T9);
+ oatClobber(cUnit, r_K0);
+ oatClobber(cUnit, r_K1);
+ oatClobber(cUnit, r_GP);
+ oatClobber(cUnit, r_FP);
+ oatClobber(cUnit, r_RA);
+ oatClobber(cUnit, r_F0);
+ oatClobber(cUnit, r_F1);
+ oatClobber(cUnit, r_F2);
+ oatClobber(cUnit, r_F3);
+ oatClobber(cUnit, r_F4);
+ oatClobber(cUnit, r_F5);
+ oatClobber(cUnit, r_F6);
+ oatClobber(cUnit, r_F7);
+ oatClobber(cUnit, r_F8);
+ oatClobber(cUnit, r_F9);
+ oatClobber(cUnit, r_F10);
+ oatClobber(cUnit, r_F11);
+ oatClobber(cUnit, r_F12);
+ oatClobber(cUnit, r_F13);
+ oatClobber(cUnit, r_F14);
+ oatClobber(cUnit, r_F15);
+#endif
+}
+
+extern RegLocation oatGetReturnWide(CompilationUnit* cUnit)
+{
+ RegLocation res = LOC_C_RETURN_WIDE;
+ oatClobber(cUnit, res.lowReg);
+ oatClobber(cUnit, res.highReg);
+ oatMarkInUse(cUnit, res.lowReg);
+ oatMarkInUse(cUnit, res.highReg);
+ oatMarkPair(cUnit, res.lowReg, res.highReg);
+ return res;
+}
+
+extern RegLocation oatGetReturnWideAlt(CompilationUnit* cUnit)
+{
+ RegLocation res = LOC_C_RETURN_WIDE_ALT;
+ oatClobber(cUnit, res.lowReg);
+ oatClobber(cUnit, res.highReg);
+ oatMarkInUse(cUnit, res.lowReg);
+ oatMarkInUse(cUnit, res.highReg);
+ oatMarkPair(cUnit, res.lowReg, res.highReg);
+ return res;
+}
+
+extern RegLocation oatGetReturn(CompilationUnit* cUnit)
+{
+ RegLocation res = LOC_C_RETURN;
+ oatClobber(cUnit, res.lowReg);
+ oatMarkInUse(cUnit, res.lowReg);
+ return res;
+}
+
+extern RegLocation oatGetReturnAlt(CompilationUnit* cUnit)
+{
+ RegLocation res = LOC_C_RETURN_ALT;
+ oatClobber(cUnit, res.lowReg);
+ oatMarkInUse(cUnit, res.lowReg);
+ return res;
+}
+
+extern RegisterInfo* oatGetRegInfo(CompilationUnit* cUnit, int reg)
+{
+ return FPREG(reg) ? &cUnit->regPool->FPRegs[reg & FP_REG_MASK]
+ : &cUnit->regPool->coreRegs[reg];
+}
+
+/* To be used when explicitly managing register use */
+extern void oatLockCallTemps(CompilationUnit* cUnit)
+{
+ UNIMPLEMENTED(WARNING) << "oatLockCallTemps";
+#if 0
+ oatLockTemp(cUnit, rARG0);
+ oatLockTemp(cUnit, rARG1);
+ oatLockTemp(cUnit, rARG2);
+ oatLockTemp(cUnit, rARG3);
+#endif
+}
+
+/* To be used when explicitly managing register use */
+extern void oatFreeCallTemps(CompilationUnit* cUnit)
+{
+ UNIMPLEMENTED(WARNING) << "oatFreeCallTemps";
+#if 0
+ oatFreeTemp(cUnit, rARG0);
+ oatFreeTemp(cUnit, rARG1);
+ oatFreeTemp(cUnit, rARG2);
+ oatFreeTemp(cUnit, rARG3);
+#endif
+}
+
+/* Convert an instruction to a NOP */
+void oatNopLIR(LIR* lir)
+{
+    lir->flags.isNop = true;
+}
+
+} // namespace art
diff --git a/src/compiler/codegen/x86/x86/ArchVariant.cc b/src/compiler/codegen/x86/x86/ArchVariant.cc
new file mode 100644
index 0000000..41dbe1b
--- /dev/null
+++ b/src/compiler/codegen/x86/x86/ArchVariant.cc
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace art {
+
+/*
+ * This file is included by Codegen-$(TARGET_ARCH_VARIANT).cc, and implements architecture
+ * variant-specific code.
+ */
+
+/*
+ * Determine the initial instruction set to be used for this trace.
+ * Later components may decide to change this.
+ */
+OatInstructionSetType oatInstructionSet(void)
+{
+ return DALVIK_OAT_X86;
+}
+
+/* Architecture-specific initializations and checks go here */
+bool oatArchVariantInit(void)
+{
+ return true;
+}
+
+int dvmCompilerTargetOptHint(int key)
+{
+ int res;
+ switch (key) {
+ case kMaxHoistDistance:
+ res = 2;
+ break;
+ default:
+ LOG(FATAL) << "Unknown target optimization hint key: " << key;
+ }
+ return res;
+}
+
+void oatGenMemBarrier(CompilationUnit *cUnit, int barrierKind)
+{
+#if ANDROID_SMP != 0
+ UNIMPLEMENTED(WARNING) << "oatGenMemBarrier";
+#if 0
+ newLIR1(cUnit, kMipsSync, barrierKind);
+#endif
+#endif
+}
+
+} // namespace art
diff --git a/src/compiler/codegen/x86/x86/Codegen.cc b/src/compiler/codegen/x86/x86/Codegen.cc
new file mode 100644
index 0000000..b07ecc7
--- /dev/null
+++ b/src/compiler/codegen/x86/x86/Codegen.cc
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define _CODEGEN_C
+#define TARGET_X86
+
+#include "../../../Dalvik.h"
+#include "../../../CompilerInternals.h"
+#include "../X86LIR.h"
+#include "../../Ralloc.h"
+#include "../Codegen.h"
+
+/* X86 codegen building blocks */
+#include "../../CodegenUtil.cc"
+
+/* X86-specific factory utilities */
+#include "../X86/Factory.cc"
+/* Target independent factory utilities */
+#include "../../CodegenFactory.cc"
+/* Target independent gen routines */
+#include "../../GenCommon.cc"
+/* Shared invoke gen routines */
+#include "../../GenInvoke.cc"
+/* X86-specific architecture factory utilities */
+#include "../ArchFactory.cc"
+
+/* X86-specific codegen routines */
+#include "../X86/Gen.cc"
+/* FP codegen routines */
+#include "../FP/MipsFP.cc"
+
+/* X86-specific register allocation */
+#include "../X86/Ralloc.cc"
+
+/* MIR2LIR dispatcher and architectural independent codegen routines */
+#include "../../MethodCodegenDriver.cc"
+
+/* Target-independent local optimizations */
+#include "../../LocalOptimizations.cc"
+
+/* Architecture manifest */
+#include "ArchVariant.cc"