/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
#define ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_

#include "invoke_type.h"
#include "compiled_method.h"
#include "dex/compiler_enums.h"
#include "dex/compiler_ir.h"
#include "dex/backend.h"
#include "dex/growable_array.h"
#include "dex/arena_allocator.h"
#include "driver/compiler_driver.h"
#include "leb128_encoder.h"
#include "safe_map.h"

namespace art {

/*
 * TODO: refactoring pass to move these (and other) typedefs towards the usage style of the
 * runtime to add type safety (see runtime/offsets.h).
 */
typedef uint32_t DexOffset;          // Dex offset in code units.
typedef uint16_t NarrowDexOffset;    // For use in structs, Dex offsets range from 0 .. 0xffff.
typedef uint32_t CodeOffset;         // Native code offset in bytes.

// Set to 1 to measure cost of suspend check.
#define NO_SUSPEND 0

#define IS_BINARY_OP         (1ULL << kIsBinaryOp)
#define IS_BRANCH            (1ULL << kIsBranch)
#define IS_IT                (1ULL << kIsIT)
#define IS_LOAD              (1ULL << kMemLoad)
#define IS_QUAD_OP           (1ULL << kIsQuadOp)
#define IS_QUIN_OP           (1ULL << kIsQuinOp)
#define IS_SEXTUPLE_OP       (1ULL << kIsSextupleOp)
#define IS_STORE             (1ULL << kMemStore)
#define IS_TERTIARY_OP       (1ULL << kIsTertiaryOp)
#define IS_UNARY_OP          (1ULL << kIsUnaryOp)
#define NEEDS_FIXUP          (1ULL << kPCRelFixup)
#define NO_OPERAND           (1ULL << kNoOperand)
#define REG_DEF0             (1ULL << kRegDef0)
#define REG_DEF1             (1ULL << kRegDef1)
#define REG_DEFA             (1ULL << kRegDefA)
#define REG_DEFD             (1ULL << kRegDefD)
#define REG_DEF_FPCS_LIST0   (1ULL << kRegDefFPCSList0)
#define REG_DEF_FPCS_LIST2   (1ULL << kRegDefFPCSList2)
#define REG_DEF_LIST0        (1ULL << kRegDefList0)
#define REG_DEF_LIST1        (1ULL << kRegDefList1)
#define REG_DEF_LR           (1ULL << kRegDefLR)
#define REG_DEF_SP           (1ULL << kRegDefSP)
#define REG_USE0             (1ULL << kRegUse0)
#define REG_USE1             (1ULL << kRegUse1)
#define REG_USE2             (1ULL << kRegUse2)
#define REG_USE3             (1ULL << kRegUse3)
#define REG_USE4             (1ULL << kRegUse4)
#define REG_USEA             (1ULL << kRegUseA)
#define REG_USEC             (1ULL << kRegUseC)
#define REG_USED             (1ULL << kRegUseD)
#define REG_USEB             (1ULL << kRegUseB)
#define REG_USE_FPCS_LIST0   (1ULL << kRegUseFPCSList0)
#define REG_USE_FPCS_LIST2   (1ULL << kRegUseFPCSList2)
#define REG_USE_LIST0        (1ULL << kRegUseList0)
#define REG_USE_LIST1        (1ULL << kRegUseList1)
#define REG_USE_LR           (1ULL << kRegUseLR)
#define REG_USE_PC           (1ULL << kRegUsePC)
#define REG_USE_SP           (1ULL << kRegUseSP)
#define SETS_CCODES          (1ULL << kSetsCCodes)
#define USES_CCODES          (1ULL << kUsesCCodes)

// Common combo register usage patterns.
#define REG_DEF01            (REG_DEF0 | REG_DEF1)
#define REG_DEF01_USE2       (REG_DEF0 | REG_DEF1 | REG_USE2)
#define REG_DEF0_USE01       (REG_DEF0 | REG_USE01)
#define REG_DEF0_USE0        (REG_DEF0 | REG_USE0)
#define REG_DEF0_USE12       (REG_DEF0 | REG_USE12)
#define REG_DEF0_USE123      (REG_DEF0 | REG_USE123)
#define REG_DEF0_USE1        (REG_DEF0 | REG_USE1)
#define REG_DEF0_USE2        (REG_DEF0 | REG_USE2)
#define REG_DEFAD_USEAD      (REG_DEFAD_USEA | REG_USED)
#define REG_DEFAD_USEA       (REG_DEFA_USEA | REG_DEFD)
#define REG_DEFA_USEA        (REG_DEFA | REG_USEA)
#define REG_USE012           (REG_USE01 | REG_USE2)
#define REG_USE014           (REG_USE01 | REG_USE4)
#define REG_USE01            (REG_USE0 | REG_USE1)
#define REG_USE02            (REG_USE0 | REG_USE2)
#define REG_USE12            (REG_USE1 | REG_USE2)
#define REG_USE23            (REG_USE2 | REG_USE3)
#define REG_USE123           (REG_USE1 | REG_USE2 | REG_USE3)
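// Example (illustrative only): a three-operand ALU instruction that writes operand 0, reads
// operands 1 and 2 and updates the condition codes would be tagged in its target's encoding
// table with something like (IS_TERTIARY_OP | REG_DEF0_USE12 | SETS_CCODES).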

struct BasicBlock;
struct CallInfo;
struct CompilationUnit;
struct InlineMethod;
struct MIR;
struct LIR;
struct RegLocation;
struct RegisterInfo;
class DexFileMethodInliner;
class MIRGraph;
class Mir2Lir;

typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int,
                            const MethodReference& target_method,
                            uint32_t method_idx, uintptr_t direct_code,
                            uintptr_t direct_method, InvokeType type);
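// A NextCallInsn implementation (e.g. NextSDCallInsn for static/direct invokes) is called
// repeatedly while arguments are marshalled; the value it returns becomes the call_state passed
// to the next step.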

typedef std::vector<uint8_t> CodeBuffer;

struct UseDefMasks {
  uint64_t use_mask;        // Resource mask for use.
  uint64_t def_mask;        // Resource mask for def.
};

struct AssemblyInfo {
  LIR* pcrel_next;           // Chain of LIR nodes needing pc relative fixups.
  uint8_t bytes[16];         // Encoded instruction bytes.
};

struct LIR {
  CodeOffset offset;             // Offset of this instruction.
  NarrowDexOffset dalvik_offset;   // Offset of Dalvik opcode in code units (16-bit words).
  int16_t opcode;
  LIR* next;
  LIR* prev;
  LIR* target;
  struct {
    unsigned int alias_info:17;  // For Dalvik register disambiguation.
    bool is_nop:1;               // LIR is optimized away.
    unsigned int size:4;         // Note: size of encoded instruction is in bytes.
    bool use_def_invalid:1;      // If true, masks should not be used.
    unsigned int generation:1;   // Used to track visitation state during fixup pass.
    unsigned int fixup:8;        // Fixup kind.
  } flags;
  union {
    UseDefMasks m;               // Use & Def masks used during optimization.
    AssemblyInfo a;              // Instruction encoding used during assembly phase.
  } u;
  int32_t operands[5];           // [0..4] = [dest, src1, src2, extra, extra2].
};

// Target-specific initialization.
Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena);
Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                           ArenaAllocator* const arena);
Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena);

// Utility macros to traverse the LIR list.
#define NEXT_LIR(lir) (lir->next)
#define PREV_LIR(lir) (lir->prev)

// Defines for alias_info (tracks Dalvik register references).
#define DECODE_ALIAS_INFO_REG(X)        (X & 0xffff)
#define DECODE_ALIAS_INFO_WIDE_FLAG     (0x10000)
#define DECODE_ALIAS_INFO_WIDE(X)       ((X & DECODE_ALIAS_INFO_WIDE_FLAG) ? 1 : 0)
#define ENCODE_ALIAS_INFO(REG, ISWIDE)  (REG | (ISWIDE ? DECODE_ALIAS_INFO_WIDE_FLAG : 0))
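// Example: ENCODE_ALIAS_INFO(5, true) yields 0x10005; DECODE_ALIAS_INFO_REG() then recovers
// register 5 and DECODE_ALIAS_INFO_WIDE() reports 1 (a wide/64-bit Dalvik value).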

// Common resource macros.
#define ENCODE_CCODE            (1ULL << kCCode)
#define ENCODE_FP_STATUS        (1ULL << kFPStatus)

// Abstract memory locations.
#define ENCODE_DALVIK_REG       (1ULL << kDalvikReg)
#define ENCODE_LITERAL          (1ULL << kLiteral)
#define ENCODE_HEAP_REF         (1ULL << kHeapRef)
#define ENCODE_MUST_NOT_ALIAS   (1ULL << kMustNotAlias)

#define ENCODE_ALL              (~0ULL)
#define ENCODE_MEM              (ENCODE_DALVIK_REG | ENCODE_LITERAL | \
                                 ENCODE_HEAP_REF | ENCODE_MUST_NOT_ALIAS)

// Mask to denote sreg as the start of a double.  Must not interfere with low 16 bits.
#define STARTING_DOUBLE_SREG 0x10000
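// For example, a promotion candidate whose low half is s_reg 3 can be tracked under the key
// (3 | STARTING_DOUBLE_SREG) so it is never confused with a plain 32-bit use of s_reg 3.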

// TODO: replace these macros
#define SLOW_FIELD_PATH (cu_->enable_debug & (1 << kDebugSlowFieldPath))
#define SLOW_INVOKE_PATH (cu_->enable_debug & (1 << kDebugSlowInvokePath))
#define SLOW_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowStringPath))
#define SLOW_TYPE_PATH (cu_->enable_debug & (1 << kDebugSlowTypePath))
#define EXERCISE_SLOWEST_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowestStringPath))

class Mir2Lir : public Backend {
  public:
    /*
     * Auxiliary information describing the location of data embedded in the Dalvik
     * byte code stream.
     */
    struct EmbeddedData {
      CodeOffset offset;        // Code offset of data block.
      const uint16_t* table;      // Original dex data.
      DexOffset vaddr;            // Dalvik offset of parent opcode.
    };

    struct FillArrayData : EmbeddedData {
      int32_t size;
    };

    struct SwitchTable : EmbeddedData {
      LIR* anchor;                // Reference instruction for relative offsets.
      LIR** targets;              // Array of case targets.
    };

    /* Static register use counts */
    struct RefCounts {
      int count;
      int s_reg;
    };

    /*
     * Data structure tracking the mapping between a Dalvik register (pair) and a
     * native register (pair). The idea is to reuse the previously loaded value
     * if possible, otherwise to keep the value in a native register as long as
     * possible.
     */
    struct RegisterInfo {
      int reg;                    // Reg number
      bool in_use;                // Has it been allocated?
      bool is_temp;               // Can allocate as temp?
      bool pair;                  // Part of a register pair?
      int partner;                // If pair, other reg of pair.
      bool live;                  // Is there an associated SSA name?
      bool dirty;                 // If live, is it dirty?
      int s_reg;                  // Name of live value.
      LIR *def_start;             // Starting inst in last def sequence.
      LIR *def_end;               // Ending inst in last def sequence.
    };

    struct RegisterPool {
      int num_core_regs;
      RegisterInfo *core_regs;
      int next_core_reg;
      int num_fp_regs;
      RegisterInfo *FPRegs;
      int next_fp_reg;
    };

    struct PromotionMap {
      RegLocationType core_location:3;
      uint8_t core_reg;
      RegLocationType fp_location:3;
      uint8_t FpReg;
      bool first_in_pair;
    };

    virtual ~Mir2Lir() {}

    int32_t s4FromSwitchData(const void* switch_data) {
      return *reinterpret_cast<const int32_t*>(switch_data);
    }

    RegisterClass oat_reg_class_by_size(OpSize size) {
      return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte ||
              size == kSignedByte) ? kCoreReg : kAnyReg;
    }

    size_t CodeBufferSizeInBytes() {
      return code_buffer_.size() * sizeof(code_buffer_[0]);
    }

    bool IsPseudoLirOp(int opcode) {
      return (opcode < 0);
    }
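    // Note: pseudo opcodes (labels, barriers and other bookkeeping LIR) are represented by
    // negative opcode values, which is what IsPseudoLirOp() tests for.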

    /*
     * LIR operands are 32-bit integers.  Sometimes (especially when managing
     * instructions which require PC-relative fixups) we need the operands to carry
     * pointers.  To do this, we assign these pointers an index in pointer_storage_, and
     * hold that index in the operand array.
     * TUNING: If use of these utilities becomes more common on 32-bit builds, it
     * may be worth conditionally-compiling a set of identity functions here.
     */
    uint32_t WrapPointer(void* pointer) {
      uint32_t res = pointer_storage_.Size();
      pointer_storage_.Insert(pointer);
      return res;
    }

    void* UnwrapPointer(size_t index) {
      return pointer_storage_.Get(index);
    }
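    // Usage sketch (illustrative): a producer stashes a pointer with
    //   lir->operands[n] = WrapPointer(tab_rec);
    // and the consumer later recovers it with
    //   SwitchTable* tab_rec = reinterpret_cast<SwitchTable*>(UnwrapPointer(lir->operands[n]));
    // where 'n' and 'tab_rec' stand in for whatever operand slot and payload the caller uses.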

    // strdup(), but allocates from the arena.
    char* ArenaStrdup(const char* str) {
      size_t len = strlen(str) + 1;
      char* res = reinterpret_cast<char*>(arena_->Alloc(len, ArenaAllocator::kAllocMisc));
      if (res != NULL) {
        strncpy(res, str, len);
      }
      return res;
    }

    // Shared by all targets - implemented in codegen_util.cc
    void AppendLIR(LIR* lir);
    void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
    void InsertLIRAfter(LIR* current_lir, LIR* new_lir);

    int ComputeFrameSize();
    virtual void Materialize();
    virtual CompiledMethod* GetCompiledMethod();
    void MarkSafepointPC(LIR* inst);
    bool FastInstance(uint32_t field_idx, bool is_put, int* field_offset, bool* is_volatile);
    void SetupResourceMasks(LIR* lir);
    void SetMemRefType(LIR* lir, bool is_load, int mem_type);
    void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
    void SetupRegMask(uint64_t* mask, int reg);
    void DumpLIRInsn(LIR* arg, unsigned char* base_addr);
    void DumpPromotionMap();
    void CodegenDump();
    LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
                int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
    LIR* NewLIR0(int opcode);
    LIR* NewLIR1(int opcode, int dest);
    LIR* NewLIR2(int opcode, int dest, int src1);
    LIR* NewLIR3(int opcode, int dest, int src1, int src2);
    LIR* NewLIR4(int opcode, int dest, int src1, int src2, int info);
    LIR* NewLIR5(int opcode, int dest, int src1, int src2, int info1, int info2);
    LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta);
    LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi);
    LIR* AddWordData(LIR* *constant_list_p, int value);
    LIR* AddWideData(LIR* *constant_list_p, int val_lo, int val_hi);
    void ProcessSwitchTables();
    void DumpSparseSwitchTable(const uint16_t* table);
    void DumpPackedSwitchTable(const uint16_t* table);
    void MarkBoundary(DexOffset offset, const char* inst_str);
    void NopLIR(LIR* lir);
    void UnlinkLIR(LIR* lir);
    bool EvaluateBranch(Instruction::Code opcode, int src1, int src2);
    bool IsInexpensiveConstant(RegLocation rl_src);
    ConditionCode FlipComparisonOrder(ConditionCode before);
    void InstallLiteralPools();
    void InstallSwitchTables();
    void InstallFillArrayData();
    bool VerifyCatchEntries();
    void CreateMappingTables();
    void CreateNativeGcMap();
    int AssignLiteralOffset(CodeOffset offset);
    int AssignSwitchTablesOffset(CodeOffset offset);
    int AssignFillArrayDataOffset(CodeOffset offset);
    LIR* InsertCaseLabel(DexOffset vaddr, int keyVal);
    void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec);
    void MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec);

    // Shared by all targets - implemented in local_optimizations.cc
    void ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src);
    void ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir);
    void ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir);
    void ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir);

    // Shared by all targets - implemented in ralloc_util.cc
    int GetSRegHi(int lowSreg);
    bool oat_live_out(int s_reg);
    int oatSSASrc(MIR* mir, int num);
    void SimpleRegAlloc();
    void ResetRegPool();
    void CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num);
    void DumpRegPool(RegisterInfo* p, int num_regs);
    void DumpCoreRegPool();
    void DumpFpRegPool();
    /* Mark a temp register as dead.  Does not affect allocation state. */
    void Clobber(int reg) {
      ClobberBody(GetRegInfo(reg));
    }
    void ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg);
    void ClobberSReg(int s_reg);
    int SRegToPMap(int s_reg);
    void RecordCorePromotion(int reg, int s_reg);
    int AllocPreservedCoreReg(int s_reg);
    void RecordFpPromotion(int reg, int s_reg);
    int AllocPreservedSingle(int s_reg);
    int AllocPreservedDouble(int s_reg);
    int AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp, bool required);
    virtual int AllocTempDouble();
    int AllocFreeTemp();
    int AllocTemp();
    int AllocTempFloat();
    RegisterInfo* AllocLiveBody(RegisterInfo* p, int num_regs, int s_reg);
    RegisterInfo* AllocLive(int s_reg, int reg_class);
    void FreeTemp(int reg);
    RegisterInfo* IsLive(int reg);
    RegisterInfo* IsTemp(int reg);
    RegisterInfo* IsPromoted(int reg);
    bool IsDirty(int reg);
    void LockTemp(int reg);
    void ResetDef(int reg);
    void NullifyRange(LIR *start, LIR *finish, int s_reg1, int s_reg2);
    void MarkDef(RegLocation rl, LIR *start, LIR *finish);
    void MarkDefWide(RegLocation rl, LIR *start, LIR *finish);
    RegLocation WideToNarrow(RegLocation rl);
    void ResetDefLoc(RegLocation rl);
    virtual void ResetDefLocWide(RegLocation rl);
    void ResetDefTracking();
    void ClobberAllRegs();
    void FlushAllRegsBody(RegisterInfo* info, int num_regs);
    void FlushAllRegs();
    bool RegClassMatches(int reg_class, int reg);
    void MarkLive(int reg, int s_reg);
    void MarkTemp(int reg);
    void UnmarkTemp(int reg);
    void MarkPair(int low_reg, int high_reg);
    void MarkClean(RegLocation loc);
    void MarkDirty(RegLocation loc);
    void MarkInUse(int reg);
    void CopyRegInfo(int new_reg, int old_reg);
    bool CheckCorePoolSanity();
    RegLocation UpdateLoc(RegLocation loc);
    virtual RegLocation UpdateLocWide(RegLocation loc);
    RegLocation UpdateRawLoc(RegLocation loc);

    /**
     * @brief Used to load a register location into a typed temporary register pair.
     * @see EvalLoc
     * @param loc The register location to load from.
     * @param reg_class Type of register needed.
     * @param update Whether the liveness information should be updated.
     * @return Returns the properly typed temporary as a physical register pair.
     */
    virtual RegLocation EvalLocWide(RegLocation loc, int reg_class, bool update);

    /**
     * @brief Used to load a register location into a typed temporary.
     * @param loc The register location to load from.
     * @param reg_class Type of register needed.
     * @param update Whether the liveness information should be updated.
     * @return Returns the properly typed temporary in a physical register.
     */
    virtual RegLocation EvalLoc(RegLocation loc, int reg_class, bool update);
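    // Typical use from a target-independent generator (a sketch only, assuming a unary int op
    // and the low_reg field of RegLocation):
    //   rl_src = LoadValue(rl_src, kCoreReg);           // Operand into a core register.
    //   rl_result = EvalLoc(rl_dest, kCoreReg, true);   // Pick a typed temp for the result.
    //   OpRegReg(kOpNeg, rl_result.low_reg, rl_src.low_reg);
    //   StoreValue(rl_dest, rl_result);                 // Commit per bytecode semantics.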

    void CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs);
    void DumpCounts(const RefCounts* arr, int size, const char* msg);
    void DoPromotion();
    int VRegOffset(int v_reg);
    int SRegOffset(int s_reg);
    RegLocation GetReturnWide(bool is_double);
    RegLocation GetReturn(bool is_float);
    RegisterInfo* GetRegInfo(int reg);

    // Shared by all targets - implemented in gen_common.cc.
    bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                          RegLocation rl_src, RegLocation rl_dest, int lit);
    bool HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit);
    void HandleSuspendLaunchPads();
    void HandleIntrinsicLaunchPads();
    void HandleThrowLaunchPads();
    void GenBarrier();
    LIR* GenCheck(ConditionCode c_code, ThrowKind kind);
    LIR* GenImmedCheck(ConditionCode c_code, int reg, int imm_val,
                       ThrowKind kind);
    LIR* GenNullCheck(int s_reg, int m_reg, int opt_flags);
    LIR* GenRegRegCheck(ConditionCode c_code, int reg1, int reg2,
                        ThrowKind kind);
    void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                             RegLocation rl_src2, LIR* taken, LIR* fall_through);
    void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
                                 LIR* taken, LIR* fall_through);
    void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
    void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                         RegLocation rl_src);
    void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                     RegLocation rl_src);
    void GenFilledNewArray(CallInfo* info);
    void GenSput(uint32_t field_idx, RegLocation rl_src,
                 bool is_long_or_double, bool is_object);
    void GenSget(uint32_t field_idx, RegLocation rl_dest,
                 bool is_long_or_double, bool is_object);
    void GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
                 RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
    void GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
                 RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
    void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                        RegLocation rl_src);

    void GenConstClass(uint32_t type_idx, RegLocation rl_dest);
    void GenConstString(uint32_t string_idx, RegLocation rl_dest);
    void GenNewInstance(uint32_t type_idx, RegLocation rl_dest);
    void GenThrow(RegLocation rl_src);
    void GenInstanceof(uint32_t type_idx, RegLocation rl_dest,
                       RegLocation rl_src);
    void GenCheckCast(uint32_t insn_idx, uint32_t type_idx,
                      RegLocation rl_src);
    void GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                      RegLocation rl_src1, RegLocation rl_src2);
    void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                        RegLocation rl_src1, RegLocation rl_shift);
    void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                       RegLocation rl_src1, RegLocation rl_src2);
    void GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest,
                          RegLocation rl_src, int lit);
    void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                        RegLocation rl_src1, RegLocation rl_src2);
    void GenConversionCall(ThreadOffset func_offset, RegLocation rl_dest,
                           RegLocation rl_src);
    void GenSuspendTest(int opt_flags);
    void GenSuspendTestAndBranch(int opt_flags, LIR* target);
    // This will be overridden by x86 implementation.
    virtual void GenConstWide(RegLocation rl_dest, int64_t value);

    // Shared by all targets - implemented in gen_invoke.cc.
    int CallHelperSetup(ThreadOffset helper_offset);
    LIR* CallHelper(int r_tgt, ThreadOffset helper_offset, bool safepoint_pc);
    void CallRuntimeHelperImm(ThreadOffset helper_offset, int arg0, bool safepoint_pc);
    void CallRuntimeHelperReg(ThreadOffset helper_offset, int arg0, bool safepoint_pc);
    void CallRuntimeHelperRegLocation(ThreadOffset helper_offset, RegLocation arg0,
                                      bool safepoint_pc);
    void CallRuntimeHelperImmImm(ThreadOffset helper_offset, int arg0, int arg1,
                                 bool safepoint_pc);
    void CallRuntimeHelperImmRegLocation(ThreadOffset helper_offset, int arg0,
                                         RegLocation arg1, bool safepoint_pc);
    void CallRuntimeHelperRegLocationImm(ThreadOffset helper_offset, RegLocation arg0,
                                         int arg1, bool safepoint_pc);
    void CallRuntimeHelperImmReg(ThreadOffset helper_offset, int arg0, int arg1,
                                 bool safepoint_pc);
    void CallRuntimeHelperRegImm(ThreadOffset helper_offset, int arg0, int arg1,
                                 bool safepoint_pc);
    void CallRuntimeHelperImmMethod(ThreadOffset helper_offset, int arg0,
                                    bool safepoint_pc);
    void CallRuntimeHelperRegMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc);
    void CallRuntimeHelperRegMethodRegLocation(ThreadOffset helper_offset, int arg0,
                                               RegLocation arg2, bool safepoint_pc);
    void CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset,
                                                 RegLocation arg0, RegLocation arg1,
                                                 bool safepoint_pc);
    void CallRuntimeHelperRegReg(ThreadOffset helper_offset, int arg0, int arg1,
                                 bool safepoint_pc);
    void CallRuntimeHelperRegRegImm(ThreadOffset helper_offset, int arg0, int arg1,
                                    int arg2, bool safepoint_pc);
    void CallRuntimeHelperImmMethodRegLocation(ThreadOffset helper_offset, int arg0,
                                               RegLocation arg2, bool safepoint_pc);
    void CallRuntimeHelperImmMethodImm(ThreadOffset helper_offset, int arg0, int arg2,
                                       bool safepoint_pc);
    void CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset helper_offset,
                                                    int arg0, RegLocation arg1, RegLocation arg2,
                                                    bool safepoint_pc);
    void CallRuntimeHelperRegLocationRegLocationRegLocation(ThreadOffset helper_offset,
                                                            RegLocation arg0, RegLocation arg1,
                                                            RegLocation arg2,
                                                            bool safepoint_pc);
    void GenInvoke(CallInfo* info);
    void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
    int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
                             NextCallInsn next_call_insn,
                             const MethodReference& target_method,
                             uint32_t vtable_idx,
                             uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                             bool skip_this);
    int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
                           NextCallInsn next_call_insn,
                           const MethodReference& target_method,
                           uint32_t vtable_idx,
                           uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                           bool skip_this);

    /**
     * @brief Used to determine the register location of destination.
     * @details This is needed during generation of inline intrinsics because it finds the
     * destination of the return value: either the physical register or the target of move-result.
     * @param info Information about the invoke.
     * @return Returns the destination location.
     */
    RegLocation InlineTarget(CallInfo* info);

    /**
     * @brief Used to determine the wide register location of destination.
     * @see InlineTarget
     * @param info Information about the invoke.
     * @return Returns the destination location.
     */
    RegLocation InlineTargetWide(CallInfo* info);

    bool GenInlinedCharAt(CallInfo* info);
    bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
    bool GenInlinedReverseBytes(CallInfo* info, OpSize size);
    bool GenInlinedAbsInt(CallInfo* info);
    bool GenInlinedAbsLong(CallInfo* info);
    bool GenInlinedFloatCvt(CallInfo* info);
    bool GenInlinedDoubleCvt(CallInfo* info);
    bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
    bool GenInlinedStringCompareTo(CallInfo* info);
    bool GenInlinedCurrentThread(CallInfo* info);
    bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_volatile);
    bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object,
                             bool is_volatile, bool is_ordered);
    int LoadArgRegs(CallInfo* info, int call_state,
                    NextCallInsn next_call_insn,
                    const MethodReference& target_method,
                    uint32_t vtable_idx,
                    uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                    bool skip_this);

    // Shared by all targets - implemented in gen_loadstore.cc.
    RegLocation LoadCurrMethod();
    void LoadCurrMethodDirect(int r_tgt);
    LIR* LoadConstant(int r_dest, int value);
    LIR* LoadWordDisp(int rBase, int displacement, int r_dest);
    RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind);
    RegLocation LoadValueWide(RegLocation rl_src, RegisterClass op_kind);
    void LoadValueDirect(RegLocation rl_src, int r_dest);
    void LoadValueDirectFixed(RegLocation rl_src, int r_dest);
    void LoadValueDirectWide(RegLocation rl_src, int reg_lo, int reg_hi);
    void LoadValueDirectWideFixed(RegLocation rl_src, int reg_lo, int reg_hi);
    LIR* StoreWordDisp(int rBase, int displacement, int r_src);

    /**
     * @brief Used to do the final store in the destination as per bytecode semantics.
     * @param rl_dest The destination dalvik register location.
     * @param rl_src The source register location. Can be either physical register or dalvik register.
     */
    void StoreValue(RegLocation rl_dest, RegLocation rl_src);

    /**
     * @brief Used to do the final store in a wide destination as per bytecode semantics.
     * @see StoreValue
     * @param rl_dest The destination dalvik register location.
     * @param rl_src The source register location. Can be either physical register or dalvik register.
     */
    void StoreValueWide(RegLocation rl_dest, RegLocation rl_src);

    /**
     * @brief Used to do the final store in a wide destination as per bytecode semantics.
     * @see StoreValueWide
     * @param rl_dest The destination dalvik register location.
     * @param rl_src The source register location. It must be kLocPhysReg.
     *
     * This is used for x86 two-operand computations, where the correct register values have
     * already been computed and now just need to be recorded in the destination.  This avoids the
     * extra pair of register copies that would result if StoreValueWide were called.
     */
    void StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src);

    // Shared by all targets - implemented in mir_to_lir.cc.
    void CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list);
    void HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir);
    bool MethodBlockCodeGen(BasicBlock* bb);
    void SpecialMIR2LIR(const InlineMethod& special);
    void MethodMIR2LIR();

    // Required for target - codegen helpers.
    virtual bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
    virtual int LoadHelper(ThreadOffset offset) = 0;
    virtual LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg) = 0;
    virtual LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
                                  int s_reg) = 0;
    virtual LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size) = 0;
    virtual LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
                                     int r_dest, int r_dest_hi, OpSize size, int s_reg) = 0;
    virtual LIR* LoadConstantNoClobber(int r_dest, int value) = 0;
    virtual LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) = 0;
    virtual LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size) = 0;
    virtual LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi) = 0;
    virtual LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size) = 0;
    virtual LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
                                      int r_src, int r_src_hi, OpSize size, int s_reg) = 0;
    virtual void MarkGCCard(int val_reg, int tgt_addr_reg) = 0;

    // Required for target - register utilities.
    virtual bool IsFpReg(int reg) = 0;
    virtual bool SameRegType(int reg1, int reg2) = 0;
    virtual int AllocTypedTemp(bool fp_hint, int reg_class) = 0;
    virtual int AllocTypedTempPair(bool fp_hint, int reg_class) = 0;
    virtual int S2d(int low_reg, int high_reg) = 0;
    virtual int TargetReg(SpecialTargetRegister reg) = 0;
    virtual RegLocation GetReturnAlt() = 0;
    virtual RegLocation GetReturnWideAlt() = 0;
    virtual RegLocation LocCReturn() = 0;
    virtual RegLocation LocCReturnDouble() = 0;
    virtual RegLocation LocCReturnFloat() = 0;
    virtual RegLocation LocCReturnWide() = 0;
    virtual uint32_t FpRegMask() = 0;
    virtual uint64_t GetRegMaskCommon(int reg) = 0;
    virtual void AdjustSpillMask() = 0;
    virtual void ClobberCallerSave() = 0;
    virtual void FlushReg(int reg) = 0;
    virtual void FlushRegWide(int reg1, int reg2) = 0;
    virtual void FreeCallTemps() = 0;
    virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) = 0;
    virtual void LockCallTemps() = 0;
    virtual void MarkPreservedSingle(int v_reg, int reg) = 0;
    virtual void CompilerInitializeRegAlloc() = 0;

    // Required for target - miscellaneous.
    virtual void AssembleLIR() = 0;
    virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix) = 0;
    virtual void SetupTargetResourceMasks(LIR* lir, uint64_t flags) = 0;
    virtual const char* GetTargetInstFmt(int opcode) = 0;
    virtual const char* GetTargetInstName(int opcode) = 0;
    virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) = 0;
    virtual uint64_t GetPCUseDefEncoding() = 0;
    virtual uint64_t GetTargetInstFlags(int opcode) = 0;
    virtual int GetInsnSize(LIR* lir) = 0;
    virtual bool IsUnconditionalBranch(LIR* lir) = 0;

    // Required for target - Dalvik-level generators.
    virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                   RegLocation rl_src1, RegLocation rl_src2) = 0;
    virtual void GenMulLong(Instruction::Code,
                            RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenAddLong(Instruction::Code,
                            RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenAndLong(Instruction::Code,
                            RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenArithOpDouble(Instruction::Code opcode,
                                  RegLocation rl_dest, RegLocation rl_src1,
                                  RegLocation rl_src2) = 0;
    virtual void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_src2) = 0;
    virtual void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
                          RegLocation rl_src1, RegLocation rl_src2) = 0;
    virtual void GenConversion(Instruction::Code opcode, RegLocation rl_dest,
                               RegLocation rl_src) = 0;
    virtual bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) = 0;

    /**
     * @brief Used to generate code for intrinsic java\.lang\.Math methods min and max.
     * @details This is also applicable to java\.lang\.StrictMath since it is a simple algorithm
     * that operates on integers. The generated code will write the smallest or largest value
     * directly into the destination register as specified by the invoke information.
     * @param info Information about the invoke.
     * @param is_min If true, generates code that computes the minimum; otherwise computes the maximum.
     * @return Returns true if the intrinsic code was successfully generated.
     */
    virtual bool GenInlinedMinMaxInt(CallInfo* info, bool is_min) = 0;

    virtual bool GenInlinedSqrt(CallInfo* info) = 0;
    virtual bool GenInlinedPeek(CallInfo* info, OpSize size) = 0;
    virtual bool GenInlinedPoke(CallInfo* info, OpSize size) = 0;
    virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenOrLong(Instruction::Code,
                           RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2) = 0;
    virtual void GenSubLong(Instruction::Code,
                            RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenXorLong(Instruction::Code,
                            RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base,
                                int offset, ThrowKind kind) = 0;
    virtual RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi,
                                  bool is_div) = 0;
    virtual RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit,
                                     bool is_div) = 0;
    /*
     * @brief Generate an integer div or rem operation.
     * @param rl_dest Destination Location.
     * @param rl_src1 Numerator Location.
     * @param rl_src2 Divisor Location.
     * @param is_div 'true' if this is a division, 'false' for a remainder.
     * @param check_zero 'true' if an exception should be generated if the divisor is 0.
     */
    virtual RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                  RegLocation rl_src2, bool is_div, bool check_zero) = 0;
    /*
     * @brief Generate an integer div or rem operation by a literal.
     * @param rl_dest Destination Location.
     * @param rl_src1 Numerator Location.
     * @param lit Divisor.
     * @param is_div 'true' if this is a division, 'false' for a remainder.
     */
    virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1,
                                     int lit, bool is_div) = 0;
    virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;

    /**
     * @brief Used for generating code that throws ArithmeticException if the divisor is zero.
     * @details This is used for generating DivideByZero checks when the 64-bit divisor is held in
     * two separate registers; the check fires only when both halves are zero.
     * @param reg_lo The register holding the lower 32-bits.
     * @param reg_hi The register holding the upper 32-bits.
     */
    virtual void GenDivZeroCheck(int reg_lo, int reg_hi) = 0;

    virtual void GenEntrySequence(RegLocation* ArgLocs,
                                  RegLocation rl_method) = 0;
    virtual void GenExitSequence() = 0;
    virtual void GenFillArrayData(DexOffset table_offset,
                                  RegLocation rl_src) = 0;
    virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
                                     bool is_double) = 0;
    virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) = 0;

    /**
     * @brief Lowers the kMirOpSelect MIR into LIR.
     * @param bb The basic block in which the MIR is from.
     * @param mir The MIR whose opcode is kMirOpSelect.
     */
    virtual void GenSelect(BasicBlock* bb, MIR* mir) = 0;

    virtual void GenMemBarrier(MemBarrierKind barrier_kind) = 0;
    virtual void GenMoveException(RegLocation rl_dest) = 0;
    virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                               RegLocation rl_result, int lit, int first_bit,
                                               int second_bit) = 0;
    virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenPackedSwitch(MIR* mir, DexOffset table_offset,
                                 RegLocation rl_src) = 0;
    virtual void GenSparseSwitch(MIR* mir, DexOffset table_offset,
                                 RegLocation rl_src) = 0;
    virtual void GenSpecialCase(BasicBlock* bb, MIR* mir,
                                const InlineMethod& special) = 0;
    virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_dest, int scale) = 0;
    virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_src, int scale,
                             bool card_mark) = 0;
    virtual void GenShiftImmOpLong(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src1,
                                   RegLocation rl_shift) = 0;

    // Required for target - single operation generators.
    virtual LIR* OpUnconditionalBranch(LIR* target) = 0;
    virtual LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target) = 0;
    virtual LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target) = 0;
    virtual LIR* OpCondBranch(ConditionCode cc, LIR* target) = 0;
    virtual LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) = 0;
    virtual LIR* OpFpRegCopy(int r_dest, int r_src) = 0;
    virtual LIR* OpIT(ConditionCode cond, const char* guide) = 0;
    virtual LIR* OpMem(OpKind op, int rBase, int disp) = 0;
    virtual LIR* OpPcRelLoad(int reg, LIR* target) = 0;
    virtual LIR* OpReg(OpKind op, int r_dest_src) = 0;
    virtual LIR* OpRegCopy(int r_dest, int r_src) = 0;
    virtual LIR* OpRegCopyNoInsert(int r_dest, int r_src) = 0;
    virtual LIR* OpRegImm(OpKind op, int r_dest_src1, int value) = 0;
    virtual LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset) = 0;
    virtual LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2) = 0;

    /**
     * @brief Used for generating a conditional register to register operation.
     * @param op The opcode kind.
     * @param cc The condition code that when true will perform the opcode.
     * @param r_dest The destination physical register.
     * @param r_src The source physical register.
     * @return Returns the newly created LIR or null in case of creation failure.
     */
    virtual LIR* OpCondRegReg(OpKind op, ConditionCode cc, int r_dest, int r_src) = 0;

    virtual LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) = 0;
    virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) = 0;
    virtual LIR* OpTestSuspend(LIR* target) = 0;
    virtual LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset) = 0;
    virtual LIR* OpVldm(int rBase, int count) = 0;
    virtual LIR* OpVstm(int rBase, int count) = 0;
    virtual void OpLea(int rBase, int reg1, int reg2, int scale, int offset) = 0;
    virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi) = 0;
    virtual void OpTlsCmp(ThreadOffset offset, int val) = 0;
    virtual bool InexpensiveConstantInt(int32_t value) = 0;
    virtual bool InexpensiveConstantFloat(int32_t value) = 0;
    virtual bool InexpensiveConstantLong(int64_t value) = 0;
    virtual bool InexpensiveConstantDouble(int64_t value) = 0;

    // May be optimized by targets.
    virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
    virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);

    // Temp workaround
    void Workaround7250540(RegLocation rl_dest, int value);

  protected:
    Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);

    CompilationUnit* GetCompilationUnit() {
      return cu_;
    }
    /*
     * @brief Returns the index of the lowest set bit in 'x'.
     * @param x Value to be examined.
     * @returns The bit number of the lowest bit set in the value.
     */
    int32_t LowestSetBit(uint64_t x);
    /*
     * @brief Is this value a power of two?
     * @param x Value to be examined.
     * @returns 'true' if only 1 bit is set in the value.
     */
    bool IsPowerOfTwo(uint64_t x);
    /*
     * @brief Do these SRs overlap?
     * @param rl_op1 One RegLocation
     * @param rl_op2 The other RegLocation
     * @return 'true' if the VR pairs overlap
     *
     * Check to see if a result pair has a misaligned overlap with an operand pair.  This
     * is not usual for dx to generate, but it is legal (for now).  In a future rev of
     * dex, we'll want to make this case illegal.
     */
    bool BadOverlap(RegLocation rl_op1, RegLocation rl_op2);

    /*
     * @brief Force a location (in a register) into a temporary register
     * @param loc location of result
     * @returns The updated location.
     */
    RegLocation ForceTemp(RegLocation loc);

    /*
     * @brief Force a wide location (in registers) into temporary registers
     * @param loc location of result
     * @returns The updated location.
     */
    RegLocation ForceTempWide(RegLocation loc);

  private:
    void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                            RegLocation rl_src);
    void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                    bool type_known_abstract, bool use_declaring_class,
                                    bool can_assume_type_is_in_dex_cache,
                                    uint32_t type_idx, RegLocation rl_dest,
                                    RegLocation rl_src);

    void ClobberBody(RegisterInfo* p);
    void ResetDefBody(RegisterInfo* p) {
      p->def_start = NULL;
      p->def_end = NULL;
    }

  public:
    // TODO: add accessors for these.
    LIR* literal_list_;                        // Constants.
    LIR* method_literal_list_;                 // Method literals requiring patching.
    LIR* class_literal_list_;                  // Class literals requiring patching.
    LIR* code_literal_list_;                   // Code literals requiring patching.
    LIR* first_fixup_;                         // Doubly-linked list of LIR nodes requiring fixups.

  protected:
    CompilationUnit* const cu_;
    MIRGraph* const mir_graph_;
    GrowableArray<SwitchTable*> switch_tables_;
    GrowableArray<FillArrayData*> fill_array_data_;
    GrowableArray<LIR*> throw_launchpads_;
    GrowableArray<LIR*> suspend_launchpads_;
    GrowableArray<LIR*> intrinsic_launchpads_;
    GrowableArray<RegisterInfo*> tempreg_info_;
    GrowableArray<RegisterInfo*> reginfo_map_;
    GrowableArray<void*> pointer_storage_;
    CodeOffset current_code_offset_;    // Working byte offset of machine instructions.
    CodeOffset data_offset_;            // Starting offset of literal pool.
    size_t total_size_;                 // Header + code size.
    LIR* block_label_list_;
    PromotionMap* promotion_map_;
    /*
     * TODO: The code generation utilities don't have a built-in
     * mechanism to propagate the original Dalvik opcode address to the
     * associated generated instructions.  For the trace compiler, this wasn't
     * necessary because the interpreter handled all throws and debugging
     * requests.  For now we'll handle this by placing the Dalvik offset
     * in the CompilationUnit struct before codegen for each instruction.
     * The low-level LIR creation utilities will pull it from here.  Rework this.
     */
    DexOffset current_dalvik_offset_;
    size_t estimated_native_code_size_;     // Just an estimate; used to reserve code_buffer_ size.
    RegisterPool* reg_pool_;
    /*
     * Sanity checking for the register temp tracking.  The same SSA
     * name should never be associated with more than one temp register
     * during the compilation of a single instruction.
     */
    int live_sreg_;
    CodeBuffer code_buffer_;
    // The encoding mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
    std::vector<uint8_t> encoded_mapping_table_;
    std::vector<uint32_t> core_vmap_table_;
    std::vector<uint32_t> fp_vmap_table_;
    std::vector<uint8_t> native_gc_map_;
    int num_core_spills_;
    int num_fp_spills_;
    int frame_size_;
    unsigned int core_spill_mask_;
    unsigned int fp_spill_mask_;
    LIR* first_lir_insn_;
    LIR* last_lir_insn_;
};  // Class Mir2Lir

}  // namespace art

#endif  // ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_