Commit 8b303011 authored by Peter Maydell
Browse files

Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20140829' into staging



target-arm queue:
 * support PMCCNTR in ARMv8
 * various GIC fixes and cleanups
 * Correct Cortex-A57 ISAR5 and AA64ISAR0 ID register values
 * Fix regression that disabled VFP for ARMv5 CPUs
 * Update to upstream VIXL 1.5

# gpg: Signature made Fri 29 Aug 2014 15:34:47 BST using RSA key ID 14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"

* remotes/pmaydell/tags/pull-target-arm-20140829:
  target-arm: Implement pmccfiltr_write function
  target-arm: Remove old code and replace with new functions
  target-arm: Implement pmccntr_sync function
  target-arm: Add arm_ccnt_enabled function
  target-arm: Implement PMCCNTR_EL0 and related registers
  arm: Implement PMCCNTR 32b read-modify-write
  target-arm: Make the ARM PMCCNTR register 64-bit
  hw/intc/arm_gic: honor target mask in gic_update()
  aarch64: raise max_cpus to 8
  arm_gic: Use GIC_NR_SGIS constant
  arm_gic: Do not force PPIs to edge-triggered mode
  arm_gic: GICD_ICFGR: Write model only for pre v1 GICs
  arm_gic: Fix read of GICD_ICFGR
  target-arm: Correct Cortex-A57 ISAR5 and AA64ISAR0 ID register values
  target-arm: Fix regression that disabled VFP for ARMv5 CPUs
  disas/libvixl: Update to upstream VIXL 1.5

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents d9aa6885 0614601c
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -2,7 +2,7 @@
The code in this directory is a subset of libvixl:
 https://github.com/armvixl/vixl
(specifically, it is the set of files needed for disassembly only,
taken from libvixl 1.4).
taken from libvixl 1.5).
Bugfixes should preferably be sent upstream initially.

The disassembler does not currently support the entire A64 instruction
+322 −41
Original line number Diff line number Diff line
@@ -28,6 +28,7 @@
#define VIXL_A64_ASSEMBLER_A64_H_

#include <list>
#include <stack>

#include "globals.h"
#include "utils.h"
@@ -574,34 +575,107 @@ class MemOperand {

class Label {
 public:
  Label() : is_bound_(false), link_(NULL), target_(NULL) {}
  Label() : location_(kLocationUnbound) {}
  ~Label() {
    // If the label has been linked to, it needs to be bound to a target.
    VIXL_ASSERT(!IsLinked() || IsBound());
  }

  inline Instruction* link() const { return link_; }
  inline Instruction* target() const { return target_; }
  inline bool IsBound() const { return location_ >= 0; }
  inline bool IsLinked() const { return !links_.empty(); }

  inline bool IsBound() const { return is_bound_; }
  inline bool IsLinked() const { return link_ != NULL; }
 private:
  // The list of linked instructions is stored in a stack-like structure. We
  // don't use std::stack directly because it's slow for the common case where
  // only one or two instructions refer to a label, and labels themselves are
  // short-lived. This class behaves like std::stack, but the first few links
  // are preallocated (configured by kPreallocatedLinks).
  //
  // If more than N links are required, this falls back to std::stack.
  class LinksStack {
   public:
    LinksStack() : size_(0), links_extended_(NULL) {}
    ~LinksStack() {
      delete links_extended_;
    }

    size_t size() const {
      return size_;
    }

    bool empty() const {
      return size_ == 0;
    }

    void push(ptrdiff_t value) {
      if (size_ < kPreallocatedLinks) {
        links_[size_] = value;
      } else {
        if (links_extended_ == NULL) {
          links_extended_ = new std::stack<ptrdiff_t>();
        }
        VIXL_ASSERT(size_ == (links_extended_->size() + kPreallocatedLinks));
        links_extended_->push(value);
      }
      size_++;
    }

  inline void set_link(Instruction* new_link) { link_ = new_link; }
    ptrdiff_t top() const {
      return (size_ <= kPreallocatedLinks) ? links_[size_ - 1]
                                           : links_extended_->top();
    }

  static const int kEndOfChain = 0;
    void pop() {
      size_--;
      if (size_ >= kPreallocatedLinks) {
        links_extended_->pop();
        VIXL_ASSERT(size_ == (links_extended_->size() + kPreallocatedLinks));
      }
    }

   private:
  // Indicates if the label has been bound, ie its location is fixed.
  bool is_bound_;
  // Branches instructions branching to this label form a chained list, with
  // their offset indicating where the next instruction is located.
  // link_ points to the latest branch instruction generated branching to this
  // branch.
  // If link_ is not NULL, the label has been linked to.
  Instruction* link_;
    static const size_t kPreallocatedLinks = 4;

    size_t size_;
    ptrdiff_t links_[kPreallocatedLinks];
    std::stack<ptrdiff_t> * links_extended_;
  };

  inline ptrdiff_t location() const { return location_; }

  inline void Bind(ptrdiff_t location) {
    // Labels can only be bound once.
    VIXL_ASSERT(!IsBound());
    location_ = location;
  }

  inline void AddLink(ptrdiff_t instruction) {
    // If a label is bound, the assembler already has the information it needs
    // to write the instruction, so there is no need to add it to links_.
    VIXL_ASSERT(!IsBound());
    links_.push(instruction);
  }

  inline ptrdiff_t GetAndRemoveNextLink() {
    VIXL_ASSERT(IsLinked());
    ptrdiff_t link = links_.top();
    links_.pop();
    return link;
  }

  // The offsets of the instructions that have linked to this label.
  LinksStack links_;
  // The label location.
  Instruction* target_;
  ptrdiff_t location_;

  static const ptrdiff_t kLocationUnbound = -1;

  // It is not safe to copy labels, so disable the copy constructor by declaring
  // it private (without an implementation).
  Label(const Label&);

  // The Assembler class is responsible for binding and linking labels, since
  // the stored offsets need to be consistent with the Assembler's buffer.
  friend class Assembler;
};

@@ -635,10 +709,49 @@ class Literal {
};


// Control whether or not position-independent code should be emitted.
enum PositionIndependentCodeOption {
  // All code generated will be position-independent; all branches and
  // references to labels generated with the Label class will use PC-relative
  // addressing.
  PositionIndependentCode,

  // Allow VIXL to generate code that refers to absolute addresses. With this
  // option, it will not be possible to copy the code buffer and run it from a
  // different address; code must be generated in its final location.
  PositionDependentCode,

  // Allow VIXL to assume that the bottom 12 bits of the address will be
  // constant, but that the top 48 bits may change. This allows `adrp` to
  // function in systems which copy code between pages, but otherwise maintain
  // 4KB page alignment.
  PageOffsetDependentCode
};


// Control how scaled- and unscaled-offset loads and stores are generated.
enum LoadStoreScalingOption {
  // Prefer scaled-immediate-offset instructions, but emit unscaled-offset,
  // register-offset, pre-index or post-index instructions if necessary.
  PreferScaledOffset,

  // Prefer unscaled-immediate-offset instructions, but emit scaled-offset,
  // register-offset, pre-index or post-index instructions if necessary.
  PreferUnscaledOffset,

  // Require scaled-immediate-offset instructions.
  RequireScaledOffset,

  // Require unscaled-immediate-offset instructions.
  RequireUnscaledOffset
};


// Assembler.
class Assembler {
 public:
  Assembler(byte* buffer, unsigned buffer_size);
  Assembler(byte* buffer, unsigned buffer_size,
            PositionIndependentCodeOption pic = PositionIndependentCode);

  // The destructor asserts that one of the following is true:
  //  * The Assembler object has not been used.
@@ -662,12 +775,15 @@ class Assembler {
  // Label.
  // Bind a label to the current PC.
  void bind(Label* label);
  int UpdateAndGetByteOffsetTo(Label* label);
  inline int UpdateAndGetInstructionOffsetTo(Label* label) {
    VIXL_ASSERT(Label::kEndOfChain == 0);
    return UpdateAndGetByteOffsetTo(label) >> kInstructionSizeLog2;
  }

  // Return the address of a bound label.
  template <typename T>
  inline T GetLabelAddress(const Label * label) {
    VIXL_ASSERT(label->IsBound());
    VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
    VIXL_STATIC_ASSERT(sizeof(*buffer_) == 1);
    return reinterpret_cast<T>(buffer_ + label->location());
  }

  // Instruction set functions.

@@ -733,6 +849,12 @@ class Assembler {
  // Calculate the address of a PC offset.
  void adr(const Register& rd, int imm21);

  // Calculate the page address of a label.
  void adrp(const Register& rd, Label* label);

  // Calculate the page address of a PC offset.
  void adrp(const Register& rd, int imm21);

  // Data Processing instructions.
  // Add.
  void add(const Register& rd,
@@ -1112,31 +1234,76 @@ class Assembler {

  // Memory instructions.
  // Load integer or FP register.
  void ldr(const CPURegister& rt, const MemOperand& src);
  void ldr(const CPURegister& rt, const MemOperand& src,
           LoadStoreScalingOption option = PreferScaledOffset);

  // Store integer or FP register.
  void str(const CPURegister& rt, const MemOperand& dst);
  void str(const CPURegister& rt, const MemOperand& dst,
           LoadStoreScalingOption option = PreferScaledOffset);

  // Load word with sign extension.
  void ldrsw(const Register& rt, const MemOperand& src);
  void ldrsw(const Register& rt, const MemOperand& src,
             LoadStoreScalingOption option = PreferScaledOffset);

  // Load byte.
  void ldrb(const Register& rt, const MemOperand& src);
  void ldrb(const Register& rt, const MemOperand& src,
            LoadStoreScalingOption option = PreferScaledOffset);

  // Store byte.
  void strb(const Register& rt, const MemOperand& dst);
  void strb(const Register& rt, const MemOperand& dst,
            LoadStoreScalingOption option = PreferScaledOffset);

  // Load byte with sign extension.
  void ldrsb(const Register& rt, const MemOperand& src);
  void ldrsb(const Register& rt, const MemOperand& src,
             LoadStoreScalingOption option = PreferScaledOffset);

  // Load half-word.
  void ldrh(const Register& rt, const MemOperand& src);
  void ldrh(const Register& rt, const MemOperand& src,
            LoadStoreScalingOption option = PreferScaledOffset);

  // Store half-word.
  void strh(const Register& rt, const MemOperand& dst);
  void strh(const Register& rt, const MemOperand& dst,
            LoadStoreScalingOption option = PreferScaledOffset);

  // Load half-word with sign extension.
  void ldrsh(const Register& rt, const MemOperand& src);
  void ldrsh(const Register& rt, const MemOperand& src,
             LoadStoreScalingOption option = PreferScaledOffset);

  // Load integer or FP register (with unscaled offset).
  void ldur(const CPURegister& rt, const MemOperand& src,
            LoadStoreScalingOption option = PreferUnscaledOffset);

  // Store integer or FP register (with unscaled offset).
  void stur(const CPURegister& rt, const MemOperand& src,
            LoadStoreScalingOption option = PreferUnscaledOffset);

  // Load word with sign extension.
  void ldursw(const Register& rt, const MemOperand& src,
              LoadStoreScalingOption option = PreferUnscaledOffset);

  // Load byte (with unscaled offset).
  void ldurb(const Register& rt, const MemOperand& src,
             LoadStoreScalingOption option = PreferUnscaledOffset);

  // Store byte (with unscaled offset).
  void sturb(const Register& rt, const MemOperand& dst,
             LoadStoreScalingOption option = PreferUnscaledOffset);

  // Load byte with sign extension (and unscaled offset).
  void ldursb(const Register& rt, const MemOperand& src,
              LoadStoreScalingOption option = PreferUnscaledOffset);

  // Load half-word (with unscaled offset).
  void ldurh(const Register& rt, const MemOperand& src,
             LoadStoreScalingOption option = PreferUnscaledOffset);

  // Store half-word (with unscaled offset).
  void sturh(const Register& rt, const MemOperand& dst,
             LoadStoreScalingOption option = PreferUnscaledOffset);

  // Load half-word with sign extension (and unscaled offset).
  void ldursh(const Register& rt, const MemOperand& src,
              LoadStoreScalingOption option = PreferUnscaledOffset);

  // Load integer or FP register pair.
  void ldp(const CPURegister& rt, const CPURegister& rt2,
@@ -1166,6 +1333,79 @@ class Assembler {
  // Load single precision floating point literal to FP register.
  void ldr(const FPRegister& ft, float imm);

  // Store exclusive byte.
  void stxrb(const Register& rs, const Register& rt, const MemOperand& dst);

  // Store exclusive half-word.
  void stxrh(const Register& rs, const Register& rt, const MemOperand& dst);

  // Store exclusive register.
  void stxr(const Register& rs, const Register& rt, const MemOperand& dst);

  // Load exclusive byte.
  void ldxrb(const Register& rt, const MemOperand& src);

  // Load exclusive half-word.
  void ldxrh(const Register& rt, const MemOperand& src);

  // Load exclusive register.
  void ldxr(const Register& rt, const MemOperand& src);

  // Store exclusive register pair.
  void stxp(const Register& rs,
            const Register& rt,
            const Register& rt2,
            const MemOperand& dst);

  // Load exclusive register pair.
  void ldxp(const Register& rt, const Register& rt2, const MemOperand& src);

  // Store-release exclusive byte.
  void stlxrb(const Register& rs, const Register& rt, const MemOperand& dst);

  // Store-release exclusive half-word.
  void stlxrh(const Register& rs, const Register& rt, const MemOperand& dst);

  // Store-release exclusive register.
  void stlxr(const Register& rs, const Register& rt, const MemOperand& dst);

  // Load-acquire exclusive byte.
  void ldaxrb(const Register& rt, const MemOperand& src);

  // Load-acquire exclusive half-word.
  void ldaxrh(const Register& rt, const MemOperand& src);

  // Load-acquire exclusive register.
  void ldaxr(const Register& rt, const MemOperand& src);

  // Store-release exclusive register pair.
  void stlxp(const Register& rs,
             const Register& rt,
             const Register& rt2,
             const MemOperand& dst);

  // Load-acquire exclusive register pair.
  void ldaxp(const Register& rt, const Register& rt2, const MemOperand& src);

  // Store-release byte.
  void stlrb(const Register& rt, const MemOperand& dst);

  // Store-release half-word.
  void stlrh(const Register& rt, const MemOperand& dst);

  // Store-release register.
  void stlr(const Register& rt, const MemOperand& dst);

  // Load-acquire byte.
  void ldarb(const Register& rt, const MemOperand& src);

  // Load-acquire half-word.
  void ldarh(const Register& rt, const MemOperand& src);

  // Load-acquire register.
  void ldar(const Register& rt, const MemOperand& src);


  // Move instructions. The default shift of -1 indicates that the move
  // instruction will calculate an appropriate 16-bit immediate and left shift
  // that is equal to the 64-bit immediate argument. If an explicit left shift
@@ -1214,6 +1454,9 @@ class Assembler {
  // System hint.
  void hint(SystemHint code);

  // Clear exclusive monitor.
  void clrex(int imm4 = 0xf);

  // Data memory barrier.
  void dmb(BarrierDomain domain, BarrierType type);

@@ -1429,6 +1672,11 @@ class Assembler {
    return rt2.code() << Rt2_offset;
  }

  static Instr Rs(CPURegister rs) {
    VIXL_ASSERT(rs.code() != kSPRegInternalCode);
    return rs.code() << Rs_offset;
  }

  // These encoding functions allow the stack pointer to be encoded, and
  // disallow the zero register.
  static Instr RdSP(Register rd) {
@@ -1619,6 +1867,11 @@ class Assembler {
    return imm7 << ImmHint_offset;
  }

  static Instr CRm(int imm4) {
    VIXL_ASSERT(is_uint4(imm4));
    return imm4 << CRm_offset;
  }

  static Instr ImmBarrierDomain(int imm2) {
    VIXL_ASSERT(is_uint2(imm2));
    return imm2 << ImmBarrierDomain_offset;
@@ -1660,16 +1913,20 @@ class Assembler {
  }

  // Size of the code generated in bytes
  uint64_t SizeOfCodeGenerated() const {
  size_t SizeOfCodeGenerated() const {
    VIXL_ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
    return pc_ - buffer_;
  }

  // Size of the code generated since label to the current position.
  uint64_t SizeOfCodeGeneratedSince(Label* label) const {
  size_t SizeOfCodeGeneratedSince(Label* label) const {
    size_t pc_offset = SizeOfCodeGenerated();

    VIXL_ASSERT(label->IsBound());
    VIXL_ASSERT((pc_ >= label->target()) && (pc_ < (buffer_ + buffer_size_)));
    return pc_ - label->target();
    VIXL_ASSERT(pc_offset >= static_cast<size_t>(label->location()));
    VIXL_ASSERT(pc_offset < buffer_size_);

    return pc_offset - label->location();
  }


@@ -1693,6 +1950,15 @@ class Assembler {
  void EmitLiteralPool(LiteralPoolEmitOption option = NoJumpRequired);
  size_t LiteralPoolSize();

  inline PositionIndependentCodeOption pic() {
    return pic_;
  }

  inline bool AllowPageOffsetDependentCode() {
    return (pic() == PageOffsetDependentCode) ||
           (pic() == PositionDependentCode);
  }

 protected:
  inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const {
    return reg.Is64Bits() ? xzr : wzr;
@@ -1701,7 +1967,8 @@ class Assembler {

  void LoadStore(const CPURegister& rt,
                 const MemOperand& addr,
                 LoadStoreOp op);
                 LoadStoreOp op,
                 LoadStoreScalingOption option = PreferScaledOffset);
  static bool IsImmLSUnscaled(ptrdiff_t offset);
  static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size);

@@ -1717,9 +1984,9 @@ class Assembler {
                        LogicalOp op);
  static bool IsImmLogical(uint64_t value,
                           unsigned width,
                           unsigned* n,
                           unsigned* imm_s,
                           unsigned* imm_r);
                           unsigned* n = NULL,
                           unsigned* imm_s = NULL,
                           unsigned* imm_r = NULL);

  void ConditionalCompare(const Register& rn,
                          const Operand& operand,
@@ -1823,6 +2090,17 @@ class Assembler {

  void RecordLiteral(int64_t imm, unsigned size);

  // Link the current (not-yet-emitted) instruction to the specified label, then
  // return an offset to be encoded in the instruction. If the label is not yet
  // bound, an offset of 0 is returned.
  ptrdiff_t LinkAndGetByteOffsetTo(Label * label);
  ptrdiff_t LinkAndGetInstructionOffsetTo(Label * label);
  ptrdiff_t LinkAndGetPageOffsetTo(Label * label);

  // A common implementation for the LinkAndGet<Type>OffsetTo helpers.
  template <int element_size>
  ptrdiff_t LinkAndGetOffsetTo(Label* label);

  // Emit the instruction at pc_.
  void Emit(Instr instruction) {
    VIXL_STATIC_ASSERT(sizeof(*pc_) == 1);
@@ -1864,12 +2142,15 @@ class Assembler {
  // The buffer into which code and relocation info are generated.
  Instruction* buffer_;
  // Buffer size, in bytes.
  unsigned buffer_size_;
  size_t buffer_size_;
  Instruction* pc_;
  std::list<Literal*> literals_;
  Instruction* next_literal_pool_check_;
  unsigned literal_pool_monitor_;

  PositionIndependentCodeOption pic_;

  friend class Label;
  friend class BlockLiteralPoolScope;

#ifdef DEBUG
+60 −8
Original line number Diff line number Diff line
@@ -50,9 +50,9 @@ V_(Rd, 4, 0, Bits) /* Destination register. */ \
V_(Rn, 9, 5, Bits)                        /* First source register.       */   \
V_(Rm, 20, 16, Bits)                      /* Second source register.      */   \
V_(Ra, 14, 10, Bits)                      /* Third source register.       */   \
V_(Rt, 4, 0, Bits)                        /* Load dest / store source. */      \
V_(Rt2, 14, 10, Bits)                     /* Load second dest /        */      \
                                         /* store second source.      */       \
V_(Rt, 4, 0, Bits)                        /* Load/store register.         */   \
V_(Rt2, 14, 10, Bits)                     /* Load/store second register.  */   \
V_(Rs, 20, 16, Bits)                      /* Exclusive access status.     */   \
V_(PrefetchMode, 4, 0, Bits)                                                   \
                                                                               \
/* Common bits */                                                              \
@@ -126,6 +126,13 @@ V_(SysOp1, 18, 16, Bits) \
V_(SysOp2, 7, 5, Bits)                                                         \
V_(CRn, 15, 12, Bits)                                                          \
V_(CRm, 11, 8, Bits)                                                           \
                                                                               \
/* Load-/store-exclusive */                                                    \
V_(LdStXLoad, 22, 22, Bits)                                                    \
V_(LdStXNotExclusive, 23, 23, Bits)                                            \
V_(LdStXAcquireRelease, 15, 15, Bits)                                          \
V_(LdStXSizeLog2, 31, 30, Bits)                                                \
V_(LdStXPair, 21, 21, Bits)                                                    \


#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_)                                    \
@@ -585,6 +592,13 @@ enum MemBarrierOp {
  ISB             = MemBarrierFixed | 0x00000040
};

enum SystemExclusiveMonitorOp {
  SystemExclusiveMonitorFixed = 0xD503305F,
  SystemExclusiveMonitorFMask = 0xFFFFF0FF,
  SystemExclusiveMonitorMask  = 0xFFFFF0FF,
  CLREX                       = SystemExclusiveMonitorFixed
};

// Any load or store.
enum LoadStoreAnyOp {
  LoadStoreAnyFMask = 0x0a000000,
@@ -756,6 +770,44 @@ enum LoadStoreRegisterOffset {
  #undef LOAD_STORE_REGISTER_OFFSET
};

enum LoadStoreExclusive {
  LoadStoreExclusiveFixed = 0x08000000,
  LoadStoreExclusiveFMask = 0x3F000000,
  LoadStoreExclusiveMask  = 0xFFE08000,
  STXRB_w  = LoadStoreExclusiveFixed | 0x00000000,
  STXRH_w  = LoadStoreExclusiveFixed | 0x40000000,
  STXR_w   = LoadStoreExclusiveFixed | 0x80000000,
  STXR_x   = LoadStoreExclusiveFixed | 0xC0000000,
  LDXRB_w  = LoadStoreExclusiveFixed | 0x00400000,
  LDXRH_w  = LoadStoreExclusiveFixed | 0x40400000,
  LDXR_w   = LoadStoreExclusiveFixed | 0x80400000,
  LDXR_x   = LoadStoreExclusiveFixed | 0xC0400000,
  STXP_w   = LoadStoreExclusiveFixed | 0x80200000,
  STXP_x   = LoadStoreExclusiveFixed | 0xC0200000,
  LDXP_w   = LoadStoreExclusiveFixed | 0x80600000,
  LDXP_x   = LoadStoreExclusiveFixed | 0xC0600000,
  STLXRB_w = LoadStoreExclusiveFixed | 0x00008000,
  STLXRH_w = LoadStoreExclusiveFixed | 0x40008000,
  STLXR_w  = LoadStoreExclusiveFixed | 0x80008000,
  STLXR_x  = LoadStoreExclusiveFixed | 0xC0008000,
  LDAXRB_w = LoadStoreExclusiveFixed | 0x00408000,
  LDAXRH_w = LoadStoreExclusiveFixed | 0x40408000,
  LDAXR_w  = LoadStoreExclusiveFixed | 0x80408000,
  LDAXR_x  = LoadStoreExclusiveFixed | 0xC0408000,
  STLXP_w  = LoadStoreExclusiveFixed | 0x80208000,
  STLXP_x  = LoadStoreExclusiveFixed | 0xC0208000,
  LDAXP_w  = LoadStoreExclusiveFixed | 0x80608000,
  LDAXP_x  = LoadStoreExclusiveFixed | 0xC0608000,
  STLRB_w  = LoadStoreExclusiveFixed | 0x00808000,
  STLRH_w  = LoadStoreExclusiveFixed | 0x40808000,
  STLR_w   = LoadStoreExclusiveFixed | 0x80808000,
  STLR_x   = LoadStoreExclusiveFixed | 0xC0808000,
  LDARB_w  = LoadStoreExclusiveFixed | 0x00C08000,
  LDARH_w  = LoadStoreExclusiveFixed | 0x40C08000,
  LDAR_w   = LoadStoreExclusiveFixed | 0x80C08000,
  LDAR_x   = LoadStoreExclusiveFixed | 0xC0C08000
};

// Conditional compare.
enum ConditionalCompareOp {
  ConditionalCompareMask = 0x60000000,
+27 −0
Original line number Diff line number Diff line
@@ -28,6 +28,7 @@
#define VIXL_CPU_A64_H

#include "globals.h"
#include "instructions-a64.h"

namespace vixl {

@@ -42,6 +43,32 @@ class CPU {
  // safely run.
  static void EnsureIAndDCacheCoherency(void *address, size_t length);

  // Handle tagged pointers.
  template <typename T>
  static T SetPointerTag(T pointer, uint64_t tag) {
    VIXL_ASSERT(is_uintn(kAddressTagWidth, tag));

    // Use C-style casts to get static_cast behaviour for integral types (T),
    // and reinterpret_cast behaviour for other types.

    uint64_t raw = (uint64_t)pointer;
    VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));

    raw = (raw & ~kAddressTagMask) | (tag << kAddressTagOffset);
    return (T)raw;
  }

  template <typename T>
  static uint64_t GetPointerTag(T pointer) {
    // Use C-style casts to get static_cast behaviour for integral types (T),
    // and reinterpret_cast behaviour for other types.

    uint64_t raw = (uint64_t)pointer;
    VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));

    return (raw & kAddressTagMask) >> kAddressTagOffset;
  }

 private:
  // Return the content of the cache type register.
  static uint32_t GetCacheType();
+7 −8
Original line number Diff line number Diff line
@@ -280,8 +280,7 @@ void Decoder::DecodeLoadStore(Instruction* instr) {
    if (instr->Bit(28) == 0) {
      if (instr->Bit(29) == 0) {
        if (instr->Bit(26) == 0) {
          // TODO: VisitLoadStoreExclusive.
          VisitUnimplemented(instr);
          VisitLoadStoreExclusive(instr);
        } else {
          DecodeAdvSIMDLoadStore(instr);
        }
Loading