target-arm queue:

 * support PMCCNTR in ARMv8
 * various GIC fixes and cleanups
 * Correct Cortex-A57 ISAR5 and AA64ISAR0 ID register values
 * Fix regression that disabled VFP for ARMv5 CPUs
 * Update to upstream VIXL 1.5
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABCAAGBQJUAI+HAAoJEDwlJe0UNgzePpUP/iPGrfXPNsfa4GuCUm3L2rSu
 8JmfT5K9bcjOGRRFwFtqgDXNiazoAtW2RXIkPiK5zK6aH5j17ls6jTRcybt1a054
 HT0m8FfIOO8v1fCIuTvvUbr4oc9KjqagEvvmaFI5PDWltMUhDvqvpPaNhGaFdF+z
 msSB+pEX8VBwQdJGuCIXfiWvv//Q+yFyOsyoDG0tOeqUqC2y9QVx5C+ungWqX7uV
 zmw+oqDBjmMtoCGPjMVDJCkC7UGsKtle8QqWCFCgRJmc9EJt8QPg0i5xZYG4Qd5J
 E/SF+Px3k98xA+hVE4ssFJY1ujmHhpRouNI4Q6WHYbk/R4T+G6nCKnmJLqv7Ajs8
 HHjfauYjymPz127VBShdKqkBc5zR3fjKTssVCZ4KvjMqO6YWosQo0TIuP7vJGUT6
 OY+4xTWOz+md28LfHCequL/AJdYlN6l456G1vJmAhVlPSJiVsKRMqwFwrHY70ztU
 F/lffZVOU3koIx+CGoUczzTXlnn3oUtUhQM/p5YH5I56TnNq6ddkMfD5Dik5Dluj
 IrBIsH9TU5DgLYhnV24+VFU6fYxCMHP2MjyfZf2KnjCT0nIr7ZnZalgwR9AYRPo0
 XakdNGHjehJygnwc8mfXFRcY42s0p8Ajeapt9blXXZG2VQ5a2W1+IMRAXfyFMMv3
 T1M8AmdnM7Rn/QZK69J7
 =6waG
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20140829' into staging

target-arm queue:
 * support PMCCNTR in ARMv8
 * various GIC fixes and cleanups
 * Correct Cortex-A57 ISAR5 and AA64ISAR0 ID register values
 * Fix regression that disabled VFP for ARMv5 CPUs
 * Update to upstream VIXL 1.5

# gpg: Signature made Fri 29 Aug 2014 15:34:47 BST using RSA key ID 14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"

* remotes/pmaydell/tags/pull-target-arm-20140829:
  target-arm: Implement pmccfiltr_write function
  target-arm: Remove old code and replace with new functions
  target-arm: Implement pmccntr_sync function
  target-arm: Add arm_ccnt_enabled function
  target-arm: Implement PMCCNTR_EL0 and related registers
  arm: Implement PMCCNTR 32b read-modify-write
  target-arm: Make the ARM PMCCNTR register 64-bit
  hw/intc/arm_gic: honor target mask in gic_update()
  aarch64: raise max_cpus to 8
  arm_gic: Use GIC_NR_SGIS constant
  arm_gic: Do not force PPIs to edge-triggered mode
  arm_gic: GICD_ICFGR: Write model only for pre v1 GICs
  arm_gic: Fix read of GICD_ICFGR
  target-arm: Correct Cortex-A57 ISAR5 and AA64ISAR0 ID register values
  target-arm: Fix regression that disabled VFP for ARMv5 CPUs
  disas/libvixl: Update to upstream VIXL 1.5

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Peter Maydell 2014-08-29 15:48:15 +01:00
commit 8b3030114a
19 changed files with 701 additions and 147 deletions

View file

@@ -2,7 +2,7 @@
The code in this directory is a subset of libvixl: The code in this directory is a subset of libvixl:
https://github.com/armvixl/vixl https://github.com/armvixl/vixl
(specifically, it is the set of files needed for disassembly only, (specifically, it is the set of files needed for disassembly only,
taken from libvixl 1.4). taken from libvixl 1.5).
Bugfixes should preferably be sent upstream initially. Bugfixes should preferably be sent upstream initially.
The disassembler does not currently support the entire A64 instruction The disassembler does not currently support the entire A64 instruction

View file

@@ -28,6 +28,7 @@
#define VIXL_A64_ASSEMBLER_A64_H_ #define VIXL_A64_ASSEMBLER_A64_H_
#include <list> #include <list>
#include <stack>
#include "globals.h" #include "globals.h"
#include "utils.h" #include "utils.h"
@@ -574,34 +575,107 @@ class MemOperand {
class Label { class Label {
public: public:
Label() : is_bound_(false), link_(NULL), target_(NULL) {} Label() : location_(kLocationUnbound) {}
~Label() { ~Label() {
// If the label has been linked to, it needs to be bound to a target. // If the label has been linked to, it needs to be bound to a target.
VIXL_ASSERT(!IsLinked() || IsBound()); VIXL_ASSERT(!IsLinked() || IsBound());
} }
inline Instruction* link() const { return link_; } inline bool IsBound() const { return location_ >= 0; }
inline Instruction* target() const { return target_; } inline bool IsLinked() const { return !links_.empty(); }
inline bool IsBound() const { return is_bound_; }
inline bool IsLinked() const { return link_ != NULL; }
inline void set_link(Instruction* new_link) { link_ = new_link; }
static const int kEndOfChain = 0;
private: private:
// Indicates if the label has been bound, ie its location is fixed. // The list of linked instructions is stored in a stack-like structure. We
bool is_bound_; // don't use std::stack directly because it's slow for the common case where
// Branches instructions branching to this label form a chained list, with // only one or two instructions refer to a label, and labels themselves are
// their offset indicating where the next instruction is located. // short-lived. This class behaves like std::stack, but the first few links
// link_ points to the latest branch instruction generated branching to this // are preallocated (configured by kPreallocatedLinks).
// branch. //
// If link_ is not NULL, the label has been linked to. // If more than N links are required, this falls back to std::stack.
Instruction* link_; class LinksStack {
// The label location. public:
Instruction* target_; LinksStack() : size_(0), links_extended_(NULL) {}
~LinksStack() {
delete links_extended_;
}
size_t size() const {
return size_;
}
bool empty() const {
return size_ == 0;
}
void push(ptrdiff_t value) {
if (size_ < kPreallocatedLinks) {
links_[size_] = value;
} else {
if (links_extended_ == NULL) {
links_extended_ = new std::stack<ptrdiff_t>();
}
VIXL_ASSERT(size_ == (links_extended_->size() + kPreallocatedLinks));
links_extended_->push(value);
}
size_++;
}
ptrdiff_t top() const {
return (size_ <= kPreallocatedLinks) ? links_[size_ - 1]
: links_extended_->top();
}
void pop() {
size_--;
if (size_ >= kPreallocatedLinks) {
links_extended_->pop();
VIXL_ASSERT(size_ == (links_extended_->size() + kPreallocatedLinks));
}
}
private:
static const size_t kPreallocatedLinks = 4;
size_t size_;
ptrdiff_t links_[kPreallocatedLinks];
std::stack<ptrdiff_t> * links_extended_;
};
inline ptrdiff_t location() const { return location_; }
inline void Bind(ptrdiff_t location) {
// Labels can only be bound once.
VIXL_ASSERT(!IsBound());
location_ = location;
}
inline void AddLink(ptrdiff_t instruction) {
// If a label is bound, the assembler already has the information it needs
// to write the instruction, so there is no need to add it to links_.
VIXL_ASSERT(!IsBound());
links_.push(instruction);
}
inline ptrdiff_t GetAndRemoveNextLink() {
VIXL_ASSERT(IsLinked());
ptrdiff_t link = links_.top();
links_.pop();
return link;
}
// The offsets of the instructions that have linked to this label.
LinksStack links_;
// The label location.
ptrdiff_t location_;
static const ptrdiff_t kLocationUnbound = -1;
// It is not safe to copy labels, so disable the copy constructor by declaring
// it private (without an implementation).
Label(const Label&);
// The Assembler class is responsible for binding and linking labels, since
// the stored offsets need to be consistent with the Assembler's buffer.
friend class Assembler; friend class Assembler;
}; };
@@ -635,10 +709,49 @@ class Literal {
}; };
// Control whether or not position-independent code should be emitted.
enum PositionIndependentCodeOption {
// All code generated will be position-independent; all branches and
// references to labels generated with the Label class will use PC-relative
// addressing.
PositionIndependentCode,
// Allow VIXL to generate code that refers to absolute addresses. With this
// option, it will not be possible to copy the code buffer and run it from a
// different address; code must be generated in its final location.
PositionDependentCode,
// Allow VIXL to assume that the bottom 12 bits of the address will be
// constant, but that the top 48 bits may change. This allows `adrp` to
// function in systems which copy code between pages, but otherwise maintain
// 4KB page alignment.
PageOffsetDependentCode
};
// Control how scaled- and unscaled-offset loads and stores are generated.
enum LoadStoreScalingOption {
// Prefer scaled-immediate-offset instructions, but emit unscaled-offset,
// register-offset, pre-index or post-index instructions if necessary.
PreferScaledOffset,
// Prefer unscaled-immediate-offset instructions, but emit scaled-offset,
// register-offset, pre-index or post-index instructions if necessary.
PreferUnscaledOffset,
// Require scaled-immediate-offset instructions.
RequireScaledOffset,
// Require unscaled-immediate-offset instructions.
RequireUnscaledOffset
};
// Assembler. // Assembler.
class Assembler { class Assembler {
public: public:
Assembler(byte* buffer, unsigned buffer_size); Assembler(byte* buffer, unsigned buffer_size,
PositionIndependentCodeOption pic = PositionIndependentCode);
// The destructor asserts that one of the following is true: // The destructor asserts that one of the following is true:
// * The Assembler object has not been used. // * The Assembler object has not been used.
@@ -662,12 +775,15 @@ class Assembler {
// Label. // Label.
// Bind a label to the current PC. // Bind a label to the current PC.
void bind(Label* label); void bind(Label* label);
int UpdateAndGetByteOffsetTo(Label* label);
inline int UpdateAndGetInstructionOffsetTo(Label* label) {
VIXL_ASSERT(Label::kEndOfChain == 0);
return UpdateAndGetByteOffsetTo(label) >> kInstructionSizeLog2;
}
// Return the address of a bound label.
template <typename T>
inline T GetLabelAddress(const Label * label) {
VIXL_ASSERT(label->IsBound());
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
VIXL_STATIC_ASSERT(sizeof(*buffer_) == 1);
return reinterpret_cast<T>(buffer_ + label->location());
}
// Instruction set functions. // Instruction set functions.
@@ -733,6 +849,12 @@ class Assembler {
// Calculate the address of a PC offset. // Calculate the address of a PC offset.
void adr(const Register& rd, int imm21); void adr(const Register& rd, int imm21);
// Calculate the page address of a label.
void adrp(const Register& rd, Label* label);
// Calculate the page address of a PC offset.
void adrp(const Register& rd, int imm21);
// Data Processing instructions. // Data Processing instructions.
// Add. // Add.
void add(const Register& rd, void add(const Register& rd,
@@ -1112,31 +1234,76 @@ class Assembler {
// Memory instructions. // Memory instructions.
// Load integer or FP register. // Load integer or FP register.
void ldr(const CPURegister& rt, const MemOperand& src); void ldr(const CPURegister& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferScaledOffset);
// Store integer or FP register. // Store integer or FP register.
void str(const CPURegister& rt, const MemOperand& dst); void str(const CPURegister& rt, const MemOperand& dst,
LoadStoreScalingOption option = PreferScaledOffset);
// Load word with sign extension. // Load word with sign extension.
void ldrsw(const Register& rt, const MemOperand& src); void ldrsw(const Register& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferScaledOffset);
// Load byte. // Load byte.
void ldrb(const Register& rt, const MemOperand& src); void ldrb(const Register& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferScaledOffset);
// Store byte. // Store byte.
void strb(const Register& rt, const MemOperand& dst); void strb(const Register& rt, const MemOperand& dst,
LoadStoreScalingOption option = PreferScaledOffset);
// Load byte with sign extension. // Load byte with sign extension.
void ldrsb(const Register& rt, const MemOperand& src); void ldrsb(const Register& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferScaledOffset);
// Load half-word. // Load half-word.
void ldrh(const Register& rt, const MemOperand& src); void ldrh(const Register& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferScaledOffset);
// Store half-word. // Store half-word.
void strh(const Register& rt, const MemOperand& dst); void strh(const Register& rt, const MemOperand& dst,
LoadStoreScalingOption option = PreferScaledOffset);
// Load half-word with sign extension. // Load half-word with sign extension.
void ldrsh(const Register& rt, const MemOperand& src); void ldrsh(const Register& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferScaledOffset);
// Load integer or FP register (with unscaled offset).
void ldur(const CPURegister& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferUnscaledOffset);
// Store integer or FP register (with unscaled offset).
void stur(const CPURegister& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferUnscaledOffset);
// Load word with sign extension.
void ldursw(const Register& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferUnscaledOffset);
// Load byte (with unscaled offset).
void ldurb(const Register& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferUnscaledOffset);
// Store byte (with unscaled offset).
void sturb(const Register& rt, const MemOperand& dst,
LoadStoreScalingOption option = PreferUnscaledOffset);
// Load byte with sign extension (and unscaled offset).
void ldursb(const Register& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferUnscaledOffset);
// Load half-word (with unscaled offset).
void ldurh(const Register& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferUnscaledOffset);
// Store half-word (with unscaled offset).
void sturh(const Register& rt, const MemOperand& dst,
LoadStoreScalingOption option = PreferUnscaledOffset);
// Load half-word with sign extension (and unscaled offset).
void ldursh(const Register& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferUnscaledOffset);
// Load integer or FP register pair. // Load integer or FP register pair.
void ldp(const CPURegister& rt, const CPURegister& rt2, void ldp(const CPURegister& rt, const CPURegister& rt2,
@@ -1166,6 +1333,79 @@ class Assembler {
// Load single precision floating point literal to FP register. // Load single precision floating point literal to FP register.
void ldr(const FPRegister& ft, float imm); void ldr(const FPRegister& ft, float imm);
// Store exclusive byte.
void stxrb(const Register& rs, const Register& rt, const MemOperand& dst);
// Store exclusive half-word.
void stxrh(const Register& rs, const Register& rt, const MemOperand& dst);
// Store exclusive register.
void stxr(const Register& rs, const Register& rt, const MemOperand& dst);
// Load exclusive byte.
void ldxrb(const Register& rt, const MemOperand& src);
// Load exclusive half-word.
void ldxrh(const Register& rt, const MemOperand& src);
// Load exclusive register.
void ldxr(const Register& rt, const MemOperand& src);
// Store exclusive register pair.
void stxp(const Register& rs,
const Register& rt,
const Register& rt2,
const MemOperand& dst);
// Load exclusive register pair.
void ldxp(const Register& rt, const Register& rt2, const MemOperand& src);
// Store-release exclusive byte.
void stlxrb(const Register& rs, const Register& rt, const MemOperand& dst);
// Store-release exclusive half-word.
void stlxrh(const Register& rs, const Register& rt, const MemOperand& dst);
// Store-release exclusive register.
void stlxr(const Register& rs, const Register& rt, const MemOperand& dst);
// Load-acquire exclusive byte.
void ldaxrb(const Register& rt, const MemOperand& src);
// Load-acquire exclusive half-word.
void ldaxrh(const Register& rt, const MemOperand& src);
// Load-acquire exclusive register.
void ldaxr(const Register& rt, const MemOperand& src);
// Store-release exclusive register pair.
void stlxp(const Register& rs,
const Register& rt,
const Register& rt2,
const MemOperand& dst);
// Load-acquire exclusive register pair.
void ldaxp(const Register& rt, const Register& rt2, const MemOperand& src);
// Store-release byte.
void stlrb(const Register& rt, const MemOperand& dst);
// Store-release half-word.
void stlrh(const Register& rt, const MemOperand& dst);
// Store-release register.
void stlr(const Register& rt, const MemOperand& dst);
// Load-acquire byte.
void ldarb(const Register& rt, const MemOperand& src);
// Load-acquire half-word.
void ldarh(const Register& rt, const MemOperand& src);
// Load-acquire register.
void ldar(const Register& rt, const MemOperand& src);
// Move instructions. The default shift of -1 indicates that the move // Move instructions. The default shift of -1 indicates that the move
// instruction will calculate an appropriate 16-bit immediate and left shift // instruction will calculate an appropriate 16-bit immediate and left shift
// that is equal to the 64-bit immediate argument. If an explicit left shift // that is equal to the 64-bit immediate argument. If an explicit left shift
@@ -1214,6 +1454,9 @@ class Assembler {
// System hint. // System hint.
void hint(SystemHint code); void hint(SystemHint code);
// Clear exclusive monitor.
void clrex(int imm4 = 0xf);
// Data memory barrier. // Data memory barrier.
void dmb(BarrierDomain domain, BarrierType type); void dmb(BarrierDomain domain, BarrierType type);
@@ -1429,6 +1672,11 @@ class Assembler {
return rt2.code() << Rt2_offset; return rt2.code() << Rt2_offset;
} }
static Instr Rs(CPURegister rs) {
VIXL_ASSERT(rs.code() != kSPRegInternalCode);
return rs.code() << Rs_offset;
}
// These encoding functions allow the stack pointer to be encoded, and // These encoding functions allow the stack pointer to be encoded, and
// disallow the zero register. // disallow the zero register.
static Instr RdSP(Register rd) { static Instr RdSP(Register rd) {
@@ -1619,6 +1867,11 @@ class Assembler {
return imm7 << ImmHint_offset; return imm7 << ImmHint_offset;
} }
static Instr CRm(int imm4) {
VIXL_ASSERT(is_uint4(imm4));
return imm4 << CRm_offset;
}
static Instr ImmBarrierDomain(int imm2) { static Instr ImmBarrierDomain(int imm2) {
VIXL_ASSERT(is_uint2(imm2)); VIXL_ASSERT(is_uint2(imm2));
return imm2 << ImmBarrierDomain_offset; return imm2 << ImmBarrierDomain_offset;
@@ -1660,16 +1913,20 @@ class Assembler {
} }
// Size of the code generated in bytes // Size of the code generated in bytes
uint64_t SizeOfCodeGenerated() const { size_t SizeOfCodeGenerated() const {
VIXL_ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_))); VIXL_ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
return pc_ - buffer_; return pc_ - buffer_;
} }
// Size of the code generated since label to the current position. // Size of the code generated since label to the current position.
uint64_t SizeOfCodeGeneratedSince(Label* label) const { size_t SizeOfCodeGeneratedSince(Label* label) const {
size_t pc_offset = SizeOfCodeGenerated();
VIXL_ASSERT(label->IsBound()); VIXL_ASSERT(label->IsBound());
VIXL_ASSERT((pc_ >= label->target()) && (pc_ < (buffer_ + buffer_size_))); VIXL_ASSERT(pc_offset >= static_cast<size_t>(label->location()));
return pc_ - label->target(); VIXL_ASSERT(pc_offset < buffer_size_);
return pc_offset - label->location();
} }
@@ -1693,6 +1950,15 @@ class Assembler {
void EmitLiteralPool(LiteralPoolEmitOption option = NoJumpRequired); void EmitLiteralPool(LiteralPoolEmitOption option = NoJumpRequired);
size_t LiteralPoolSize(); size_t LiteralPoolSize();
inline PositionIndependentCodeOption pic() {
return pic_;
}
inline bool AllowPageOffsetDependentCode() {
return (pic() == PageOffsetDependentCode) ||
(pic() == PositionDependentCode);
}
protected: protected:
inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const { inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const {
return reg.Is64Bits() ? xzr : wzr; return reg.Is64Bits() ? xzr : wzr;
@@ -1701,7 +1967,8 @@ class Assembler {
void LoadStore(const CPURegister& rt, void LoadStore(const CPURegister& rt,
const MemOperand& addr, const MemOperand& addr,
LoadStoreOp op); LoadStoreOp op,
LoadStoreScalingOption option = PreferScaledOffset);
static bool IsImmLSUnscaled(ptrdiff_t offset); static bool IsImmLSUnscaled(ptrdiff_t offset);
static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size); static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size);
@@ -1717,9 +1984,9 @@ class Assembler {
LogicalOp op); LogicalOp op);
static bool IsImmLogical(uint64_t value, static bool IsImmLogical(uint64_t value,
unsigned width, unsigned width,
unsigned* n, unsigned* n = NULL,
unsigned* imm_s, unsigned* imm_s = NULL,
unsigned* imm_r); unsigned* imm_r = NULL);
void ConditionalCompare(const Register& rn, void ConditionalCompare(const Register& rn,
const Operand& operand, const Operand& operand,
@@ -1823,6 +2090,17 @@ class Assembler {
void RecordLiteral(int64_t imm, unsigned size); void RecordLiteral(int64_t imm, unsigned size);
// Link the current (not-yet-emitted) instruction to the specified label, then
// return an offset to be encoded in the instruction. If the label is not yet
// bound, an offset of 0 is returned.
ptrdiff_t LinkAndGetByteOffsetTo(Label * label);
ptrdiff_t LinkAndGetInstructionOffsetTo(Label * label);
ptrdiff_t LinkAndGetPageOffsetTo(Label * label);
// A common implementation for the LinkAndGet<Type>OffsetTo helpers.
template <int element_size>
ptrdiff_t LinkAndGetOffsetTo(Label* label);
// Emit the instruction at pc_. // Emit the instruction at pc_.
void Emit(Instr instruction) { void Emit(Instr instruction) {
VIXL_STATIC_ASSERT(sizeof(*pc_) == 1); VIXL_STATIC_ASSERT(sizeof(*pc_) == 1);
@@ -1864,12 +2142,15 @@ class Assembler {
// The buffer into which code and relocation info are generated. // The buffer into which code and relocation info are generated.
Instruction* buffer_; Instruction* buffer_;
// Buffer size, in bytes. // Buffer size, in bytes.
unsigned buffer_size_; size_t buffer_size_;
Instruction* pc_; Instruction* pc_;
std::list<Literal*> literals_; std::list<Literal*> literals_;
Instruction* next_literal_pool_check_; Instruction* next_literal_pool_check_;
unsigned literal_pool_monitor_; unsigned literal_pool_monitor_;
PositionIndependentCodeOption pic_;
friend class Label;
friend class BlockLiteralPoolScope; friend class BlockLiteralPoolScope;
#ifdef DEBUG #ifdef DEBUG

View file

@@ -46,13 +46,13 @@ R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
#define INSTRUCTION_FIELDS_LIST(V_) \ #define INSTRUCTION_FIELDS_LIST(V_) \
/* Register fields */ \ /* Register fields */ \
V_(Rd, 4, 0, Bits) /* Destination register. */ \ V_(Rd, 4, 0, Bits) /* Destination register. */ \
V_(Rn, 9, 5, Bits) /* First source register. */ \ V_(Rn, 9, 5, Bits) /* First source register. */ \
V_(Rm, 20, 16, Bits) /* Second source register. */ \ V_(Rm, 20, 16, Bits) /* Second source register. */ \
V_(Ra, 14, 10, Bits) /* Third source register. */ \ V_(Ra, 14, 10, Bits) /* Third source register. */ \
V_(Rt, 4, 0, Bits) /* Load dest / store source. */ \ V_(Rt, 4, 0, Bits) /* Load/store register. */ \
V_(Rt2, 14, 10, Bits) /* Load second dest / */ \ V_(Rt2, 14, 10, Bits) /* Load/store second register. */ \
/* store second source. */ \ V_(Rs, 20, 16, Bits) /* Exclusive access status. */ \
V_(PrefetchMode, 4, 0, Bits) \ V_(PrefetchMode, 4, 0, Bits) \
\ \
/* Common bits */ \ /* Common bits */ \
@@ -126,6 +126,13 @@ V_(SysOp1, 18, 16, Bits) \
V_(SysOp2, 7, 5, Bits) \ V_(SysOp2, 7, 5, Bits) \
V_(CRn, 15, 12, Bits) \ V_(CRn, 15, 12, Bits) \
V_(CRm, 11, 8, Bits) \ V_(CRm, 11, 8, Bits) \
\
/* Load-/store-exclusive */ \
V_(LdStXLoad, 22, 22, Bits) \
V_(LdStXNotExclusive, 23, 23, Bits) \
V_(LdStXAcquireRelease, 15, 15, Bits) \
V_(LdStXSizeLog2, 31, 30, Bits) \
V_(LdStXPair, 21, 21, Bits) \
#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \ #define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
@@ -585,6 +592,13 @@ enum MemBarrierOp {
ISB = MemBarrierFixed | 0x00000040 ISB = MemBarrierFixed | 0x00000040
}; };
enum SystemExclusiveMonitorOp {
SystemExclusiveMonitorFixed = 0xD503305F,
SystemExclusiveMonitorFMask = 0xFFFFF0FF,
SystemExclusiveMonitorMask = 0xFFFFF0FF,
CLREX = SystemExclusiveMonitorFixed
};
// Any load or store. // Any load or store.
enum LoadStoreAnyOp { enum LoadStoreAnyOp {
LoadStoreAnyFMask = 0x0a000000, LoadStoreAnyFMask = 0x0a000000,
@@ -702,7 +716,7 @@ enum LoadStoreUnscaledOffsetOp {
// Load/store (post, pre, offset and unsigned.) // Load/store (post, pre, offset and unsigned.)
enum LoadStoreOp { enum LoadStoreOp {
LoadStoreOpMask = 0xC4C00000, LoadStoreOpMask = 0xC4C00000,
#define LOAD_STORE(A, B, C, D) \ #define LOAD_STORE(A, B, C, D) \
A##B##_##C = D A##B##_##C = D
LOAD_STORE_OP_LIST(LOAD_STORE), LOAD_STORE_OP_LIST(LOAD_STORE),
@@ -756,6 +770,44 @@ enum LoadStoreRegisterOffset {
#undef LOAD_STORE_REGISTER_OFFSET #undef LOAD_STORE_REGISTER_OFFSET
}; };
enum LoadStoreExclusive {
LoadStoreExclusiveFixed = 0x08000000,
LoadStoreExclusiveFMask = 0x3F000000,
LoadStoreExclusiveMask = 0xFFE08000,
STXRB_w = LoadStoreExclusiveFixed | 0x00000000,
STXRH_w = LoadStoreExclusiveFixed | 0x40000000,
STXR_w = LoadStoreExclusiveFixed | 0x80000000,
STXR_x = LoadStoreExclusiveFixed | 0xC0000000,
LDXRB_w = LoadStoreExclusiveFixed | 0x00400000,
LDXRH_w = LoadStoreExclusiveFixed | 0x40400000,
LDXR_w = LoadStoreExclusiveFixed | 0x80400000,
LDXR_x = LoadStoreExclusiveFixed | 0xC0400000,
STXP_w = LoadStoreExclusiveFixed | 0x80200000,
STXP_x = LoadStoreExclusiveFixed | 0xC0200000,
LDXP_w = LoadStoreExclusiveFixed | 0x80600000,
LDXP_x = LoadStoreExclusiveFixed | 0xC0600000,
STLXRB_w = LoadStoreExclusiveFixed | 0x00008000,
STLXRH_w = LoadStoreExclusiveFixed | 0x40008000,
STLXR_w = LoadStoreExclusiveFixed | 0x80008000,
STLXR_x = LoadStoreExclusiveFixed | 0xC0008000,
LDAXRB_w = LoadStoreExclusiveFixed | 0x00408000,
LDAXRH_w = LoadStoreExclusiveFixed | 0x40408000,
LDAXR_w = LoadStoreExclusiveFixed | 0x80408000,
LDAXR_x = LoadStoreExclusiveFixed | 0xC0408000,
STLXP_w = LoadStoreExclusiveFixed | 0x80208000,
STLXP_x = LoadStoreExclusiveFixed | 0xC0208000,
LDAXP_w = LoadStoreExclusiveFixed | 0x80608000,
LDAXP_x = LoadStoreExclusiveFixed | 0xC0608000,
STLRB_w = LoadStoreExclusiveFixed | 0x00808000,
STLRH_w = LoadStoreExclusiveFixed | 0x40808000,
STLR_w = LoadStoreExclusiveFixed | 0x80808000,
STLR_x = LoadStoreExclusiveFixed | 0xC0808000,
LDARB_w = LoadStoreExclusiveFixed | 0x00C08000,
LDARH_w = LoadStoreExclusiveFixed | 0x40C08000,
LDAR_w = LoadStoreExclusiveFixed | 0x80C08000,
LDAR_x = LoadStoreExclusiveFixed | 0xC0C08000
};
// Conditional compare. // Conditional compare.
enum ConditionalCompareOp { enum ConditionalCompareOp {
ConditionalCompareMask = 0x60000000, ConditionalCompareMask = 0x60000000,

View file

@@ -28,6 +28,7 @@
#define VIXL_CPU_A64_H #define VIXL_CPU_A64_H
#include "globals.h" #include "globals.h"
#include "instructions-a64.h"
namespace vixl { namespace vixl {
@@ -42,6 +43,32 @@ class CPU {
// safely run. // safely run.
static void EnsureIAndDCacheCoherency(void *address, size_t length); static void EnsureIAndDCacheCoherency(void *address, size_t length);
// Handle tagged pointers.
template <typename T>
static T SetPointerTag(T pointer, uint64_t tag) {
VIXL_ASSERT(is_uintn(kAddressTagWidth, tag));
// Use C-style casts to get static_cast behaviour for integral types (T),
// and reinterpret_cast behaviour for other types.
uint64_t raw = (uint64_t)pointer;
VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
raw = (raw & ~kAddressTagMask) | (tag << kAddressTagOffset);
return (T)raw;
}
template <typename T>
static uint64_t GetPointerTag(T pointer) {
// Use C-style casts to get static_cast behaviour for integral types (T),
// and reinterpret_cast behaviour for other types.
uint64_t raw = (uint64_t)pointer;
VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
return (raw & kAddressTagMask) >> kAddressTagOffset;
}
private: private:
// Return the content of the cache type register. // Return the content of the cache type register.
static uint32_t GetCacheType(); static uint32_t GetCacheType();

View file

@@ -171,9 +171,9 @@ void Decoder::DecodePCRelAddressing(Instruction* instr) {
void Decoder::DecodeBranchSystemException(Instruction* instr) { void Decoder::DecodeBranchSystemException(Instruction* instr) {
VIXL_ASSERT((instr->Bits(27, 24) == 0x4) || VIXL_ASSERT((instr->Bits(27, 24) == 0x4) ||
(instr->Bits(27, 24) == 0x5) || (instr->Bits(27, 24) == 0x5) ||
(instr->Bits(27, 24) == 0x6) || (instr->Bits(27, 24) == 0x6) ||
(instr->Bits(27, 24) == 0x7) ); (instr->Bits(27, 24) == 0x7) );
switch (instr->Bits(31, 29)) { switch (instr->Bits(31, 29)) {
case 0: case 0:
@@ -272,16 +272,15 @@ void Decoder::DecodeBranchSystemException(Instruction* instr) {
void Decoder::DecodeLoadStore(Instruction* instr) { void Decoder::DecodeLoadStore(Instruction* instr) {
VIXL_ASSERT((instr->Bits(27, 24) == 0x8) || VIXL_ASSERT((instr->Bits(27, 24) == 0x8) ||
(instr->Bits(27, 24) == 0x9) || (instr->Bits(27, 24) == 0x9) ||
(instr->Bits(27, 24) == 0xC) || (instr->Bits(27, 24) == 0xC) ||
(instr->Bits(27, 24) == 0xD) ); (instr->Bits(27, 24) == 0xD) );
if (instr->Bit(24) == 0) { if (instr->Bit(24) == 0) {
if (instr->Bit(28) == 0) { if (instr->Bit(28) == 0) {
if (instr->Bit(29) == 0) { if (instr->Bit(29) == 0) {
if (instr->Bit(26) == 0) { if (instr->Bit(26) == 0) {
// TODO: VisitLoadStoreExclusive. VisitLoadStoreExclusive(instr);
VisitUnimplemented(instr);
} else { } else {
DecodeAdvSIMDLoadStore(instr); DecodeAdvSIMDLoadStore(instr);
} }

View file

@@ -59,6 +59,7 @@
V(LoadStorePreIndex) \ V(LoadStorePreIndex) \
V(LoadStoreRegisterOffset) \ V(LoadStoreRegisterOffset) \
V(LoadStoreUnsignedOffset) \ V(LoadStoreUnsignedOffset) \
V(LoadStoreExclusive) \
V(LogicalShifted) \ V(LogicalShifted) \
V(AddSubShifted) \ V(AddSubShifted) \
V(AddSubExtended) \ V(AddSubExtended) \

View file

@@ -24,6 +24,7 @@
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <cstdlib>
#include "a64/disasm-a64.h" #include "a64/disasm-a64.h"
namespace vixl { namespace vixl {
@@ -529,7 +530,7 @@ void Disassembler::VisitExtract(Instruction* instr) {
void Disassembler::VisitPCRelAddressing(Instruction* instr) { void Disassembler::VisitPCRelAddressing(Instruction* instr) {
switch (instr->Mask(PCRelAddressingMask)) { switch (instr->Mask(PCRelAddressingMask)) {
case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break; case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break;
// ADRP is not implemented. case ADRP: Format(instr, "adrp", "'Xd, 'AddrPCRelPage"); break;
default: Format(instr, "unimplemented", "(PCRelAddressing)"); default: Format(instr, "unimplemented", "(PCRelAddressing)");
} }
} }
@@ -943,6 +944,49 @@ void Disassembler::VisitLoadStorePairNonTemporal(Instruction* instr) {
} }
void Disassembler::VisitLoadStoreExclusive(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form;
switch (instr->Mask(LoadStoreExclusiveMask)) {
case STXRB_w: mnemonic = "stxrb"; form = "'Ws, 'Wt, ['Xns]"; break;
case STXRH_w: mnemonic = "stxrh"; form = "'Ws, 'Wt, ['Xns]"; break;
case STXR_w: mnemonic = "stxr"; form = "'Ws, 'Wt, ['Xns]"; break;
case STXR_x: mnemonic = "stxr"; form = "'Ws, 'Xt, ['Xns]"; break;
case LDXRB_w: mnemonic = "ldxrb"; form = "'Wt, ['Xns]"; break;
case LDXRH_w: mnemonic = "ldxrh"; form = "'Wt, ['Xns]"; break;
case LDXR_w: mnemonic = "ldxr"; form = "'Wt, ['Xns]"; break;
case LDXR_x: mnemonic = "ldxr"; form = "'Xt, ['Xns]"; break;
case STXP_w: mnemonic = "stxp"; form = "'Ws, 'Wt, 'Wt2, ['Xns]"; break;
case STXP_x: mnemonic = "stxp"; form = "'Ws, 'Xt, 'Xt2, ['Xns]"; break;
case LDXP_w: mnemonic = "ldxp"; form = "'Wt, 'Wt2, ['Xns]"; break;
case LDXP_x: mnemonic = "ldxp"; form = "'Xt, 'Xt2, ['Xns]"; break;
case STLXRB_w: mnemonic = "stlxrb"; form = "'Ws, 'Wt, ['Xns]"; break;
case STLXRH_w: mnemonic = "stlxrh"; form = "'Ws, 'Wt, ['Xns]"; break;
case STLXR_w: mnemonic = "stlxr"; form = "'Ws, 'Wt, ['Xns]"; break;
case STLXR_x: mnemonic = "stlxr"; form = "'Ws, 'Xt, ['Xns]"; break;
case LDAXRB_w: mnemonic = "ldaxrb"; form = "'Wt, ['Xns]"; break;
case LDAXRH_w: mnemonic = "ldaxrh"; form = "'Wt, ['Xns]"; break;
case LDAXR_w: mnemonic = "ldaxr"; form = "'Wt, ['Xns]"; break;
case LDAXR_x: mnemonic = "ldaxr"; form = "'Xt, ['Xns]"; break;
case STLXP_w: mnemonic = "stlxp"; form = "'Ws, 'Wt, 'Wt2, ['Xns]"; break;
case STLXP_x: mnemonic = "stlxp"; form = "'Ws, 'Xt, 'Xt2, ['Xns]"; break;
case LDAXP_w: mnemonic = "ldaxp"; form = "'Wt, 'Wt2, ['Xns]"; break;
case LDAXP_x: mnemonic = "ldaxp"; form = "'Xt, 'Xt2, ['Xns]"; break;
case STLRB_w: mnemonic = "stlrb"; form = "'Wt, ['Xns]"; break;
case STLRH_w: mnemonic = "stlrh"; form = "'Wt, ['Xns]"; break;
case STLR_w: mnemonic = "stlr"; form = "'Wt, ['Xns]"; break;
case STLR_x: mnemonic = "stlr"; form = "'Xt, ['Xns]"; break;
case LDARB_w: mnemonic = "ldarb"; form = "'Wt, ['Xns]"; break;
case LDARH_w: mnemonic = "ldarh"; form = "'Wt, ['Xns]"; break;
case LDAR_w: mnemonic = "ldar"; form = "'Wt, ['Xns]"; break;
case LDAR_x: mnemonic = "ldar"; form = "'Xt, ['Xns]"; break;
default: form = "(LoadStoreExclusive)";
}
Format(instr, mnemonic, form);
}
void Disassembler::VisitFPCompare(Instruction* instr) { void Disassembler::VisitFPCompare(Instruction* instr) {
const char *mnemonic = "unimplemented"; const char *mnemonic = "unimplemented";
const char *form = "'Fn, 'Fm"; const char *form = "'Fn, 'Fm";
@ -1162,7 +1206,15 @@ void Disassembler::VisitSystem(Instruction* instr) {
const char *mnemonic = "unimplemented"; const char *mnemonic = "unimplemented";
const char *form = "(System)"; const char *form = "(System)";
if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) { if (instr->Mask(SystemExclusiveMonitorFMask) == SystemExclusiveMonitorFixed) {
switch (instr->Mask(SystemExclusiveMonitorMask)) {
case CLREX: {
mnemonic = "clrex";
form = (instr->CRm() == 0xf) ? NULL : "'IX";
break;
}
}
} else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
switch (instr->Mask(SystemSysRegMask)) { switch (instr->Mask(SystemSysRegMask)) {
case MRS: { case MRS: {
mnemonic = "mrs"; mnemonic = "mrs";
@ -1184,7 +1236,6 @@ void Disassembler::VisitSystem(Instruction* instr) {
} }
} }
} else if (instr->Mask(SystemHintFMask) == SystemHintFixed) { } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
VIXL_ASSERT(instr->Mask(SystemHintMask) == HINT);
switch (instr->ImmHint()) { switch (instr->ImmHint()) {
case NOP: { case NOP: {
mnemonic = "nop"; mnemonic = "nop";
@ -1312,6 +1363,7 @@ int Disassembler::SubstituteRegisterField(Instruction* instr,
case 'n': reg_num = instr->Rn(); break; case 'n': reg_num = instr->Rn(); break;
case 'm': reg_num = instr->Rm(); break; case 'm': reg_num = instr->Rm(); break;
case 'a': reg_num = instr->Ra(); break; case 'a': reg_num = instr->Ra(); break;
case 's': reg_num = instr->Rs(); break;
case 't': { case 't': {
if (format[2] == '2') { if (format[2] == '2') {
reg_num = instr->Rt2(); reg_num = instr->Rt2();
@ -1458,6 +1510,10 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
AppendToOutput("#0x%" PRIx64, instr->ImmException()); AppendToOutput("#0x%" PRIx64, instr->ImmException());
return 6; return 6;
} }
case 'X': { // IX - CLREX instruction.
AppendToOutput("#0x%" PRIx64, instr->CRm());
return 2;
}
default: { default: {
VIXL_UNIMPLEMENTED(); VIXL_UNIMPLEMENTED();
return 0; return 0;
@ -1564,21 +1620,20 @@ int Disassembler::SubstituteConditionField(Instruction* instr,
int Disassembler::SubstitutePCRelAddressField(Instruction* instr, int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
const char* format) { const char* format) {
USE(format); VIXL_ASSERT((strcmp(format, "AddrPCRelByte") == 0) || // Used by `adr`.
VIXL_ASSERT(strncmp(format, "AddrPCRel", 9) == 0); (strcmp(format, "AddrPCRelPage") == 0)); // Used by `adrp`.
int offset = instr->ImmPCRel(); int64_t offset = instr->ImmPCRel();
Instruction * base = instr;
// Only ADR (AddrPCRelByte) is supported. if (format[9] == 'P') {
VIXL_ASSERT(strcmp(format, "AddrPCRelByte") == 0); offset *= kPageSize;
base = AlignDown(base, kPageSize);
char sign = '+';
if (offset < 0) {
offset = -offset;
sign = '-';
} }
VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
AppendToOutput("#%c0x%x (addr %p)", sign, offset, instr + offset); char sign = (offset < 0) ? '-' : '+';
void * target = reinterpret_cast<void *>(base + offset);
AppendToOutput("#%c0x%" PRIx64 " (addr %p)", sign, std::abs(offset), target);
return 13; return 13;
} }
@ -1606,7 +1661,8 @@ int Disassembler::SubstituteBranchTargetField(Instruction* instr,
sign = '-'; sign = '-';
} }
VIXL_STATIC_ASSERT(sizeof(*instr) == 1); VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
AppendToOutput("#%c0x%" PRIx64 " (addr %p)", sign, offset, instr + offset); void * address = reinterpret_cast<void *>(instr + offset);
AppendToOutput("#%c0x%" PRIx64 " (addr %p)", sign, offset, address);
return 8; return 8;
} }

View file

@ -85,7 +85,7 @@ class Disassembler: public DecoderVisitor {
bool IsMovzMovnImm(unsigned reg_size, uint64_t value); bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
void ResetOutput(); void ResetOutput();
void AppendToOutput(const char* string, ...); void AppendToOutput(const char* string, ...) PRINTF_CHECK(2, 3);
char* buffer_; char* buffer_;
uint32_t buffer_pos_; uint32_t buffer_pos_;

View file

@ -149,17 +149,24 @@ LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
Instruction* Instruction::ImmPCOffsetTarget() { Instruction* Instruction::ImmPCOffsetTarget() {
Instruction * base = this;
ptrdiff_t offset; ptrdiff_t offset;
if (IsPCRelAddressing()) { if (IsPCRelAddressing()) {
// PC-relative addressing. Only ADR is supported. // ADR and ADRP.
offset = ImmPCRel(); offset = ImmPCRel();
if (Mask(PCRelAddressingMask) == ADRP) {
base = AlignDown(base, kPageSize);
offset *= kPageSize;
} else {
VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
}
} else { } else {
// All PC-relative branches. // All PC-relative branches.
VIXL_ASSERT(BranchType() != UnknownBranchType); VIXL_ASSERT(BranchType() != UnknownBranchType);
// Relative branch offsets are instruction-size-aligned. // Relative branch offsets are instruction-size-aligned.
offset = ImmBranch() << kInstructionSizeLog2; offset = ImmBranch() << kInstructionSizeLog2;
} }
return this + offset; return base + offset;
} }
@ -185,10 +192,16 @@ void Instruction::SetImmPCOffsetTarget(Instruction* target) {
void Instruction::SetPCRelImmTarget(Instruction* target) { void Instruction::SetPCRelImmTarget(Instruction* target) {
// ADRP is not supported, so 'this' must point to an ADR instruction. int32_t imm21;
VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR); if ((Mask(PCRelAddressingMask) == ADR)) {
imm21 = target - this;
Instr imm = Assembler::ImmPCRelAddress(target - this); } else {
VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
imm21 = target_page - this_page;
}
Instr imm = Assembler::ImmPCRelAddress(imm21);
SetInstructionBits(Mask(~ImmPCRel_mask) | imm); SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
} }

View file

@ -41,6 +41,10 @@ const unsigned kLiteralEntrySize = 4;
const unsigned kLiteralEntrySizeLog2 = 2; const unsigned kLiteralEntrySizeLog2 = 2;
const unsigned kMaxLoadLiteralRange = 1 * MBytes; const unsigned kMaxLoadLiteralRange = 1 * MBytes;
// This is the nominal page size (as used by the adrp instruction); the actual
// size of the memory pages allocated by the kernel is likely to differ.
const unsigned kPageSize = 4 * KBytes;
const unsigned kWRegSize = 32; const unsigned kWRegSize = 32;
const unsigned kWRegSizeLog2 = 5; const unsigned kWRegSizeLog2 = 5;
const unsigned kWRegSizeInBytes = kWRegSize / 8; const unsigned kWRegSizeInBytes = kWRegSize / 8;
@ -79,6 +83,12 @@ const unsigned kZeroRegCode = 31;
const unsigned kSPRegInternalCode = 63; const unsigned kSPRegInternalCode = 63;
const unsigned kRegCodeMask = 0x1f; const unsigned kRegCodeMask = 0x1f;
const unsigned kAddressTagOffset = 56;
const unsigned kAddressTagWidth = 8;
const uint64_t kAddressTagMask =
((UINT64_C(1) << kAddressTagWidth) - 1) << kAddressTagOffset;
VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000));
// AArch64 floating-point specifics. These match IEEE-754. // AArch64 floating-point specifics. These match IEEE-754.
const unsigned kDoubleMantissaBits = 52; const unsigned kDoubleMantissaBits = 52;
const unsigned kDoubleExponentBits = 11; const unsigned kDoubleExponentBits = 11;

View file

@ -28,14 +28,10 @@
#define PLATFORM_H #define PLATFORM_H
// Define platform specific functionalities. // Define platform specific functionalities.
#include <signal.h>
namespace vixl { namespace vixl {
#ifdef USE_SIMULATOR inline void HostBreakpoint() { raise(SIGINT); }
// Currently we assume running the simulator implies running on x86 hardware.
inline void HostBreakpoint() { asm("int3"); }
#else
inline void HostBreakpoint() { asm("brk"); }
#endif
} // namespace vixl } // namespace vixl
#endif #endif

View file

@ -124,4 +124,14 @@ int CountSetBits(uint64_t value, int width) {
return value; return value;
} }
uint64_t LowestSetBit(uint64_t value) {
return value & -value;
}
bool IsPowerOf2(int64_t value) {
return (value != 0) && ((value & (value - 1)) == 0);
}
} // namespace vixl } // namespace vixl

View file

@ -33,6 +33,14 @@
namespace vixl { namespace vixl {
// Macros for compile-time format checking.
#if defined(__GNUC__)
#define PRINTF_CHECK(format_index, varargs_index) \
__attribute__((format(printf, format_index, varargs_index)))
#else
#define PRINTF_CHECK(format_index, varargs_index)
#endif
// Check number width. // Check number width.
inline bool is_intn(unsigned n, int64_t x) { inline bool is_intn(unsigned n, int64_t x) {
VIXL_ASSERT((0 < n) && (n < 64)); VIXL_ASSERT((0 < n) && (n < 64));
@ -155,6 +163,8 @@ int CountLeadingZeros(uint64_t value, int width);
int CountLeadingSignBits(int64_t value, int width); int CountLeadingSignBits(int64_t value, int width);
int CountTrailingZeros(uint64_t value, int width); int CountTrailingZeros(uint64_t value, int width);
int CountSetBits(uint64_t value, int width); int CountSetBits(uint64_t value, int width);
uint64_t LowestSetBit(uint64_t value);
bool IsPowerOf2(int64_t value);
// Pointer alignment // Pointer alignment
// TODO: rename/refactor to make it specific to instructions. // TODO: rename/refactor to make it specific to instructions.
@ -167,21 +177,31 @@ bool IsWordAligned(T pointer) {
// Increment a pointer until it has the specified alignment. // Increment a pointer until it has the specified alignment.
template<class T> template<class T>
T AlignUp(T pointer, size_t alignment) { T AlignUp(T pointer, size_t alignment) {
VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(uintptr_t)); // Use C-style casts to get static_cast behaviour for integral types (T), and
uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer); // reinterpret_cast behaviour for other types.
uintptr_t pointer_raw = (uintptr_t)pointer;
VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(pointer_raw));
size_t align_step = (alignment - pointer_raw) % alignment; size_t align_step = (alignment - pointer_raw) % alignment;
VIXL_ASSERT((pointer_raw + align_step) % alignment == 0); VIXL_ASSERT((pointer_raw + align_step) % alignment == 0);
return reinterpret_cast<T>(pointer_raw + align_step);
return (T)(pointer_raw + align_step);
} }
// Decrement a pointer until it has the specified alignment. // Decrement a pointer until it has the specified alignment.
template<class T> template<class T>
T AlignDown(T pointer, size_t alignment) { T AlignDown(T pointer, size_t alignment) {
VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(uintptr_t)); // Use C-style casts to get static_cast behaviour for integral types (T), and
uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer); // reinterpret_cast behaviour for other types.
uintptr_t pointer_raw = (uintptr_t)pointer;
VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(pointer_raw));
size_t align_step = pointer_raw % alignment; size_t align_step = pointer_raw % alignment;
VIXL_ASSERT((pointer_raw - align_step) % alignment == 0); VIXL_ASSERT((pointer_raw - align_step) % alignment == 0);
return reinterpret_cast<T>(pointer_raw - align_step);
return (T)(pointer_raw - align_step);
} }

View file

@ -541,7 +541,7 @@ static QEMUMachine machvirt_a15_machine = {
.name = "virt", .name = "virt",
.desc = "ARM Virtual Machine", .desc = "ARM Virtual Machine",
.init = machvirt_init, .init = machvirt_init,
.max_cpus = 4, .max_cpus = 8,
}; };
static void machvirt_machine_init(void) static void machvirt_machine_init(void)

View file

@ -66,7 +66,8 @@ void gic_update(GICState *s)
best_prio = 0x100; best_prio = 0x100;
best_irq = 1023; best_irq = 1023;
for (irq = 0; irq < s->num_irq; irq++) { for (irq = 0; irq < s->num_irq; irq++) {
if (GIC_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm)) { if (GIC_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm) &&
(irq < GIC_INTERNAL || GIC_TARGET(irq) & cm)) {
if (GIC_GET_PRIORITY(irq, cpu) < best_prio) { if (GIC_GET_PRIORITY(irq, cpu) < best_prio) {
best_prio = GIC_GET_PRIORITY(irq, cpu); best_prio = GIC_GET_PRIORITY(irq, cpu);
best_irq = irq; best_irq = irq;
@ -372,7 +373,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset)
} }
} else if (offset < 0xf00) { } else if (offset < 0xf00) {
/* Interrupt Configuration. */ /* Interrupt Configuration. */
irq = (offset - 0xc00) * 2 + GIC_BASE_IRQ; irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
if (irq >= s->num_irq) if (irq >= s->num_irq)
goto bad_reg; goto bad_reg;
res = 0; res = 0;
@ -558,13 +559,15 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ; irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
if (irq >= s->num_irq) if (irq >= s->num_irq)
goto bad_reg; goto bad_reg;
if (irq < GIC_INTERNAL) if (irq < GIC_NR_SGIS)
value |= 0xaa; value |= 0xaa;
for (i = 0; i < 4; i++) { for (i = 0; i < 4; i++) {
if (value & (1 << (i * 2))) { if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
GIC_SET_MODEL(irq + i); if (value & (1 << (i * 2))) {
} else { GIC_SET_MODEL(irq + i);
GIC_CLEAR_MODEL(irq + i); } else {
GIC_CLEAR_MODEL(irq + i);
}
} }
if (value & (2 << (i * 2))) { if (value & (2 << (i * 2))) {
GIC_SET_EDGE_TRIGGER(irq + i); GIC_SET_EDGE_TRIGGER(irq + i);

View file

@ -128,7 +128,7 @@ static void arm_gic_common_reset(DeviceState *dev)
s->running_priority[i] = 0x100; s->running_priority[i] = 0x100;
s->cpu_enabled[i] = false; s->cpu_enabled[i] = false;
} }
for (i = 0; i < 16; i++) { for (i = 0; i < GIC_NR_SGIS; i++) {
GIC_SET_ENABLED(i, ALL_CPU_MASK); GIC_SET_ENABLED(i, ALL_CPU_MASK);
GIC_SET_EDGE_TRIGGER(i); GIC_SET_EDGE_TRIGGER(i);
} }

View file

@ -191,8 +191,8 @@ typedef struct CPUARMState {
uint64_t par_el1; /* Translation result. */ uint64_t par_el1; /* Translation result. */
uint32_t c9_insn; /* Cache lockdown registers. */ uint32_t c9_insn; /* Cache lockdown registers. */
uint32_t c9_data; uint32_t c9_data;
uint32_t c9_pmcr; /* performance monitor control register */ uint64_t c9_pmcr; /* performance monitor control register */
uint32_t c9_pmcnten; /* perf monitor counter enables */ uint64_t c9_pmcnten; /* perf monitor counter enables */
uint32_t c9_pmovsr; /* perf monitor overflow status */ uint32_t c9_pmovsr; /* perf monitor overflow status */
uint32_t c9_pmxevtyper; /* perf monitor event type */ uint32_t c9_pmxevtyper; /* perf monitor event type */
uint32_t c9_pmuserenr; /* perf monitor user enable */ uint32_t c9_pmuserenr; /* perf monitor user enable */
@ -224,7 +224,8 @@ typedef struct CPUARMState {
/* If the counter is enabled, this stores the last time the counter /* If the counter is enabled, this stores the last time the counter
* was reset. Otherwise it stores the counter value * was reset. Otherwise it stores the counter value
*/ */
uint32_t c15_ccnt; uint64_t c15_ccnt;
uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */
} cp15; } cp15;
struct { struct {
@ -352,6 +353,17 @@ int cpu_arm_signal_handler(int host_signum, void *pinfo,
int arm_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, int arm_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int mmu_idx); int mmu_idx);
/**
* pmccntr_sync
* @env: CPUARMState
*
* Synchronises the counter in the PMCCNTR. This must always be called twice,
* once before any action that might affect the timer and again afterwards.
* The function is used to swap the state of the register if required.
* This only happens when not in user mode (!CONFIG_USER_ONLY)
*/
void pmccntr_sync(CPUARMState *env);
/* SCTLR bit meanings. Several bits have been reused in newer /* SCTLR bit meanings. Several bits have been reused in newer
* versions of the architecture; in that case we define constants * versions of the architecture; in that case we define constants
* for both old and new bit meanings. Code which tests against those * for both old and new bit meanings. Code which tests against those
@ -1255,7 +1267,14 @@ static inline bool arm_singlestep_active(CPUARMState *env)
static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
target_ulong *cs_base, int *flags) target_ulong *cs_base, int *flags)
{ {
int fpen = extract32(env->cp15.c1_coproc, 20, 2); int fpen;
if (arm_feature(env, ARM_FEATURE_V6)) {
fpen = extract32(env->cp15.c1_coproc, 20, 2);
} else {
/* CPACR doesn't exist before v6, so VFP is always accessible */
fpen = 3;
}
if (is_a64(env)) { if (is_a64(env)) {
*pc = env->pc; *pc = env->pc;

View file

@ -123,9 +123,10 @@ static void aarch64_a57_initfn(Object *obj)
cpu->id_isar2 = 0x21232042; cpu->id_isar2 = 0x21232042;
cpu->id_isar3 = 0x01112131; cpu->id_isar3 = 0x01112131;
cpu->id_isar4 = 0x00011142; cpu->id_isar4 = 0x00011142;
cpu->id_isar5 = 0x00011121;
cpu->id_aa64pfr0 = 0x00002222; cpu->id_aa64pfr0 = 0x00002222;
cpu->id_aa64dfr0 = 0x10305106; cpu->id_aa64dfr0 = 0x10305106;
cpu->id_aa64isar0 = 0x00010000; cpu->id_aa64isar0 = 0x00011120;
cpu->id_aa64mmfr0 = 0x00001124; cpu->id_aa64mmfr0 = 0x00001124;
cpu->dbgdidr = 0x3516d000; cpu->dbgdidr = 0x3516d000;
cpu->clidr = 0x0a200023; cpu->clidr = 0x0a200023;

View file

@ -548,24 +548,39 @@ static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri)
} }
#ifndef CONFIG_USER_ONLY #ifndef CONFIG_USER_ONLY
static inline bool arm_ccnt_enabled(CPUARMState *env)
{
/* This does not support checking PMCCFILTR_EL0 register */
if (!(env->cp15.c9_pmcr & PMCRE)) {
return false;
}
return true;
}
void pmccntr_sync(CPUARMState *env)
{
uint64_t temp_ticks;
temp_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
get_ticks_per_sec(), 1000000);
if (env->cp15.c9_pmcr & PMCRD) {
/* Increment once every 64 processor clock cycles */
temp_ticks /= 64;
}
if (arm_ccnt_enabled(env)) {
env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
}
}
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value) uint64_t value)
{ {
/* Don't computer the number of ticks in user mode */ pmccntr_sync(env);
uint32_t temp_ticks;
temp_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) *
get_ticks_per_sec() / 1000000;
if (env->cp15.c9_pmcr & PMCRE) {
/* If the counter is enabled */
if (env->cp15.c9_pmcr & PMCRD) {
/* Increment once every 64 processor clock cycles */
env->cp15.c15_ccnt = (temp_ticks/64) - env->cp15.c15_ccnt;
} else {
env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
}
}
if (value & PMCRC) { if (value & PMCRC) {
/* The counter has been reset */ /* The counter has been reset */
@ -576,26 +591,20 @@ static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
env->cp15.c9_pmcr &= ~0x39; env->cp15.c9_pmcr &= ~0x39;
env->cp15.c9_pmcr |= (value & 0x39); env->cp15.c9_pmcr |= (value & 0x39);
if (env->cp15.c9_pmcr & PMCRE) { pmccntr_sync(env);
if (env->cp15.c9_pmcr & PMCRD) {
/* Increment once every 64 processor clock cycles */
temp_ticks /= 64;
}
env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
}
} }
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri) static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{ {
uint32_t total_ticks; uint64_t total_ticks;
if (!(env->cp15.c9_pmcr & PMCRE)) { if (!arm_ccnt_enabled(env)) {
/* Counter is disabled, do not change value */ /* Counter is disabled, do not change value */
return env->cp15.c15_ccnt; return env->cp15.c15_ccnt;
} }
total_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) * total_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
get_ticks_per_sec() / 1000000; get_ticks_per_sec(), 1000000);
if (env->cp15.c9_pmcr & PMCRD) { if (env->cp15.c9_pmcr & PMCRD) {
/* Increment once every 64 processor clock cycles */ /* Increment once every 64 processor clock cycles */
@ -607,16 +616,16 @@ static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri, static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value) uint64_t value)
{ {
uint32_t total_ticks; uint64_t total_ticks;
if (!(env->cp15.c9_pmcr & PMCRE)) { if (!arm_ccnt_enabled(env)) {
/* Counter is disabled, set the absolute value */ /* Counter is disabled, set the absolute value */
env->cp15.c15_ccnt = value; env->cp15.c15_ccnt = value;
return; return;
} }
total_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) * total_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
get_ticks_per_sec() / 1000000; get_ticks_per_sec(), 1000000);
if (env->cp15.c9_pmcr & PMCRD) { if (env->cp15.c9_pmcr & PMCRD) {
/* Increment once every 64 processor clock cycles */ /* Increment once every 64 processor clock cycles */
@ -624,8 +633,31 @@ static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
} }
env->cp15.c15_ccnt = total_ticks - value; env->cp15.c15_ccnt = total_ticks - value;
} }
static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
uint64_t cur_val = pmccntr_read(env, NULL);
pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}
#else /* CONFIG_USER_ONLY */
void pmccntr_sync(CPUARMState *env)
{
}
#endif #endif
static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
pmccntr_sync(env);
env->cp15.pmccfiltr_el0 = value & 0x7E000000;
pmccntr_sync(env);
}
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri, static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value) uint64_t value)
{ {
@ -728,16 +760,28 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
* or PL0_RO as appropriate and then check PMUSERENR in the helper fn. * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
*/ */
{ .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1, { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
.access = PL0_RW, .resetvalue = 0, .access = PL0_RW, .type = ARM_CP_NO_MIGRATE,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
.writefn = pmcntenset_write, .writefn = pmcntenset_write,
.accessfn = pmreg_access, .accessfn = pmreg_access,
.raw_writefn = raw_write }, .raw_writefn = raw_write },
{ .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
.access = PL0_RW, .accessfn = pmreg_access,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
.writefn = pmcntenset_write, .raw_writefn = raw_write },
{ .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2, { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
.access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .access = PL0_RW,
.fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
.accessfn = pmreg_access, .accessfn = pmreg_access,
.writefn = pmcntenclr_write, .writefn = pmcntenclr_write,
.type = ARM_CP_NO_MIGRATE }, .type = ARM_CP_NO_MIGRATE },
{ .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
.access = PL0_RW, .accessfn = pmreg_access,
.type = ARM_CP_NO_MIGRATE,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
.writefn = pmcntenclr_write },
{ .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3, { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
.access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
.accessfn = pmreg_access, .accessfn = pmreg_access,
@ -755,9 +799,21 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
#ifndef CONFIG_USER_ONLY #ifndef CONFIG_USER_ONLY
{ .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
.access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO, .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
.readfn = pmccntr_read, .writefn = pmccntr_write, .readfn = pmccntr_read, .writefn = pmccntr_write32,
.accessfn = pmreg_access }, .accessfn = pmreg_access },
{ .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
.access = PL0_RW, .accessfn = pmreg_access,
.type = ARM_CP_IO,
.readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif #endif
{ .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
.writefn = pmccfiltr_write,
.access = PL0_RW, .accessfn = pmreg_access,
.type = ARM_CP_IO,
.fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
.resetvalue = 0, },
{ .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1, { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
.access = PL0_RW, .access = PL0_RW,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper), .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
@ -2386,13 +2442,23 @@ void register_cp_regs_for_features(ARMCPU *cpu)
#ifndef CONFIG_USER_ONLY #ifndef CONFIG_USER_ONLY
ARMCPRegInfo pmcr = { ARMCPRegInfo pmcr = {
.name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
.access = PL0_RW, .resetvalue = cpu->midr & 0xff000000, .access = PL0_RW,
.type = ARM_CP_IO, .type = ARM_CP_IO | ARM_CP_NO_MIGRATE,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
.accessfn = pmreg_access, .writefn = pmcr_write, .accessfn = pmreg_access, .writefn = pmcr_write,
.raw_writefn = raw_write, .raw_writefn = raw_write,
}; };
ARMCPRegInfo pmcr64 = {
.name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
.access = PL0_RW, .accessfn = pmreg_access,
.type = ARM_CP_IO,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
.resetvalue = cpu->midr & 0xff000000,
.writefn = pmcr_write, .raw_writefn = raw_write,
};
define_one_arm_cp_reg(cpu, &pmcr); define_one_arm_cp_reg(cpu, &pmcr);
define_one_arm_cp_reg(cpu, &pmcr64);
#endif #endif
ARMCPRegInfo clidr = { ARMCPRegInfo clidr = {
.name = "CLIDR", .state = ARM_CP_STATE_BOTH, .name = "CLIDR", .state = ARM_CP_STATE_BOTH,