Add modern MIPS Support.

This commit is contained in:
Fedor 2020-06-10 21:12:03 +03:00
parent f05365da1e
commit 2d0a568f07
33 changed files with 985 additions and 962 deletions

View File

@ -90,7 +90,10 @@ class DirReaderLinux {
private:
const int fd_;
unsigned char buf_[512];
union {
linux_dirent dirent_;
unsigned char buf_[512];
};
size_t offset_, size_;
DISALLOW_COPY_AND_ASSIGN(DirReaderLinux);

View File

@ -229,7 +229,11 @@ static inline JS::Value PoisonedObjectValue(JSObject* obj);
namespace detail {
constexpr int CanonicalizedNaNSignBit = 0;
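// Legacy MIPS FPUs (builds without __mips_nan2008) invert the IEEE 754-2008
// quiet-bit convention: a set significand MSB marks a signaling NaN, so the
// canonical quiet NaN must keep that bit clear and set the remaining bits.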
#if defined(__mips__) && !defined(__mips_nan2008)
constexpr uint64_t CanonicalizedNaNSignificand = 0x7FFFFFFFFFFFFULL;
#else
constexpr uint64_t CanonicalizedNaNSignificand = 0x8000000000000ULL;
#endif
constexpr uint64_t CanonicalizedNaNBits =
mozilla::SpecificNaNBits<double,

View File

@ -589,7 +589,23 @@ static ProcessExecutableMemory execMemory;
void*
js::jit::AllocateExecutableMemory(size_t bytes, ProtectionSetting protection)
{
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
// On MIPS, j/jal instructions can only branch within the current
// 256 MB-aligned region.
void* allocation = nullptr;
js::Vector<void*, 8, SystemAllocPolicy> unused_maps;
for (;;) {
allocation = execMemory.allocate(bytes, protection);
if ((uintptr_t(allocation) >> 28) == ((uintptr_t(allocation) + bytes) >> 28))
break;
unused_maps.append(allocation);
}
for (size_t i = 0; i < unused_maps.length(); i++)
DeallocateExecutableMemory(unused_maps[i], bytes);
return allocation;
#else
return execMemory.allocate(bytes, protection);
#endif
}
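// Illustrative sketch (not part of this patch): a MIPS j/jal encodes a 26-bit
// word index and takes its upper four address bits from the delay-slot PC,
// which is why code reachable by one jump must stay inside a single
// 256 MB-aligned region. The target computation, with a made-up helper name:
static inline uintptr_t JumpTargetFromImm26(uintptr_t delaySlotPC, uint32_t imm26)
{
// Upper bits come from the address of the delay slot; the low 28 bits come
// from the 26-bit instruction index shifted to a byte offset.
return (delaySlotPC & ~uintptr_t(0x0FFFFFFF)) | (uintptr_t(imm26 & 0x03FFFFFF) << 2);
}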
void

View File

@ -171,6 +171,18 @@ ABIArgGenerator::next(MIRType type)
return softNext(type);
}
bool
js::jit::IsUnaligned(const wasm::MemoryAccessDesc& access)
{
if (!access.align())
return false;
if (access.type() == Scalar::Float64 && access.align() >= 4)
return false;
return access.align() < access.byteSize();
}
// Encode a standard register when it is being used as src1, the dest, and an
// extra register. These should never be called with an InvalidReg.
uint32_t

View File

@ -108,6 +108,8 @@ class ABIArgGenerator
uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
};
bool IsUnaligned(const wasm::MemoryAccessDesc& access);
static constexpr Register ABINonArgReg0 = r4;
static constexpr Register ABINonArgReg1 = r5;
static constexpr Register ABINonArgReg2 = r6;

View File

@ -613,7 +613,7 @@ LIRGeneratorARM::visitWasmLoad(MWasmLoad* ins)
LAllocation ptr = useRegisterAtStart(base);
if (ins->access().isUnaligned()) {
if (IsUnaligned(ins->access())) {
// Unaligned access expected! Revert to a byte load.
LDefinition ptrCopy = tempCopy(base, 0);
@ -662,7 +662,7 @@ LIRGeneratorARM::visitWasmStore(MWasmStore* ins)
LAllocation ptr = useRegisterAtStart(base);
if (ins->access().isUnaligned()) {
if (IsUnaligned(ins->access())) {
// Unaligned access expected! Revert to a byte store.
LDefinition ptrCopy = tempCopy(base, 0);

View File

@ -92,6 +92,7 @@ void
AssemblerMIPSShared::finish()
{
MOZ_ASSERT(!isFinished);
GenerateMixedJumps();
isFinished = true;
}
@ -100,13 +101,26 @@ AssemblerMIPSShared::asmMergeWith(const AssemblerMIPSShared& other)
{
if (!AssemblerShared::asmMergeWith(size(), other))
return false;
for (size_t i = 0; i < other.numLongJumps(); i++) {
size_t off = other.longJumps_[i];
addLongJump(BufferOffset(size() + off));
for (size_t i = 0; i < other.numMixedJumps(); i++) {
const MixedJumpPatch& mjp = other.mixedJumps_[i];
addMixedJump(BufferOffset(size() + mjp.src.getOffset()),
size() + mjp.target, mjp.kind);
}
return m_buffer.appendBuffer(other.m_buffer);
}
void
AssemblerMIPSShared::executableCopy(uint8_t* buffer)
{
MOZ_ASSERT(isFinished);
m_buffer.executableCopy(buffer);
// Patch all mixed jumps during code copy.
PatchMixedJumps(buffer);
AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
}
uint32_t
AssemblerMIPSShared::actualIndex(uint32_t idx_) const
{
@ -1587,6 +1601,92 @@ AssemblerMIPSShared::bindLater(Label* label, wasm::TrapDesc target)
label->reset();
}
void
AssemblerMIPSShared::bind(InstImm* inst, uintptr_t branch, uintptr_t target)
{
intptr_t offset = target - branch;
// Generate the patchable mixed jump for call.
if (inst->extractOpcode() == ((uint32_t)op_jal >> OpcodeShift)) {
addMixedJump(BufferOffset(branch), target);
return;
}
// If encoded offset is 4, then the jump must be short
if (BOffImm16(inst[0]).decode() == 4) {
MOZ_ASSERT(BOffImm16::IsInRange(offset));
inst[0].setBOffImm16(BOffImm16(offset));
inst[1].makeNop();
return;
}
if (BOffImm16::IsInRange(offset)) {
inst[0].setBOffImm16(BOffImm16(offset));
inst[1].makeNop();
return;
}
MixedJumpPatch::Kind kind = MixedJumpPatch::NONE;
InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
if (inst[0].encode() != inst_beq.encode())
kind = MixedJumpPatch::CONDITIONAL;
addMixedJump(BufferOffset(branch), target, kind);
}
void
AssemblerMIPSShared::bind(RepatchLabel* label)
{
BufferOffset dest = nextOffset();
if (label->used() && !oom()) {
// If the label has a use, then change this use to refer to
// the bound label;
BufferOffset b(label->offset());
InstImm* inst = (InstImm*)editSrc(b);
InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
intptr_t offset = dest.getOffset() - label->offset();
// If first instruction is j, then this is a mixed jump.
// If second instruction is lui, then this is a loop backedge.
if (inst[0].extractOpcode() == (uint32_t(op_j) >> OpcodeShift)) {
// For unconditional mixed branches generated by jumpWithPatch
addMixedJump(b, dest.getOffset(), MixedJumpPatch::PATCHABLE);
} else if (inst[1].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift) ||
BOffImm16::IsInRange(offset))
{
// Handle code produced by:
// backedgeJump
MOZ_ASSERT(BOffImm16::IsInRange(offset));
MOZ_ASSERT(inst[0].extractOpcode() == (uint32_t(op_beq) >> OpcodeShift) ||
inst[0].extractOpcode() == (uint32_t(op_bne) >> OpcodeShift) ||
inst[0].extractOpcode() == (uint32_t(op_blez) >> OpcodeShift) ||
inst[0].extractOpcode() == (uint32_t(op_bgtz) >> OpcodeShift));
inst[0].setBOffImm16(BOffImm16(offset));
} else if (inst[0].encode() == inst_beq.encode()) {
// Handle open mixed unconditional jumps created by
// MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
// We need to add it to mixed jumps array here.
// See MacroAssemblerMIPS::branchWithCode().
MOZ_ASSERT(inst[1].encode() == NopInst);
addMixedJump(b, dest.getOffset(), MixedJumpPatch::PATCHABLE);
inst[0] = InstJump(op_j, JOffImm26(0)).encode();
} else {
// Handle open mixed conditional jumps created by
// MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
inst[0] = invertBranch(inst[0], BOffImm16(4 * sizeof(uint32_t)));
// No need for a "nop" here because we can clobber scratch.
// We need to add it to mixed jumps array here.
// See MacroAssemblerMIPS::branchWithCode().
MOZ_ASSERT(inst[1].encode() == NopInst);
MOZ_ASSERT(inst[2].encode() == NopInst);
MOZ_ASSERT(inst[3].encode() == NopInst);
addMixedJump(b, dest.getOffset(), MixedJumpPatch::PATCHABLE);
inst[2] = InstJump(op_j, JOffImm26(0)).encode();
}
}
label->bind(dest.getOffset());
}
void
AssemblerMIPSShared::retarget(Label* label, Label* target)
{
@ -1653,6 +1753,25 @@ AssemblerMIPSShared::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm)
*(raw - 1) = imm.value;
}
uint32_t
AssemblerMIPSShared::PatchWrite_NearCallSize()
{
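// A near call is now a single jal plus its delay-slot nop (two words),
// instead of the four-word lui/ori/jalr/nop sequence it replaces.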
return 2 * sizeof(uint32_t);
}
void
AssemblerMIPSShared::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
{
Instruction* inst = (Instruction*) start.raw();
// Overwrite whatever instruction used to be here with a call.
inst[0] = InstJump(op_jal, JOffImm26(uintptr_t(toCall.raw())));
inst[1] = InstNOP();
// Ensure everyone sees the code that was just written into memory.
AutoFlushICache::flush(uintptr_t(inst), PatchWrite_NearCallSize());
}
uint8_t*
AssemblerMIPSShared::NextInstruction(uint8_t* inst_, uint32_t* count)
{
@ -1662,6 +1781,82 @@ AssemblerMIPSShared::NextInstruction(uint8_t* inst_, uint32_t* count)
return reinterpret_cast<uint8_t*>(inst->next());
}
Instruction*
AssemblerMIPSShared::GetInstructionImmediateFromJump(Instruction* jump)
{
if (jump->extractOpcode() == ((uint32_t)op_j >> OpcodeShift) ||
jump->extractOpcode() == ((uint32_t)op_jal >> OpcodeShift))
{
InstJump* j = (InstJump*) jump;
uintptr_t base = (uintptr_t(j) >> Imm28Bits) << Imm28Bits;
uint32_t index = j->extractImm26Value() << 2;
jump = (Instruction*)(base | index);
if (jump->extractOpcode() != ((uint32_t)op_lui >> OpcodeShift))
jump = jump->next();
}
return jump;
}
void
AssemblerMIPSShared::PatchMixedJump(uint8_t* src, uint8_t* mid, uint8_t* target)
{
InstImm* b = (InstImm*)src;
uint32_t opcode = b->extractOpcode();
int offset;
if (mid) {
int o = 0;
InstImm* insn = (InstImm*)mid;
offset = intptr_t(mid);
if (insn->extractOpcode() != ((uint32_t)op_lui >> OpcodeShift)) {
o = 1 * sizeof(uint32_t);
Assembler::PatchInstructionImmediate(mid + Assembler::InstructionImmediateSize() +
2 * sizeof(uint32_t), PatchedImmPtr(&b[2]));
}
Assembler::PatchInstructionImmediate(mid + o, PatchedImmPtr(target));
} else {
offset = intptr_t(target);
}
if (((uint32_t)op_j >> OpcodeShift) == opcode ||
((uint32_t)op_jal >> OpcodeShift) == opcode)
{
InstJump* j = (InstJump*)b;
j->setJOffImm26(JOffImm26(offset));
} else {
b[0] = InstJump(op_j, JOffImm26(offset)).encode();
}
}
void
AssemblerMIPSShared::PatchMixedJumps(uint8_t* buffer)
{
// Patch all mixed jumps.
for (size_t i = 0; i < numMixedJumps(); i++) {
MixedJumpPatch& mjp = mixedJump(i);
uint8_t* src = buffer + mjp.src.getOffset();
uint8_t* mid = nullptr;
uint8_t* target = buffer + mjp.target;
InstImm* b = (InstImm*)src;
if (mjp.mid.assigned()) {
mid = buffer + mjp.mid.getOffset();
if (MixedJumpPatch::CONDITIONAL & mjp.kind) {
InstImm* bc = (InstImm*)(buffer + mjp.mid.getOffset());
BOffImm16 offset(Assembler::InstructionImmediateSize() + 2 * sizeof(uint32_t));
bc[0] = invertBranch(b[0], offset);
}
}
PatchMixedJump(src, mid, target);
b[1].makeNop();
}
}
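// Reading aid (not code from the patch), to be read together with
// MacroAssemblerMIPSShared::GenerateMixedJumps() later in this commit:
// src:    the branch or j/jal emitted where the jump occurs; if the target is
//         directly reachable it is patched in place, otherwise it is
//         redirected to the trampoline at mid.
// mid:    a trampoline emitted by GenerateMixedJumps() after the main code: an
//         optional inverted conditional branch, then a patchable lui/ori pair
//         and a jr through ScratchRegister.
// target: the final destination; PatchMixedJump() writes its absolute address
//         into the lui/ori pair once the buffer's load address is known during
//         executableCopy().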
// Since there are no pools in MIPS implementation, this should be simple.
Instruction*
Instruction::next()
@ -1744,3 +1939,12 @@ AssemblerMIPSShared::ToggleToCmp(CodeLocationLabel inst_)
AutoFlushICache::flush(uintptr_t(inst), 4);
}
void
AssemblerMIPSShared::UpdateLuiOriValue(Instruction* inst0, Instruction* inst1, uint32_t value)
{
MOZ_ASSERT(inst0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
MOZ_ASSERT(inst1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
((InstImm*) inst0)->setImm16(Imm16::Upper(Imm32(value)));
((InstImm*) inst1)->setImm16(Imm16::Lower(Imm32(value)));
}

View File

@ -525,21 +525,13 @@ class JOffImm26
}
int32_t decode() {
MOZ_ASSERT(!isInvalid());
return (int32_t(data << 8) >> 6) + 4;
return int32_t(data << 8) >> 6;
}
explicit JOffImm26(int offset)
: data ((offset - 4) >> 2 & Imm26Mask)
: data (offset >> 2 & Imm26Mask)
{
MOZ_ASSERT((offset & 0x3) == 0);
MOZ_ASSERT(IsInRange(offset));
}
static bool IsInRange(int offset) {
if ((offset - 4) < -536870912)
return false;
if ((offset - 4) > 536870908)
return false;
return true;
}
static const uint32_t INVALID = 0x20000000;
JOffImm26()
@ -840,6 +832,27 @@ class AssemblerMIPSShared : public AssemblerShared
TestForFalse
};
struct MixedJumpPatch
{
enum Kind {
NONE,
PATCHABLE,
CONDITIONAL,
};
BufferOffset src;
BufferOffset mid;
uintptr_t target;
Kind kind;
MixedJumpPatch(BufferOffset src, uintptr_t target, Kind kind)
: src(src),
mid(BufferOffset()),
target(target),
kind(kind)
{ }
};
// :( this should be protected, but since CodeGenerator
// wants to use it, it needs to go out here :(
@ -873,7 +886,7 @@ class AssemblerMIPSShared : public AssemblerShared
};
js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
js::Vector<uint32_t, 8, SystemAllocPolicy> longJumps_;
js::Vector<MixedJumpPatch, 8, SystemAllocPolicy> mixedJumps_;
CompactBufferWriter jumpRelocations_;
CompactBufferWriter dataRelocations_;
@ -922,7 +935,9 @@ class AssemblerMIPSShared : public AssemblerShared
public:
void finish();
bool asmMergeWith(const AssemblerMIPSShared& other);
void executableCopy(void* buffer);
// Copy the assembly code to the given buffer, and perform any pending
// relocations relying on the target address.
void executableCopy(uint8_t* buffer);
void copyJumpRelocationTable(uint8_t* dest);
void copyDataRelocationTable(uint8_t* dest);
void copyPreBarrierTable(uint8_t* dest);
@ -1197,8 +1212,9 @@ class AssemblerMIPSShared : public AssemblerShared
// label operations
void bind(Label* label, BufferOffset boff = BufferOffset());
void bindLater(Label* label, wasm::TrapDesc target);
virtual void bind(InstImm* inst, uintptr_t branch, uintptr_t target) = 0;
void bind(InstImm* inst, uintptr_t branch, uintptr_t target);
virtual void Bind(uint8_t* rawCode, CodeOffset* label, const void* address) = 0;
void bind(RepatchLabel* label);
void bind(CodeOffset* label) {
label->bind(currentOffset());
}
@ -1240,16 +1256,21 @@ class AssemblerMIPSShared : public AssemblerShared
writeRelocation(src);
}
void addLongJump(BufferOffset src) {
enoughMemory_ &= longJumps_.append(src.getOffset());
void addMixedJump(BufferOffset src, uintptr_t target,
MixedJumpPatch::Kind kind = MixedJumpPatch::NONE)
{
enoughMemory_ &= mixedJumps_.append(MixedJumpPatch(src, target, kind));
}
virtual void GenerateMixedJumps() = 0;
void PatchMixedJumps(uint8_t* buffer);
public:
size_t numLongJumps() const {
return longJumps_.length();
size_t numMixedJumps() const {
return mixedJumps_.length();
}
uint32_t longJump(size_t i) {
return longJumps_[i];
MixedJumpPatch& mixedJump(size_t i) {
return mixedJumps_[i];
}
void flushBuffer() {
@ -1261,18 +1282,24 @@ class AssemblerMIPSShared : public AssemblerShared
}
static uint32_t NopSize() { return 4; }
static uint32_t PatchWrite_NearCallSize();
static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
static uint32_t AlignDoubleArg(uint32_t offset) {
return (offset + 1U) &~ 1U;
}
static uint8_t* NextInstruction(uint8_t* instruction, uint32_t* count = nullptr);
static Instruction* GetInstructionImmediateFromJump(Instruction* jump);
static void PatchMixedJump(uint8_t* src, uint8_t* mid, uint8_t* target);
static void ToggleToJmp(CodeLocationLabel inst_);
static void ToggleToCmp(CodeLocationLabel inst_);
static void UpdateLuiOriValue(Instruction* inst0, Instruction* inst1, uint32_t value);
void processCodeLabels(uint8_t* rawCode);
bool bailed() {
@ -1487,6 +1514,10 @@ class InstJump : public Instruction
uint32_t extractImm26Value() {
return extractBitField(Imm26Shift + Imm26Bits - 1, Imm26Shift);
}
void setJOffImm26(JOffImm26 off) {
// Reset immediate field and replace it
data = (data & ~Imm26Mask) | off.encode();
}
};
// Class for Loongson-specific instructions
@ -1516,6 +1547,20 @@ class InstGS : public Instruction
{ }
};
inline bool
IsUnaligned(const wasm::MemoryAccessDesc& access)
{
if (!access.align())
return false;
#ifdef JS_CODEGEN_MIPS32
if (access.type() == Scalar::Int64 && access.align() >= 4)
return false;
#endif
return access.align() < access.byteSize();
}
} // namespace jit
} // namespace js

View File

@ -1915,16 +1915,18 @@ CodeGeneratorMIPSShared::emitWasmLoad(T* lir)
BaseIndex address(HeapReg, ptr, TimesOne);
if (mir->access().isUnaligned()) {
if (IsUnaligned(mir->access())) {
Register temp = ToRegister(lir->getTemp(1));
if (isFloat) {
FloatRegister output = ToFloatRegister(lir->output());
if (byteSize == 4)
masm.loadUnalignedFloat32(address, temp, ToFloatRegister(lir->output()));
masm.loadUnalignedFloat32(mir->access(), address, temp, output);
else
masm.loadUnalignedDouble(address, temp, ToFloatRegister(lir->output()));
masm.loadUnalignedDouble(mir->access(), address, temp, output);
} else {
masm.ma_load_unaligned(ToRegister(lir->output()), address, temp,
masm.ma_load_unaligned(mir->access(), ToRegister(lir->output()), address, temp,
static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
}
@ -1934,16 +1936,20 @@ CodeGeneratorMIPSShared::emitWasmLoad(T* lir)
}
if (isFloat) {
if (byteSize == 4)
masm.loadFloat32(address, ToFloatRegister(lir->output()));
else
masm.loadDouble(address, ToFloatRegister(lir->output()));
FloatRegister output = ToFloatRegister(lir->output());
if (byteSize == 4) {
masm.loadFloat32(address, output);
} else {
masm.computeScaledAddress(address, SecondScratchReg);
masm.as_ld(output, SecondScratchReg, 0);
}
} else {
masm.ma_load(ToRegister(lir->output()), address,
static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
}
masm.append(mir->access(), masm.size() - 4, masm.framePushed());
masm.memoryBarrier(mir->access().barrierAfter());
}
@ -2000,16 +2006,18 @@ CodeGeneratorMIPSShared::emitWasmStore(T* lir)
BaseIndex address(HeapReg, ptr, TimesOne);
if (mir->access().isUnaligned()) {
if (IsUnaligned(mir->access())) {
Register temp = ToRegister(lir->getTemp(1));
if (isFloat) {
FloatRegister value = ToFloatRegister(lir->value());
if (byteSize == 4)
masm.storeUnalignedFloat32(ToFloatRegister(lir->value()), temp, address);
masm.storeUnalignedFloat32(mir->access(), value, temp, address);
else
masm.storeUnalignedDouble(ToFloatRegister(lir->value()), temp, address);
masm.storeUnalignedDouble(mir->access(), value, temp, address);
} else {
masm.ma_store_unaligned(ToRegister(lir->value()), address, temp,
masm.ma_store_unaligned(mir->access(), ToRegister(lir->value()), address, temp,
static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
}
@ -2019,16 +2027,23 @@ CodeGeneratorMIPSShared::emitWasmStore(T* lir)
}
if (isFloat) {
FloatRegister value = ToFloatRegister(lir->value());
if (byteSize == 4) {
masm.storeFloat32(ToFloatRegister(lir->value()), address);
} else
masm.storeDouble(ToFloatRegister(lir->value()), address);
masm.storeFloat32(value, address);
} else {
// For the time being, storeDouble on MIPS32 uses two store instructions,
// so we emit only one here to get correct behavior in case of an OOB access.
masm.computeScaledAddress(address, SecondScratchReg);
masm.as_sd(value, SecondScratchReg, 0);
}
} else {
masm.ma_store(ToRegister(lir->value()), address,
static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
}
// Only the last emitted instruction is a memory access.
masm.append(mir->access(), masm.size() - 4, masm.framePushed());
masm.memoryBarrier(mir->access().barrierAfter());
}
@ -2412,7 +2427,7 @@ CodeGeneratorMIPSShared::visitUDivOrMod(LUDivOrMod* ins)
if (ins->canBeDivideByZero()) {
if (ins->mir()->isTruncated()) {
if (ins->trapOnError()) {
masm.ma_b(rhs, rhs, trap(ins, wasm::Trap::InvalidConversionToInteger), Assembler::Zero);
masm.ma_b(rhs, rhs, trap(ins, wasm::Trap::IntegerDivideByZero), Assembler::Zero);
} else {
// Infinity|0 == 0
Label notzero;

View File

@ -324,7 +324,7 @@ LIRGeneratorMIPSShared::visitWasmLoad(MWasmLoad* ins)
LAllocation ptr = useRegisterAtStart(base);
if (ins->access().isUnaligned()) {
if (IsUnaligned(ins->access())) {
if (ins->type() == MIRType::Int64) {
auto* lir = new(alloc()) LWasmUnalignedLoadI64(ptr, temp());
if (ins->access().offset())
@ -367,7 +367,7 @@ LIRGeneratorMIPSShared::visitWasmStore(MWasmStore* ins)
MDefinition* value = ins->value();
LAllocation baseAlloc = useRegisterAtStart(base);
if (ins->access().isUnaligned()) {
if (IsUnaligned(ins->access())) {
if (ins->type() == MIRType::Int64) {
LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
auto* lir = new(alloc()) LWasmUnalignedStoreI64(baseAlloc, valueAlloc, temp());

View File

@ -39,6 +39,17 @@ MacroAssemblerMIPSShared::ma_li(Register dest, Imm32 imm)
}
}
// This method generates a lui/ori instruction pair that can be modified by
// UpdateLuiOriValue, either during compilation (e.g. Assembler::bind) or
// during execution (e.g. jit::PatchJump).
void
MacroAssemblerMIPSShared::ma_liPatchable(Register dest, Imm32 imm)
{
m_buffer.ensureSpace(2 * sizeof(uint32_t));
as_lui(dest, Imm16::Upper(imm).encode());
as_ori(dest, dest, Imm16::Lower(imm).encode());
}
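// The pair emitted above is exactly what UpdateLuiOriValue() rewrites later;
// a minimal illustrative sketch of the split it relies on (helper not from
// the patch):
static inline void SplitImm32(uint32_t value, uint16_t* luiImm, uint16_t* oriImm)
{
*luiImm = uint16_t(value >> 16);    // lui places this in the upper half
*oriImm = uint16_t(value & 0xFFFF); // ori merges this into the lower half
}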
// Shifts
void
MacroAssemblerMIPSShared::ma_sll(Register rd, Register rt, Imm32 shift)
@ -225,8 +236,14 @@ template <typename L>
void
MacroAssemblerMIPSShared::ma_addTestCarry(Register rd, Register rs, Register rt, L overflow)
{
as_addu(rd, rs, rt);
as_sltu(SecondScratchReg, rd, rs);
if (rd != rs) {
as_addu(rd, rs, rt);
as_sltu(SecondScratchReg, rd, rs);
} else {
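// rd aliases rs: stash the original rs in SecondScratchReg so the carry
// check below compares against the pre-add value, not the clobbered rd.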
ma_move(SecondScratchReg, rs);
as_addu(rd, rs, rt);
as_sltu(SecondScratchReg, rd, SecondScratchReg);
}
ma_b(SecondScratchReg, SecondScratchReg, overflow, Assembler::NonZero);
}
@ -446,7 +463,7 @@ MacroAssemblerMIPSShared::ma_load(Register dest, const BaseIndex& src,
}
void
MacroAssemblerMIPSShared::ma_load_unaligned(Register dest, const BaseIndex& src, Register temp,
MacroAssemblerMIPSShared::ma_load_unaligned(const wasm::MemoryAccessDesc& access, Register dest, const BaseIndex& src, Register temp,
LoadStoreSize size, LoadStoreExtension extension)
{
int16_t lowOffset, hiOffset;
@ -460,36 +477,41 @@ MacroAssemblerMIPSShared::ma_load_unaligned(Register dest, const BaseIndex& src,
hiOffset = Imm16(src.offset + size / 8 - 1).encode();
} else {
ma_li(ScratchRegister, Imm32(src.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
asMasm().addPtr(SecondScratchReg, ScratchRegister);
base = ScratchRegister;
lowOffset = Imm16(0).encode();
hiOffset = Imm16(size / 8 - 1).encode();
}
BufferOffset load;
switch (size) {
case SizeHalfWord:
as_lbu(dest, base, lowOffset);
if (extension != ZeroExtend)
as_lbu(temp, base, hiOffset);
load = as_lbu(temp, base, hiOffset);
else
as_lb(temp, base, hiOffset);
load = as_lb(temp, base, hiOffset);
as_lbu(dest, base, lowOffset);
as_ins(dest, temp, 8, 24);
break;
case SizeWord:
as_lwl(dest, base, hiOffset);
load = as_lwl(dest, base, hiOffset);
as_lwr(dest, base, lowOffset);
#ifdef JS_CODEGEN_MIPS64
if (extension != ZeroExtend)
as_dext(dest, dest, 0, 32);
#endif
break;
#ifdef JS_CODEGEN_MIPS64
case SizeDouble:
as_ldl(dest, base, hiOffset);
load = as_ldl(dest, base, hiOffset);
as_ldr(dest, base, lowOffset);
break;
#endif
default:
MOZ_CRASH("Invalid argument for ma_load");
}
append(access, load.getOffset(), asMasm().framePushed());
}
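// Reading aid for the lwl/lwr pairs above (and the matching swl/swr pairs in
// ma_store_unaligned below): putting lwl at the high offset and lwr at the low
// offset is the little-endian idiom; together the pair assembles an unaligned
// word without an alignment trap. An illustrative byte-wise equivalent,
// assuming a little-endian target (helper not from the patch):
static inline uint32_t LoadUnalignedWordLE(const uint8_t* p)
{
return uint32_t(p[0]) | (uint32_t(p[1]) << 8) |
       (uint32_t(p[2]) << 16) | (uint32_t(p[3]) << 24);
}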
void
@ -593,13 +615,13 @@ MacroAssemblerMIPSShared::ma_store(Imm32 imm, const BaseIndex& dest,
}
void
MacroAssemblerMIPSShared::ma_store_unaligned(Register data, const BaseIndex& dest, Register temp,
MacroAssemblerMIPSShared::ma_store_unaligned(const wasm::MemoryAccessDesc& access, Register data, const BaseIndex& dest, Register temp,
LoadStoreSize size, LoadStoreExtension extension)
{
int16_t lowOffset, hiOffset;
Register base;
asMasm().computeEffectiveAddress(dest, SecondScratchReg);
asMasm().computeScaledAddress(dest, SecondScratchReg);
if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + size / 8 - 1)) {
base = SecondScratchReg;
@ -607,29 +629,87 @@ MacroAssemblerMIPSShared::ma_store_unaligned(Register data, const BaseIndex& des
hiOffset = Imm16(dest.offset + size / 8 - 1).encode();
} else {
ma_li(ScratchRegister, Imm32(dest.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
asMasm().addPtr(SecondScratchReg, ScratchRegister);
base = ScratchRegister;
lowOffset = Imm16(0).encode();
hiOffset = Imm16(size / 8 - 1).encode();
}
BufferOffset store;
switch (size) {
case SizeHalfWord:
as_sb(data, base, lowOffset);
as_ext(temp, data, 8, 8);
as_sb(temp, base, hiOffset);
store = as_sb(temp, base, hiOffset);
as_sb(data, base, lowOffset);
break;
case SizeWord:
as_swl(data, base, hiOffset);
store = as_swl(data, base, hiOffset);
as_swr(data, base, lowOffset);
break;
#ifdef JS_CODEGEN_MIPS64
case SizeDouble:
as_sdl(data, base, hiOffset);
store = as_sdl(data, base, hiOffset);
as_sdr(data, base, lowOffset);
break;
#endif
default:
MOZ_CRASH("Invalid argument for ma_store");
}
append(access, store.getOffset(), asMasm().framePushed());
}
void
MacroAssemblerMIPSShared::branchWithCode(InstImm code, Label* label, JumpKind jumpKind)
{
MOZ_ASSERT(code.encode() != InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
if (label->bound()) {
int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
if (BOffImm16::IsInRange(offset))
jumpKind = ShortJump;
if (jumpKind == ShortJump) {
MOZ_ASSERT(BOffImm16::IsInRange(offset));
code.setBOffImm16(BOffImm16(offset));
writeInst(code.encode());
as_nop();
return;
}
InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
if (code.encode() == inst_beq.encode()) {
// Handle mixed jump
addMixedJump(nextOffset(), label->offset());
as_j(JOffImm26(0));
as_nop();
return;
}
// Handle long conditional branch
addMixedJump(nextOffset(), label->offset(), MixedJumpPatch::CONDITIONAL);
writeInst(code.encode());
as_nop();
return;
}
// Generate mixed jump and link it to a label.
// Second word holds a pointer to the next branch in label's chain.
uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
// Make the whole branch continuous in the buffer.
m_buffer.ensureSpace(2 * sizeof(uint32_t));
if (jumpKind == ShortJump) {
// Indicate that this is a short jump with offset 4.
code.setBOffImm16(BOffImm16(4));
}
BufferOffset bo = writeInst(code.encode());
writeInst(nextInChain);
if (!oom())
label->use(bo.getOffset());
}
// Branches when done from within mips-specific code.
@ -639,7 +719,7 @@ MacroAssemblerMIPSShared::ma_b(Register lhs, Register rhs, Label* label, Conditi
switch (c) {
case Equal :
case NotEqual:
asMasm().branchWithCode(getBranchCode(lhs, rhs, c), label, jumpKind);
branchWithCode(getBranchCode(lhs, rhs, c), label, jumpKind);
break;
case Always:
ma_b(label, jumpKind);
@ -649,11 +729,11 @@ MacroAssemblerMIPSShared::ma_b(Register lhs, Register rhs, Label* label, Conditi
case Signed:
case NotSigned:
MOZ_ASSERT(lhs == rhs);
asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
branchWithCode(getBranchCode(lhs, c), label, jumpKind);
break;
default:
Condition cond = ma_cmp(ScratchRegister, lhs, rhs, c);
asMasm().branchWithCode(getBranchCode(ScratchRegister, cond), label, jumpKind);
branchWithCode(getBranchCode(ScratchRegister, cond), label, jumpKind);
break;
}
}
@ -668,11 +748,19 @@ MacroAssemblerMIPSShared::ma_b(Register lhs, Imm32 imm, Label* label, Condition
else if (c == Below)
; // This condition is always false. No branch required.
else
asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
branchWithCode(getBranchCode(lhs, c), label, jumpKind);
} else {
MOZ_ASSERT(lhs != ScratchRegister);
ma_li(ScratchRegister, imm);
ma_b(lhs, ScratchRegister, label, c, jumpKind);
switch (c) {
case Equal:
case NotEqual:
MOZ_ASSERT(lhs != ScratchRegister);
ma_li(ScratchRegister, imm);
ma_b(lhs, ScratchRegister, label, c, jumpKind);
break;
default:
Condition cond = ma_cmp(ScratchRegister, lhs, imm, c);
asMasm().branchWithCode(getBranchCode(ScratchRegister, cond), label, jumpKind);
}
}
}
@ -705,7 +793,7 @@ template void MacroAssemblerMIPSShared::ma_b<ImmTag>(Register lhs, ImmTag rhs,
void
MacroAssemblerMIPSShared::ma_b(Label* label, JumpKind jumpKind)
{
asMasm().branchWithCode(getBranchCode(BranchIsJump), label, jumpKind);
branchWithCode(getBranchCode(BranchIsJump), label, jumpKind);
}
void
@ -716,6 +804,30 @@ MacroAssemblerMIPSShared::ma_b(wasm::TrapDesc target, JumpKind jumpKind)
bindLater(&label, target);
}
void
MacroAssemblerMIPSShared::ma_jal(Label* label)
{
if (label->bound()) {
// Generate the mixed jump.
addMixedJump(nextOffset(), label->offset());
as_jal(JOffImm26(0));
as_nop();
return;
}
// Second word holds a pointer to the next branch in label's chain.
uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
// Make the whole branch continuous in the buffer. The two
// instructions written below include the delay slot.
m_buffer.ensureSpace(2 * sizeof(uint32_t));
BufferOffset bo = as_jal(JOffImm26(0));
writeInst(nextInChain);
if (!oom())
label->use(bo.getOffset());
}
Assembler::Condition
MacroAssemblerMIPSShared::ma_cmp(Register scratch, Register lhs, Register rhs, Condition c)
{
@ -768,20 +880,59 @@ MacroAssemblerMIPSShared::ma_cmp(Register scratch, Register lhs, Register rhs, C
// beq at,$zero,offs
as_slt(scratch, rhs, lhs);
return Equal;
case Equal :
case NotEqual:
case Zero:
case NonZero:
case Always:
case Signed:
case NotSigned:
MOZ_CRASH("There is a better way to compare for equality.");
break;
case Overflow:
MOZ_CRASH("Overflow condition not supported for MIPS.");
break;
default:
MOZ_CRASH("Invalid condition for branch.");
MOZ_CRASH("Invalid condition.");
}
return Always;
}
Assembler::Condition
MacroAssemblerMIPSShared::ma_cmp(Register scratch, Register lhs, Imm32 imm, Condition c)
{
switch (c) {
case Above:
case BelowOrEqual:
if (Imm16::IsInSignedRange(imm.value + 1) && imm.value != -1) {
// lhs <= rhs via lhs < rhs + 1 if rhs + 1 does not overflow
as_sltiu(scratch, lhs, imm.value + 1);
return (c == BelowOrEqual ? NotEqual : Equal);
} else {
ma_li(scratch, imm);
as_sltu(scratch, scratch, lhs);
return (c == BelowOrEqual ? Equal : NotEqual);
}
case AboveOrEqual:
case Below:
if (Imm16::IsInSignedRange(imm.value)) {
as_sltiu(scratch, lhs, imm.value);
} else {
ma_li(scratch, imm);
as_sltu(scratch, lhs, scratch);
}
return (c == AboveOrEqual ? Equal : NotEqual);
case GreaterThan:
case LessThanOrEqual:
if (Imm16::IsInSignedRange(imm.value + 1)) {
// lhs <= rhs via lhs < rhs + 1.
as_slti(scratch, lhs, imm.value + 1);
return (c == LessThanOrEqual ? NotEqual : Equal);
} else {
ma_li(scratch, imm);
as_slt(scratch, scratch, lhs);
return (c == LessThanOrEqual ? Equal : NotEqual);
}
case GreaterThanOrEqual:
case LessThan:
if (Imm16::IsInSignedRange(imm.value)) {
as_slti(scratch, lhs, imm.value);
} else {
ma_li(scratch, imm);
as_slt(scratch, lhs, scratch);
}
return (c == GreaterThanOrEqual ? Equal : NotEqual);
default:
MOZ_CRASH("Invalid condition.");
}
return Always;
}
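// Worked example for the immediate forms above (illustrative, not from the
// patch): ma_b(lhs, Imm32(5), label, BelowOrEqual) takes the in-range path and
// ends up emitting
//   sltiu  scratch, lhs, 6        ; scratch = (lhs < 6) = (lhs <= 5), unsigned
//   bne    scratch, zero, label   ; ma_cmp returned NotEqual
// so the branch is taken exactly when lhs <= 5 unsigned. When imm + 1 does not
// fit in a signed 16-bit immediate, the fallback materializes the constant,
// emits sltu scratch, imm, lhs, and returns the flipped condition (Equal),
// which tests the same relation.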
@ -853,22 +1004,21 @@ MacroAssemblerMIPSShared::ma_cmp_set(Register rd, Register rs, Register rt, Cond
case Zero:
MOZ_ASSERT(rs == rt);
// seq d,s,$zero =>
// xor d,s,$zero
// sltiu d,d,1
as_xor(rd, rs, zero);
as_sltiu(rd, rd, 1);
// sltiu d,s,1
as_sltiu(rd, rs, 1);
break;
case NonZero:
MOZ_ASSERT(rs == rt);
// sne d,s,$zero =>
// xor d,s,$zero
// sltu d,$zero,d
as_xor(rd, rs, zero);
as_sltu(rd, zero, rd);
// sltu d,$zero,s
as_sltu(rd, zero, rs);
break;
case Signed:
MOZ_ASSERT(rs == rt);
as_slt(rd, rs, zero);
break;
case NotSigned:
MOZ_ASSERT(rs == rt);
// sge d,s,$zero =>
// slt d,s,$zero
// xori d,d,1
@ -876,7 +1026,7 @@ MacroAssemblerMIPSShared::ma_cmp_set(Register rd, Register rs, Register rt, Cond
as_xori(rd, rd, 1);
break;
default:
MOZ_CRASH("Invalid condition for ma_cmp_set.");
MOZ_CRASH("Invalid condition.");
}
}
@ -947,43 +1097,128 @@ MacroAssemblerMIPSShared::compareFloatingPoint(FloatFormat fmt, FloatRegister lh
}
}
void
MacroAssemblerMIPSShared::GenerateMixedJumps()
{
// Generate all mixed jumps.
for (size_t i = 0; i < numMixedJumps(); i++) {
MixedJumpPatch& mjp = mixedJump(i);
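// A plain (NONE) jump whose target lies inside this buffer never needs a
// trampoline: AllocateExecutableMemory keeps every allocation inside one
// 256 MB-aligned region, so a direct j/jal can always reach the target.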
if (MixedJumpPatch::NONE == mjp.kind && mjp.target <= size())
continue;
BufferOffset bo = m_buffer.nextOffset();
if (MixedJumpPatch::CONDITIONAL & mjp.kind) {
// Leave space for conditional branch.
as_nop();
asMasm().ma_liPatchable(ScratchRegister, ImmWord(0));
as_jr(ScratchRegister);
}
asMasm().ma_liPatchable(ScratchRegister, ImmWord(0));
as_jr(ScratchRegister);
as_nop();
mjp.mid = bo;
}
}
void
MacroAssemblerMIPSShared::ma_cmp_set_double(Register dest, FloatRegister lhs, FloatRegister rhs,
DoubleCondition c)
{
ma_li(dest, Imm32(0));
ma_li(ScratchRegister, Imm32(1));
FloatTestKind moveCondition;
compareFloatingPoint(DoubleFloat, lhs, rhs, c, &moveCondition);
ma_li(dest, Imm32(1));
if (moveCondition == TestForTrue)
as_movt(dest, ScratchRegister);
as_movf(dest, zero);
else
as_movf(dest, ScratchRegister);
as_movt(dest, zero);
}
void
MacroAssemblerMIPSShared::ma_cmp_set_float32(Register dest, FloatRegister lhs, FloatRegister rhs,
DoubleCondition c)
{
ma_li(dest, Imm32(0));
ma_li(ScratchRegister, Imm32(1));
FloatTestKind moveCondition;
compareFloatingPoint(SingleFloat, lhs, rhs, c, &moveCondition);
ma_li(dest, Imm32(1));
if (moveCondition == TestForTrue)
as_movt(dest, ScratchRegister);
as_movf(dest, zero);
else
as_movf(dest, ScratchRegister);
as_movt(dest, zero);
}
void
MacroAssemblerMIPSShared::ma_cmp_set(Register rd, Register rs, Imm32 imm, Condition c)
{
ma_li(ScratchRegister, imm);
ma_cmp_set(rd, rs, ScratchRegister, c);
if (imm.value == 0) {
switch (c) {
case Equal :
case BelowOrEqual:
as_sltiu(rd, rs, 1);
break;
case NotEqual:
case Above:
as_sltu(rd, zero, rs);
break;
case AboveOrEqual:
case Below:
as_ori(rd, zero, c == AboveOrEqual ? 1 : 0);
break;
case GreaterThan:
case LessThanOrEqual:
as_slt(rd, zero, rs);
if (c == LessThanOrEqual)
as_xori(rd, rd, 1);
break;
case LessThan:
case GreaterThanOrEqual:
as_slt(rd, rs, zero);
if (c == GreaterThanOrEqual)
as_xori(rd, rd, 1);
break;
case Zero:
as_sltiu(rd, rs, 1);
break;
case NonZero:
as_sltu(rd, zero, rs);
break;
case Signed:
as_slt(rd, rs, zero);
break;
case NotSigned:
as_slt(rd, rs, zero);
as_xori(rd, rd, 1);
break;
default:
MOZ_CRASH("Invalid condition.");
}
return;
}
switch (c) {
case Equal:
case NotEqual:
MOZ_ASSERT(rs != ScratchRegister);
ma_xor(rd, rs, imm);
if (c == Equal)
as_sltiu(rd, rd, 1);
else
as_sltu(rd, zero, rd);
break;
case Zero:
case NonZero:
case Signed:
case NotSigned:
MOZ_CRASH("Invalid condition.");
default:
Condition cond = ma_cmp(rd, rs, imm, c);
MOZ_ASSERT(cond == Equal || cond == NotEqual);
if (cond == Equal)
as_xori(rd, rd, 1);
}
}
// fp instructions
@ -1071,7 +1306,7 @@ MacroAssemblerMIPSShared::ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label* l
{
FloatTestKind testKind;
compareFloatingPoint(SingleFloat, lhs, rhs, c, &testKind, fcc);
asMasm().branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
}
void
@ -1080,7 +1315,7 @@ MacroAssemblerMIPSShared::ma_bc1d(FloatRegister lhs, FloatRegister rhs, Label* l
{
FloatTestKind testKind;
compareFloatingPoint(DoubleFloat, lhs, rhs, c, &testKind, fcc);
asMasm().branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
}
void
@ -1561,7 +1796,7 @@ MacroAssembler::call(Register reg)
CodeOffset
MacroAssembler::call(Label* label)
{
ma_bal(label);
ma_jal(label);
return CodeOffset(currentOffset());
}

View File

@ -34,7 +34,7 @@ enum LoadStoreExtension
enum JumpKind
{
LongJump = 0,
MixedJump = 0,
ShortJump = 1
};
@ -54,17 +54,21 @@ class MacroAssemblerMIPSShared : public Assembler
const MacroAssembler& asMasm() const;
Condition ma_cmp(Register rd, Register lhs, Register rhs, Condition c);
Condition ma_cmp(Register rd, Register lhs, Imm32 imm, Condition c);
void compareFloatingPoint(FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
DoubleCondition c, FloatTestKind* testKind,
FPConditionBit fcc = FCC0);
void GenerateMixedJumps();
public:
void ma_move(Register rd, Register rs);
void ma_li(Register dest, ImmGCPtr ptr);
void ma_li(Register dest, Imm32 imm);
void ma_liPatchable(Register dest, Imm32 imm);
// Shift operations
void ma_sll(Register rd, Register rt, Imm32 shift);
@ -104,7 +108,7 @@ class MacroAssemblerMIPSShared : public Assembler
// load
void ma_load(Register dest, const BaseIndex& src, LoadStoreSize size = SizeWord,
LoadStoreExtension extension = SignExtend);
void ma_load_unaligned(Register dest, const BaseIndex& src, Register temp,
void ma_load_unaligned(const wasm::MemoryAccessDesc& access, Register dest, const BaseIndex& src, Register temp,
LoadStoreSize size, LoadStoreExtension extension);
// store
@ -112,7 +116,7 @@ class MacroAssemblerMIPSShared : public Assembler
LoadStoreExtension extension = SignExtend);
void ma_store(Imm32 imm, const BaseIndex& dest, LoadStoreSize size = SizeWord,
LoadStoreExtension extension = SignExtend);
void ma_store_unaligned(Register data, const BaseIndex& dest, Register temp,
void ma_store_unaligned(const wasm::MemoryAccessDesc& access, Register data, const BaseIndex& dest, Register temp,
LoadStoreSize size, LoadStoreExtension extension);
// arithmetic based ops
@ -145,21 +149,24 @@ class MacroAssemblerMIPSShared : public Assembler
void ma_mod_mask(Register src, Register dest, Register hold, Register remain,
int32_t shift, Label* negZero = nullptr);
void branchWithCode(InstImm code, Label* label, JumpKind jumpKind);
// branches when done from within mips-specific code
void ma_b(Register lhs, Register rhs, Label* l, Condition c, JumpKind jumpKind = LongJump);
void ma_b(Register lhs, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
void ma_b(Register lhs, ImmPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
void ma_b(Register lhs, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump) {
void ma_b(Register lhs, Register rhs, Label* l, Condition c, JumpKind jumpKind = MixedJump);
void ma_b(Register lhs, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
void ma_b(Register lhs, ImmPtr imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
void ma_b(Register lhs, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = MixedJump) {
MOZ_ASSERT(lhs != ScratchRegister);
ma_li(ScratchRegister, imm);
ma_b(lhs, ScratchRegister, l, c, jumpKind);
}
template <typename T>
void ma_b(Register lhs, T rhs, wasm::TrapDesc target, Condition c,
JumpKind jumpKind = LongJump);
JumpKind jumpKind = MixedJump);
void ma_b(Label* l, JumpKind jumpKind = LongJump);
void ma_b(wasm::TrapDesc target, JumpKind jumpKind = LongJump);
void ma_b(Label* l, JumpKind jumpKind = MixedJump);
void ma_b(wasm::TrapDesc target, JumpKind jumpKind = MixedJump);
void ma_jal(Label* l);
// fp instructions
void ma_lis(FloatRegister dest, float value);
@ -171,9 +178,9 @@ class MacroAssemblerMIPSShared : public Assembler
//FP branches
void ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label* label, DoubleCondition c,
JumpKind jumpKind = LongJump, FPConditionBit fcc = FCC0);
JumpKind jumpKind = MixedJump, FPConditionBit fcc = FCC0);
void ma_bc1d(FloatRegister lhs, FloatRegister rhs, Label* label, DoubleCondition c,
JumpKind jumpKind = LongJump, FPConditionBit fcc = FCC0);
JumpKind jumpKind = MixedJump, FPConditionBit fcc = FCC0);
void ma_call(ImmPtr dest);
@ -184,6 +191,12 @@ class MacroAssemblerMIPSShared : public Assembler
void ma_cmp_set_double(Register dst, FloatRegister lhs, FloatRegister rhs, DoubleCondition c);
void ma_cmp_set_float32(Register dst, FloatRegister lhs, FloatRegister rhs, DoubleCondition c);
BufferOffset ma_BoundsCheck(Register bounded) {
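// Reserve a patchable lui/ori pair whose immediate is filled in later
// (e.g. by MacroAssembler::wasmPatchBoundsCheck), and report where it starts.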
BufferOffset bo = m_buffer.nextOffset();
ma_liPatchable(bounded, Imm32(0));
return bo;
}
void moveToDoubleLo(Register src, FloatRegister dest) {
as_mtc1(src, dest);
}

View File

@ -120,11 +120,14 @@ js::jit::SA(FloatRegister r)
void
jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
{
Instruction* inst1 = (Instruction*)jump_.raw();
Instruction* inst2 = inst1->next();
Instruction* inst1;
Instruction* inst2;
inst1 = AssemblerMIPSShared::GetInstructionImmediateFromJump((Instruction*)jump_.raw());
inst2 = inst1->next();
MaybeAutoWritableJitCode awjc(inst1, 8, reprotect);
Assembler::UpdateLuiOriValue(inst1, inst2, (uint32_t)label.raw());
AssemblerMIPSShared::UpdateLuiOriValue(inst1, inst2, (uint32_t)label.raw());
AutoFlushICache::flush(uintptr_t(inst1), 8);
}
@ -146,34 +149,17 @@ jit::PatchBackedge(CodeLocationJump& jump, CodeLocationLabel label,
} else {
if (target == JitRuntime::BackedgeLoopHeader) {
Instruction* lui = &branch[1];
Assembler::UpdateLuiOriValue(lui, lui->next(), targetAddr);
AssemblerMIPSShared::UpdateLuiOriValue(lui, lui->next(), targetAddr);
// Jump to ori. The lui will be executed in delay slot.
branch->setBOffImm16(BOffImm16(2 * sizeof(uint32_t)));
} else {
Instruction* lui = &branch[4];
Assembler::UpdateLuiOriValue(lui, lui->next(), targetAddr);
AssemblerMIPSShared::UpdateLuiOriValue(lui, lui->next(), targetAddr);
branch->setBOffImm16(BOffImm16(4 * sizeof(uint32_t)));
}
}
}
void
Assembler::executableCopy(uint8_t* buffer)
{
MOZ_ASSERT(isFinished);
m_buffer.executableCopy(buffer);
// Patch all long jumps during code copy.
for (size_t i = 0; i < longJumps_.length(); i++) {
Instruction* inst1 = (Instruction*) ((uint32_t)buffer + longJumps_[i]);
uint32_t value = Assembler::ExtractLuiOriValue(inst1, inst1->next());
Assembler::UpdateLuiOriValue(inst1, inst1->next(), (uint32_t)buffer + value);
}
AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
}
uintptr_t
Assembler::GetPointer(uint8_t* instPtr)
{
@ -207,7 +193,7 @@ TraceOneDataRelocation(JSTracer* trc, Instruction* inst)
TraceManuallyBarrieredGenericPointerEdge(trc, reinterpret_cast<gc::Cell**>(&ptr),
"ion-masm-ptr");
if (ptr != prior) {
Assembler::UpdateLuiOriValue(inst, inst->next(), uint32_t(ptr));
AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(), uint32_t(ptr));
AutoFlushICache::flush(uintptr_t(inst), 8);
}
}
@ -306,153 +292,10 @@ Assembler::Bind(uint8_t* rawCode, CodeOffset* label, const void* address)
if (label->bound()) {
intptr_t offset = label->offset();
Instruction* inst = (Instruction*) (rawCode + offset);
Assembler::UpdateLuiOriValue(inst, inst->next(), (uint32_t)address);
AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(), (uint32_t)address);
}
}
void
Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target)
{
int32_t offset = target - branch;
InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
// If encoded offset is 4, then the jump must be short
if (BOffImm16(inst[0]).decode() == 4) {
MOZ_ASSERT(BOffImm16::IsInRange(offset));
inst[0].setBOffImm16(BOffImm16(offset));
inst[1].makeNop();
return;
}
// Generate the long jump for calls because return address has to be the
// address after the reserved block.
if (inst[0].encode() == inst_bgezal.encode()) {
addLongJump(BufferOffset(branch));
Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
// There is 1 nop after this.
return;
}
if (BOffImm16::IsInRange(offset)) {
bool conditional = (inst[0].encode() != inst_bgezal.encode() &&
inst[0].encode() != inst_beq.encode());
inst[0].setBOffImm16(BOffImm16(offset));
inst[1].makeNop();
// Skip the trailing nops in conditional branches.
if (conditional) {
inst[2] = InstImm(op_regimm, zero, rt_bgez, BOffImm16(3 * sizeof(void*))).encode();
// There are 2 nops after this
}
return;
}
if (inst[0].encode() == inst_beq.encode()) {
// Handle long unconditional jump.
addLongJump(BufferOffset(branch));
Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
inst[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
// There is 1 nop after this.
} else {
// Handle long conditional jump.
inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(void*)));
// No need for a "nop" here because we can clobber scratch.
addLongJump(BufferOffset(branch + sizeof(void*)));
Assembler::WriteLuiOriInstructions(&inst[1], &inst[2], ScratchRegister, target);
inst[3] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
// There is 1 nop after this.
}
}
void
Assembler::bind(RepatchLabel* label)
{
BufferOffset dest = nextOffset();
if (label->used() && !oom()) {
// If the label has a use, then change this use to refer to
// the bound label;
BufferOffset b(label->offset());
InstImm* inst = (InstImm*)editSrc(b);
InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
uint32_t offset = dest.getOffset() - label->offset();
// If first instruction is lui, then this is a long jump.
// If second instruction is lui, then this is a loop backedge.
if (inst[0].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift)) {
// For unconditional long branches generated by ma_liPatchable,
// such as under:
// jumpWithPatch
Assembler::UpdateLuiOriValue(inst, inst->next(), dest.getOffset());
} else if (inst[1].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift) ||
BOffImm16::IsInRange(offset))
{
// Handle code produced by:
// backedgeJump
// branchWithCode
MOZ_ASSERT(BOffImm16::IsInRange(offset));
MOZ_ASSERT(inst[0].extractOpcode() == (uint32_t(op_beq) >> OpcodeShift) ||
inst[0].extractOpcode() == (uint32_t(op_bne) >> OpcodeShift) ||
inst[0].extractOpcode() == (uint32_t(op_blez) >> OpcodeShift) ||
inst[0].extractOpcode() == (uint32_t(op_bgtz) >> OpcodeShift));
inst[0].setBOffImm16(BOffImm16(offset));
} else if (inst[0].encode() == inst_beq.encode()) {
// Handle open long unconditional jumps created by
// MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
// We need to add it to long jumps array here.
// See MacroAssemblerMIPS::branchWithCode().
MOZ_ASSERT(inst[1].encode() == NopInst);
MOZ_ASSERT(inst[2].encode() == NopInst);
MOZ_ASSERT(inst[3].encode() == NopInst);
addLongJump(BufferOffset(label->offset()));
Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, dest.getOffset());
inst[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
} else {
// Handle open long conditional jumps created by
// MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(void*)));
// No need for a "nop" here because we can clobber scratch.
// We need to add it to long jumps array here.
// See MacroAssemblerMIPS::branchWithCode().
MOZ_ASSERT(inst[1].encode() == NopInst);
MOZ_ASSERT(inst[2].encode() == NopInst);
MOZ_ASSERT(inst[3].encode() == NopInst);
MOZ_ASSERT(inst[4].encode() == NopInst);
addLongJump(BufferOffset(label->offset() + sizeof(void*)));
Assembler::WriteLuiOriInstructions(&inst[1], &inst[2], ScratchRegister, dest.getOffset());
inst[3] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
}
}
label->bind(dest.getOffset());
}
uint32_t
Assembler::PatchWrite_NearCallSize()
{
return 4 * sizeof(uint32_t);
}
void
Assembler::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
{
Instruction* inst = (Instruction*) start.raw();
uint8_t* dest = toCall.raw();
// Overwrite whatever instruction used to be here with a call.
// Always use long jump for two reasons:
// - Jump has to be the same size because of PatchWrite_NearCallSize.
// - Return address has to be at the end of replaced block.
// Short jump wouldn't be more efficient.
Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, (uint32_t)dest);
inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
inst[3] = InstNOP();
// Ensure everyone sees the code that was just written into memory.
AutoFlushICache::flush(uintptr_t(inst), PatchWrite_NearCallSize());
}
uint32_t
Assembler::ExtractLuiOriValue(Instruction* inst0, Instruction* inst1)
{
@ -466,24 +309,6 @@ Assembler::ExtractLuiOriValue(Instruction* inst0, Instruction* inst1)
return value;
}
void
Assembler::UpdateLuiOriValue(Instruction* inst0, Instruction* inst1, uint32_t value)
{
MOZ_ASSERT(inst0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
MOZ_ASSERT(inst1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
((InstImm*) inst0)->setImm16(Imm16::Upper(Imm32(value)));
((InstImm*) inst1)->setImm16(Imm16::Lower(Imm32(value)));
}
void
Assembler::WriteLuiOriInstructions(Instruction* inst0, Instruction* inst1,
Register reg, uint32_t value)
{
*inst0 = InstImm(op_lui, zero, reg, Imm16::Upper(Imm32(value)));
*inst1 = InstImm(op_ori, reg, reg, Imm16::Lower(Imm32(value)));
}
void
Assembler::PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
ImmPtr expectedValue)
@ -503,7 +328,7 @@ Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newVal
MOZ_ASSERT(value == uint32_t(expectedValue.value));
// Replace with new value
Assembler::UpdateLuiOriValue(inst, inst->next(), uint32_t(newValue.value));
AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(), uint32_t(newValue.value));
AutoFlushICache::flush(uintptr_t(inst), 8);
}
@ -512,7 +337,7 @@ void
Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm)
{
InstImm* inst = (InstImm*)code;
Assembler::UpdateLuiOriValue(inst, inst->next(), (uint32_t)imm.value);
AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(), (uint32_t)imm.value);
}
uint32_t

View File

@ -144,28 +144,15 @@ class Assembler : public AssemblerMIPSShared
}
public:
using AssemblerMIPSShared::bind;
void bind(RepatchLabel* label);
void Bind(uint8_t* rawCode, CodeOffset* label, const void* address);
static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
void bind(InstImm* inst, uintptr_t branch, uintptr_t target);
// Copy the assembly code to the given buffer, and perform any pending
// relocations relying on the target address.
void executableCopy(uint8_t* buffer);
static uint32_t PatchWrite_NearCallSize();
static uint32_t InstructionImmediateSize() {
return 2 * sizeof(uint32_t);
}
static uint32_t ExtractLuiOriValue(Instruction* inst0, Instruction* inst1);
static void UpdateLuiOriValue(Instruction* inst0, Instruction* inst1, uint32_t value);
static void WriteLuiOriInstructions(Instruction* inst, Instruction* inst1,
Register reg, uint32_t value);
static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
ImmPtr expectedValue);
static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,

View File

@ -490,11 +490,11 @@ CodeGeneratorMIPS::emitWasmLoadI64(T* lir)
masm.memoryBarrier(mir->access().barrierBefore());
MOZ_ASSERT(INT64LOW_OFFSET == 0);
if (mir->access().isUnaligned()) {
if (IsUnaligned(mir->access())) {
Register temp = ToRegister(lir->getTemp(1));
if (byteSize <= 4) {
masm.ma_load_unaligned(output.low, BaseIndex(HeapReg, ptr, TimesOne),
masm.ma_load_unaligned(mir->access(), output.low, BaseIndex(HeapReg, ptr, TimesOne),
temp, static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
if (!isSigned)
@ -502,12 +502,11 @@ CodeGeneratorMIPS::emitWasmLoadI64(T* lir)
else
masm.ma_sra(output.high, output.low, Imm32(31));
} else {
ScratchRegisterScope scratch(masm);
masm.ma_load_unaligned(output.low, BaseIndex(HeapReg, ptr, TimesOne),
temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
masm.ma_load_unaligned(output.high, BaseIndex(HeapReg, scratch, TimesOne),
temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
masm.ma_load_unaligned(mir->access(), output.low,
BaseIndex(HeapReg, ptr, TimesOne), temp, SizeWord, ZeroExtend);
masm.ma_load_unaligned(mir->access(), output.high,
BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), temp,
SizeWord, SignExtend);
}
return;
}
@ -515,15 +514,15 @@ CodeGeneratorMIPS::emitWasmLoadI64(T* lir)
if (byteSize <= 4) {
masm.ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne),
static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
masm.append(mir->access(), masm.size() - 4, masm.framePushed());
if (!isSigned)
masm.move32(Imm32(0), output.high);
else
masm.ma_sra(output.high, output.low, Imm32(31));
} else {
ScratchRegisterScope scratch(masm);
masm.ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
masm.ma_load(output.high, BaseIndex(HeapReg, scratch, TimesOne), SizeWord);
masm.append(mir->access(), masm.size() - 4, masm.framePushed());
masm.ma_load(output.high, BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), SizeWord);
masm.append(mir->access(), masm.size() - 4, masm.framePushed());
}
masm.memoryBarrier(mir->access().barrierAfter());
@ -577,20 +576,19 @@ CodeGeneratorMIPS::emitWasmStoreI64(T* lir)
masm.memoryBarrier(mir->access().barrierBefore());
MOZ_ASSERT(INT64LOW_OFFSET == 0);
if (mir->access().isUnaligned()) {
if (IsUnaligned(mir->access())) {
Register temp = ToRegister(lir->getTemp(1));
if (byteSize <= 4) {
masm.ma_store_unaligned(value.low, BaseIndex(HeapReg, ptr, TimesOne),
masm.ma_store_unaligned(mir->access(), value.low, BaseIndex(HeapReg, ptr, TimesOne),
temp, static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
} else {
ScratchRegisterScope scratch(masm);
masm.ma_store_unaligned(value.low, BaseIndex(HeapReg, ptr, TimesOne),
temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
masm.ma_store_unaligned(value.high, BaseIndex(HeapReg, scratch, TimesOne),
temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
masm.ma_store_unaligned(mir->access(), value.high,
BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), temp,
SizeWord, SignExtend);
masm.ma_store_unaligned(mir->access(), value.low, BaseIndex(HeapReg, ptr, TimesOne),
temp, SizeWord, ZeroExtend);
}
return;
}
@ -598,11 +596,12 @@ CodeGeneratorMIPS::emitWasmStoreI64(T* lir)
if (byteSize <= 4) {
masm.ma_store(value.low, BaseIndex(HeapReg, ptr, TimesOne),
static_cast<LoadStoreSize>(8 * byteSize));
masm.append(mir->access(), masm.size() - 4, masm.framePushed());
} else {
ScratchRegisterScope scratch(masm);
masm.ma_store(value.high, BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), SizeWord);
masm.append(mir->access(), masm.size() - 4, masm.framePushed());
masm.ma_store(value.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
masm.ma_store(value.high, BaseIndex(HeapReg, scratch, TimesOne), SizeWord);
}
masm.memoryBarrier(mir->access().barrierAfter());

View File

@ -302,8 +302,9 @@ MacroAssembler::neg64(Register64 reg)
void
MacroAssembler::mulBy3(Register src, Register dest)
{
as_addu(dest, src, src);
as_addu(dest, dest, src);
MOZ_ASSERT(src != ScratchRegister);
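// Build the result in ScratchRegister first so that mulBy3 stays correct
// when dest aliases src; writing dest first would clobber src before the
// second addition.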
as_addu(ScratchRegister, src, src);
as_addu(dest, ScratchRegister, src);
}
void
@ -1042,7 +1043,7 @@ MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
InstImm* i1 = (InstImm*) i0->next();
// Replace with new value
Assembler::UpdateLuiOriValue(i0, i1, limit);
AssemblerMIPSShared::UpdateLuiOriValue(i0, i1, limit);
}
//}}} check_macroassembler_style

View File

@ -202,17 +202,6 @@ MacroAssemblerMIPS::ma_li(Register dest, ImmWord imm)
ma_li(dest, Imm32(uint32_t(imm.value)));
}
// This method generates lui and ori instruction pair that can be modified by
// UpdateLuiOriValue, either during compilation (eg. Assembler::bind), or
// during execution (eg. jit::PatchJump).
void
MacroAssemblerMIPS::ma_liPatchable(Register dest, Imm32 imm)
{
m_buffer.ensureSpace(2 * sizeof(uint32_t));
as_lui(dest, Imm16::Upper(imm).encode());
as_ori(dest, dest, Imm16::Lower(imm).encode());
}
void
MacroAssemblerMIPS::ma_liPatchable(Register dest, ImmPtr imm)
{
@ -514,122 +503,6 @@ MacroAssemblerMIPS::ma_b(Address addr, ImmGCPtr imm, Label* label, Condition c,
ma_b(SecondScratchReg, imm, label, c, jumpKind);
}
void
MacroAssemblerMIPS::ma_bal(Label* label, DelaySlotFill delaySlotFill)
{
if (label->bound()) {
// Generate the long jump for calls because return address has to be
// the address after the reserved block.
addLongJump(nextOffset());
ma_liPatchable(ScratchRegister, Imm32(label->offset()));
as_jalr(ScratchRegister);
if (delaySlotFill == FillDelaySlot)
as_nop();
return;
}
// Second word holds a pointer to the next branch in label's chain.
uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
// Make the whole branch continuous in the buffer.
m_buffer.ensureSpace(4 * sizeof(uint32_t));
BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
writeInst(nextInChain);
if (!oom())
label->use(bo.getOffset());
// Leave space for long jump.
as_nop();
if (delaySlotFill == FillDelaySlot)
as_nop();
}
void
MacroAssemblerMIPS::branchWithCode(InstImm code, Label* label, JumpKind jumpKind)
{
MOZ_ASSERT(code.encode() != InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
if (label->bound()) {
int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
if (BOffImm16::IsInRange(offset))
jumpKind = ShortJump;
if (jumpKind == ShortJump) {
MOZ_ASSERT(BOffImm16::IsInRange(offset));
code.setBOffImm16(BOffImm16(offset));
writeInst(code.encode());
as_nop();
return;
}
if (code.encode() == inst_beq.encode()) {
// Handle long jump
addLongJump(nextOffset());
ma_liPatchable(ScratchRegister, Imm32(label->offset()));
as_jr(ScratchRegister);
as_nop();
return;
}
// Handle long conditional branch
writeInst(invertBranch(code, BOffImm16(5 * sizeof(uint32_t))).encode());
// No need for a "nop" here because we can clobber scratch.
addLongJump(nextOffset());
ma_liPatchable(ScratchRegister, Imm32(label->offset()));
as_jr(ScratchRegister);
as_nop();
return;
}
// Generate open jump and link it to a label.
// Second word holds a pointer to the next branch in label's chain.
uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
if (jumpKind == ShortJump) {
// Make the whole branch continuous in the buffer.
m_buffer.ensureSpace(2 * sizeof(uint32_t));
// Indicate that this is short jump with offset 4.
code.setBOffImm16(BOffImm16(4));
BufferOffset bo = writeInst(code.encode());
writeInst(nextInChain);
if (!oom())
label->use(bo.getOffset());
return;
}
bool conditional = code.encode() != inst_beq.encode();
// Make the whole branch continuous in the buffer.
m_buffer.ensureSpace((conditional ? 5 : 4) * sizeof(uint32_t));
BufferOffset bo = writeInst(code.encode());
writeInst(nextInChain);
if (!oom())
label->use(bo.getOffset());
// Leave space for potential long jump.
as_nop();
as_nop();
if (conditional)
as_nop();
}
void
MacroAssemblerMIPS::ma_cmp_set(Register rd, Register rs, Address addr, Condition c)
{
ma_lw(ScratchRegister, addr);
ma_cmp_set(rd, rs, ScratchRegister, c);
}
void
MacroAssemblerMIPS::ma_cmp_set(Register dst, Address lhs, Register rhs, Condition c)
{
ma_lw(ScratchRegister, lhs);
ma_cmp_set(dst, ScratchRegister, rhs, c);
}
// fp instructions
void
@ -928,26 +801,32 @@ MacroAssemblerMIPSCompat::loadDouble(const BaseIndex& src, FloatRegister dest)
}
void
MacroAssemblerMIPSCompat::loadUnalignedDouble(const BaseIndex& src, Register temp,
FloatRegister dest)
MacroAssemblerMIPSCompat::loadUnalignedDouble(const wasm::MemoryAccessDesc& access,
const BaseIndex& src, Register temp, FloatRegister dest)
{
computeScaledAddress(src, SecondScratchReg);
uint32_t framePushed = asMasm().framePushed();
BufferOffset load;
if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 7)) {
as_lwl(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET + 3);
load = as_lwl(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET + 3);
as_lwr(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET);
append(access, load.getOffset(), framePushed);
moveToDoubleLo(temp, dest);
as_lwl(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET + 3);
load = as_lwl(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET + 3);
as_lwr(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET);
append(access, load.getOffset(), framePushed);
moveToDoubleHi(temp, dest);
} else {
ma_li(ScratchRegister, Imm32(src.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
as_lwl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
load = as_lwl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
as_lwr(temp, ScratchRegister, INT64LOW_OFFSET);
append(access, load.getOffset(), framePushed);
moveToDoubleLo(temp, dest);
as_lwl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
load = as_lwl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
as_lwr(temp, ScratchRegister, INT64HIGH_OFFSET);
append(access, load.getOffset(), framePushed);
moveToDoubleHi(temp, dest);
}
}
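// Illustrative note (assumption): on little-endian MIPS the lwl/lwr pair above
// assembles a 32-bit word from an address with no alignment guarantee, i.e. the
// portable equivalent of a byte-wise load:
#include <cstdint>
#include <cstring>

static uint32_t loadUnaligned32(const uint8_t* p) {
    uint32_t v;
    std::memcpy(&v, p, sizeof(v));  // no alignment requirement
    return v;
}
// The double is then assembled from two such words via moveToDoubleLo/Hi, and
// each leading lwl is recorded with append() so a fault can be attributed to
// the right wasm memory access.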
@ -980,21 +859,21 @@ MacroAssemblerMIPSCompat::loadFloat32(const BaseIndex& src, FloatRegister dest)
}
void
MacroAssemblerMIPSCompat::loadUnalignedFloat32(const BaseIndex& src, Register temp,
FloatRegister dest)
MacroAssemblerMIPSCompat::loadUnalignedFloat32(const wasm::MemoryAccessDesc& access,
const BaseIndex& src, Register temp, FloatRegister dest)
{
computeScaledAddress(src, SecondScratchReg);
BufferOffset load;
if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 3)) {
as_lwl(temp, SecondScratchReg, src.offset + 3);
load = as_lwl(temp, SecondScratchReg, src.offset + 3);
as_lwr(temp, SecondScratchReg, src.offset);
} else {
ma_li(ScratchRegister, Imm32(src.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
as_lwl(temp, ScratchRegister, 3);
load = as_lwl(temp, ScratchRegister, 3);
as_lwr(temp, ScratchRegister, 0);
}
append(access, load.getOffset(), asMasm().framePushed());
moveToFloat32(temp, dest);
}
@ -1132,46 +1011,52 @@ MacroAssemblerMIPSCompat::storePtr(Register src, AbsoluteAddress dest)
}
void
MacroAssemblerMIPSCompat::storeUnalignedFloat32(FloatRegister src, Register temp,
const BaseIndex& dest)
MacroAssemblerMIPSCompat::storeUnalignedFloat32(const wasm::MemoryAccessDesc& access,
FloatRegister src, Register temp, const BaseIndex& dest)
{
computeScaledAddress(dest, SecondScratchReg);
moveFromFloat32(src, temp);
BufferOffset store;
if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 3)) {
as_swl(temp, SecondScratchReg, dest.offset + 3);
store = as_swl(temp, SecondScratchReg, dest.offset + 3);
as_swr(temp, SecondScratchReg, dest.offset);
} else {
ma_li(ScratchRegister, Imm32(dest.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
as_swl(temp, ScratchRegister, 3);
store = as_swl(temp, ScratchRegister, 3);
as_swr(temp, ScratchRegister, 0);
}
append(access, store.getOffset(), asMasm().framePushed());
}
void
MacroAssemblerMIPSCompat::storeUnalignedDouble(FloatRegister src, Register temp,
const BaseIndex& dest)
MacroAssemblerMIPSCompat::storeUnalignedDouble(const wasm::MemoryAccessDesc& access,
FloatRegister src, Register temp, const BaseIndex& dest)
{
computeScaledAddress(dest, SecondScratchReg);
uint32_t framePushed = asMasm().framePushed();
BufferOffset store;
if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 7)) {
moveFromDoubleHi(src, temp);
store = as_swl(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET + 3);
as_swr(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET);
moveFromDoubleLo(src, temp);
as_swl(temp, SecondScratchReg, dest.offset + INT64LOW_OFFSET + 3);
as_swr(temp, SecondScratchReg, dest.offset + INT64LOW_OFFSET);
moveFromDoubleHi(src, temp);
as_swl(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET + 3);
as_swr(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET);
} else {
ma_li(ScratchRegister, Imm32(dest.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
moveFromDoubleHi(src, temp);
store = as_swl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
as_swr(temp, ScratchRegister, INT64HIGH_OFFSET);
moveFromDoubleLo(src, temp);
as_swl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
as_swr(temp, ScratchRegister, INT64LOW_OFFSET);
moveFromDoubleHi(src, temp);
as_swl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
as_swr(temp, ScratchRegister, INT64HIGH_OFFSET);
}
append(access, store.getOffset(), framePushed);
}
// Note: this function clobbers the input register.
@ -1608,13 +1493,12 @@ MacroAssemblerMIPSCompat::jumpWithPatch(RepatchLabel* label, Label* documentatio
{
// Only one branch per label.
MOZ_ASSERT(!label->used());
uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;
BufferOffset bo = nextOffset();
label->use(bo.getOffset());
addLongJump(bo);
ma_liPatchable(ScratchRegister, Imm32(dest));
as_jr(ScratchRegister);
if (label->bound())
addMixedJump(bo, label->offset(), MixedJumpPatch::PATCHABLE);
as_j(JOffImm26(0));
as_nop();
return CodeOffsetJump(bo.getOffset());
}


@ -46,6 +46,7 @@ class MacroAssemblerMIPS : public MacroAssemblerMIPSShared
public:
using MacroAssemblerMIPSShared::ma_b;
using MacroAssemblerMIPSShared::ma_li;
using MacroAssemblerMIPSShared::ma_liPatchable;
using MacroAssemblerMIPSShared::ma_ss;
using MacroAssemblerMIPSShared::ma_sd;
using MacroAssemblerMIPSShared::ma_load;
@ -55,7 +56,6 @@ class MacroAssemblerMIPS : public MacroAssemblerMIPSShared
void ma_li(Register dest, CodeOffset* label);
void ma_liPatchable(Register dest, Imm32 imm);
void ma_li(Register dest, ImmWord imm);
void ma_liPatchable(Register dest, ImmPtr imm);
void ma_liPatchable(Register dest, ImmWord imm);
@ -89,28 +89,25 @@ class MacroAssemblerMIPS : public MacroAssemblerMIPSShared
void ma_pop(Register r);
void ma_push(Register r);
void branchWithCode(InstImm code, Label* label, JumpKind jumpKind);
// branches when done from within mips-specific code
void ma_b(Register lhs, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = LongJump)
void ma_b(Register lhs, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = MixedJump)
{
ma_b(lhs, Imm32(uint32_t(imm.value)), l, c, jumpKind);
}
void ma_b(Address addr, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = LongJump)
void ma_b(Address addr, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = MixedJump)
{
ma_b(addr, Imm32(uint32_t(imm.value)), l, c, jumpKind);
}
void ma_b(Register lhs, Address addr, Label* l, Condition c, JumpKind jumpKind = LongJump);
void ma_b(Address addr, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
void ma_b(Address addr, Register rhs, Label* l, Condition c, JumpKind jumpKind = LongJump) {
void ma_b(Register lhs, Address addr, Label* l, Condition c, JumpKind jumpKind = MixedJump);
void ma_b(Address addr, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
void ma_b(Address addr, Register rhs, Label* l, Condition c, JumpKind jumpKind = MixedJump) {
MOZ_ASSERT(rhs != ScratchRegister);
ma_load(ScratchRegister, addr, SizeWord);
ma_lw(ScratchRegister, addr);
ma_b(ScratchRegister, rhs, l, c, jumpKind);
}
void ma_bal(Label* l, DelaySlotFill delaySlotFill = FillDelaySlot);
// fp instructions
void ma_lid(FloatRegister dest, double value);
@ -128,12 +125,19 @@ class MacroAssemblerMIPS : public MacroAssemblerMIPSShared
void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c) {
ma_cmp_set(dst, lhs, Imm32(uint32_t(imm.value)), c);
}
void ma_cmp_set(Register rd, Register rs, Address addr, Condition c);
void ma_cmp_set(Register dst, Address lhs, Register rhs, Condition c);
void ma_cmp_set(Register dst, Address lhs, ImmPtr imm, Condition c) {
void ma_cmp_set(Register dst, Register lhs, Address addr, Condition c) {
MOZ_ASSERT(lhs != ScratchRegister);
ma_lw(ScratchRegister, addr);
ma_cmp_set(dst, lhs, ScratchRegister, c);
}
void ma_cmp_set(Register dst, Address lhs, Register rhs, Condition c) {
MOZ_ASSERT(rhs != ScratchRegister);
ma_lw(ScratchRegister, lhs);
ma_li(SecondScratchReg, Imm32(uint32_t(imm.value)));
ma_cmp_set(dst, ScratchRegister, SecondScratchReg, c);
ma_cmp_set(dst, ScratchRegister, rhs, c);
}
void ma_cmp_set(Register dst, Address lhs, ImmPtr imm, Condition c) {
ma_lw(SecondScratchReg, lhs);
ma_cmp_set(dst, SecondScratchReg, imm, c);
}
// These functions abstract access to the high part of the double precision
@ -893,7 +897,8 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
void loadDouble(const Address& addr, FloatRegister dest);
void loadDouble(const BaseIndex& src, FloatRegister dest);
void loadUnalignedDouble(const BaseIndex& src, Register temp, FloatRegister dest);
void loadUnalignedDouble(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
Register temp, FloatRegister dest);
// Load a float value into a register, then expand it to a double.
void loadFloatAsDouble(const Address& addr, FloatRegister dest);
@ -901,7 +906,8 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
void loadFloat32(const Address& addr, FloatRegister dest);
void loadFloat32(const BaseIndex& src, FloatRegister dest);
void loadUnalignedFloat32(const BaseIndex& src, Register temp, FloatRegister dest);
void loadUnalignedFloat32(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
Register temp, FloatRegister dest);
void store8(Register src, const Address& address);
void store8(Imm32 imm, const Address& address);
@ -942,8 +948,10 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
void storePtr(Register src, const BaseIndex& address);
void storePtr(Register src, AbsoluteAddress dest);
void storeUnalignedFloat32(FloatRegister src, Register temp, const BaseIndex& dest);
void storeUnalignedDouble(FloatRegister src, Register temp, const BaseIndex& dest);
void storeUnalignedFloat32(const wasm::MemoryAccessDesc& access, FloatRegister src,
Register temp, const BaseIndex& dest);
void storeUnalignedDouble(const wasm::MemoryAccessDesc& access, FloatRegister src,
Register temp, const BaseIndex& dest);
void moveDouble(FloatRegister src, FloatRegister dest) {
as_movd(dest, src);
@ -990,12 +998,6 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
ma_sw(imm, addr);
}
BufferOffset ma_BoundsCheck(Register bounded) {
BufferOffset bo = m_buffer.nextOffset();
ma_liPatchable(bounded, ImmWord(0));
return bo;
}
void moveFloat32(FloatRegister src, FloatRegister dest) {
as_movs(dest, src);
}


@ -87,7 +87,9 @@ js::jit::SA(FloatRegister r)
void
jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
{
Instruction* inst = (Instruction*)jump_.raw();
Instruction* inst;
inst = AssemblerMIPSShared::GetInstructionImmediateFromJump((Instruction*)jump_.raw());
// Six instructions used in load 64-bit imm.
MaybeAutoWritableJitCode awjc(inst, 6 * sizeof(uint32_t), reprotect);
@ -125,23 +127,6 @@ jit::PatchBackedge(CodeLocationJump& jump, CodeLocationLabel label,
}
}
void
Assembler::executableCopy(uint8_t* buffer)
{
MOZ_ASSERT(isFinished);
m_buffer.executableCopy(buffer);
// Patch all long jumps during code copy.
for (size_t i = 0; i < longJumps_.length(); i++) {
Instruction* inst = (Instruction*) ((uintptr_t)buffer + longJumps_[i]);
uint64_t value = Assembler::ExtractLoad64Value(inst);
Assembler::UpdateLoad64Value(inst, (uint64_t)buffer + value);
}
AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
}
uintptr_t
Assembler::GetPointer(uint8_t* instPtr)
{
@ -243,155 +228,6 @@ Assembler::Bind(uint8_t* rawCode, CodeOffset* label, const void* address)
}
}
void
Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target)
{
int64_t offset = target - branch;
InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
// If encoded offset is 4, then the jump must be short
if (BOffImm16(inst[0]).decode() == 4) {
MOZ_ASSERT(BOffImm16::IsInRange(offset));
inst[0].setBOffImm16(BOffImm16(offset));
inst[1].makeNop();
return;
}
// Generate the long jump for calls because return address has to be the
// address after the reserved block.
if (inst[0].encode() == inst_bgezal.encode()) {
addLongJump(BufferOffset(branch));
Assembler::WriteLoad64Instructions(inst, ScratchRegister, target);
inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
// There is 1 nop after this.
return;
}
if (BOffImm16::IsInRange(offset)) {
// Not skipping the trailing nops can improve performance
// on the Loongson3 platform.
bool skipNops = !isLoongson() && (inst[0].encode() != inst_bgezal.encode() &&
inst[0].encode() != inst_beq.encode());
inst[0].setBOffImm16(BOffImm16(offset));
inst[1].makeNop();
if (skipNops) {
inst[2] = InstImm(op_regimm, zero, rt_bgez, BOffImm16(5 * sizeof(uint32_t))).encode();
// There are 4 nops after this
}
return;
}
if (inst[0].encode() == inst_beq.encode()) {
// Handle long unconditional jump.
addLongJump(BufferOffset(branch));
Assembler::WriteLoad64Instructions(inst, ScratchRegister, target);
inst[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
// There is 1 nop after this.
} else {
// Handle long conditional jump.
inst[0] = invertBranch(inst[0], BOffImm16(7 * sizeof(uint32_t)));
// No need for a "nop" here because we can clobber scratch.
addLongJump(BufferOffset(branch + sizeof(uint32_t)));
Assembler::WriteLoad64Instructions(&inst[1], ScratchRegister, target);
inst[5] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
// There is 1 nop after this.
}
}
void
Assembler::bind(RepatchLabel* label)
{
BufferOffset dest = nextOffset();
if (label->used() && !oom()) {
// If the label has a use, then change this use to refer to
// the bound label;
BufferOffset b(label->offset());
InstImm* inst = (InstImm*)editSrc(b);
InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
uint64_t offset = dest.getOffset() - label->offset();
// If first instruction is lui, then this is a long jump.
// If second instruction is lui, then this is a loop backedge.
if (inst[0].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift)) {
// For unconditional long branches generated by ma_liPatchable,
// such as the ones from:
// jumpWithPatch
Assembler::UpdateLoad64Value(inst, dest.getOffset());
} else if (inst[1].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift) ||
BOffImm16::IsInRange(offset))
{
// Handle code produced by:
// backedgeJump
// branchWithCode
MOZ_ASSERT(BOffImm16::IsInRange(offset));
MOZ_ASSERT(inst[0].extractOpcode() == (uint32_t(op_beq) >> OpcodeShift) ||
inst[0].extractOpcode() == (uint32_t(op_bne) >> OpcodeShift) ||
inst[0].extractOpcode() == (uint32_t(op_blez) >> OpcodeShift) ||
inst[0].extractOpcode() == (uint32_t(op_bgtz) >> OpcodeShift));
inst[0].setBOffImm16(BOffImm16(offset));
} else if (inst[0].encode() == inst_beq.encode()) {
// Handle open long unconditional jumps created by
// MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
// We need to add it to long jumps array here.
// See MacroAssemblerMIPS64::branchWithCode().
MOZ_ASSERT(inst[1].encode() == NopInst);
MOZ_ASSERT(inst[2].encode() == NopInst);
MOZ_ASSERT(inst[3].encode() == NopInst);
MOZ_ASSERT(inst[4].encode() == NopInst);
MOZ_ASSERT(inst[5].encode() == NopInst);
addLongJump(BufferOffset(label->offset()));
Assembler::WriteLoad64Instructions(inst, ScratchRegister, dest.getOffset());
inst[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
} else {
// Handle open long conditional jumps created by
// MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
inst[0] = invertBranch(inst[0], BOffImm16(7 * sizeof(uint32_t)));
// No need for a "nop" here because we can clobber scratch.
// We need to add it to long jumps array here.
// See MacroAssemblerMIPS64::branchWithCode().
MOZ_ASSERT(inst[1].encode() == NopInst);
MOZ_ASSERT(inst[2].encode() == NopInst);
MOZ_ASSERT(inst[3].encode() == NopInst);
MOZ_ASSERT(inst[4].encode() == NopInst);
MOZ_ASSERT(inst[5].encode() == NopInst);
MOZ_ASSERT(inst[6].encode() == NopInst);
addLongJump(BufferOffset(label->offset() + sizeof(uint32_t)));
Assembler::WriteLoad64Instructions(&inst[1], ScratchRegister, dest.getOffset());
inst[5] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
}
}
label->bind(dest.getOffset());
}
uint32_t
Assembler::PatchWrite_NearCallSize()
{
// Load an address needs 4 instructions, and a jump with a delay slot.
return (4 + 2) * sizeof(uint32_t);
}
void
Assembler::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
{
Instruction* inst = (Instruction*) start.raw();
uint8_t* dest = toCall.raw();
// Overwrite whatever instruction used to be here with a call.
// Always use long jump for two reasons:
// - Jump has to be the same size because of PatchWrite_NearCallSize.
// - Return address has to be at the end of replaced block.
// Short jump wouldn't be more efficient.
Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)dest);
inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
inst[5] = InstNOP();
// Ensure everyone sees the code that was just written into memory.
AutoFlushICache::flush(uintptr_t(inst), PatchWrite_NearCallSize());
}
uint64_t
Assembler::ExtractLoad64Value(Instruction* inst0)
{
@ -452,19 +288,6 @@ Assembler::UpdateLoad64Value(Instruction* inst0, uint64_t value)
i5->setImm16(Imm16::Lower(Imm32(value)));
}
void
Assembler::WriteLoad64Instructions(Instruction* inst0, Register reg, uint64_t value)
{
Instruction* inst1 = inst0->next();
Instruction* inst2 = inst1->next();
Instruction* inst3 = inst2->next();
*inst0 = InstImm(op_lui, zero, reg, Imm16::Lower(Imm32(value >> 32)));
*inst1 = InstImm(op_ori, reg, reg, Imm16::Upper(Imm32(value)));
*inst2 = InstReg(op_special, rs_one, reg, reg, 48 - 32, ff_dsrl32);
*inst3 = InstImm(op_ori, reg, reg, Imm16::Lower(Imm32(value)));
}
void
Assembler::PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
ImmPtr expectedValue)
@ -492,8 +315,7 @@ Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newVal
void
Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm)
{
InstImm* inst = (InstImm*)code;
Assembler::UpdateLoad64Value(inst, (uint64_t)imm.value);
Assembler::UpdateLoad64Value((Instruction*)code, (uint64_t)imm.value);
}
uint64_t


@ -143,28 +143,17 @@ class Assembler : public AssemblerMIPSShared
static uintptr_t GetPointer(uint8_t*);
using AssemblerMIPSShared::bind;
void bind(RepatchLabel* label);
void Bind(uint8_t* rawCode, CodeOffset* label, const void* address);
static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
void bind(InstImm* inst, uintptr_t branch, uintptr_t target);
// Copy the assembly code to the given buffer, and perform any pending
// relocations relying on the target address.
void executableCopy(uint8_t* buffer);
static uint32_t PatchWrite_NearCallSize();
static uint32_t InstructionImmediateSize() {
return 4 * sizeof(uint32_t);
}
static uint64_t ExtractLoad64Value(Instruction* inst0);
static void UpdateLoad64Value(Instruction* inst0, uint64_t value);
static void WriteLoad64Instructions(Instruction* inst0, Register reg, uint64_t value);
static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
ImmPtr expectedValue);
static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,


@ -449,10 +449,10 @@ CodeGeneratorMIPS64::emitWasmLoadI64(T* lir)
masm.memoryBarrier(mir->access().barrierBefore());
if (mir->access().isUnaligned()) {
if (IsUnaligned(mir->access())) {
Register temp = ToRegister(lir->getTemp(1));
masm.ma_load_unaligned(ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
masm.ma_load_unaligned(mir->access(), ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
temp, static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
return;
@ -460,6 +460,7 @@ CodeGeneratorMIPS64::emitWasmLoadI64(T* lir)
masm.ma_load(ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
masm.append(mir->access(), masm.size() - 4, masm.framePushed());
masm.memoryBarrier(mir->access().barrierAfter());
}
@ -514,16 +515,17 @@ CodeGeneratorMIPS64::emitWasmStoreI64(T* lir)
masm.memoryBarrier(mir->access().barrierBefore());
if (mir->access().isUnaligned()) {
if (IsUnaligned(mir->access())) {
Register temp = ToRegister(lir->getTemp(1));
masm.ma_store_unaligned(ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
masm.ma_store_unaligned(mir->access(), ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
temp, static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
return;
}
masm.ma_store(ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
masm.append(mir->access(), masm.size() - 4, masm.framePushed());
masm.memoryBarrier(mir->access().barrierAfter());
}


@ -279,8 +279,9 @@ MacroAssembler::mul64(const Operand& src, const Register64& dest, const Register
void
MacroAssembler::mulBy3(Register src, Register dest)
{
as_daddu(dest, src, src);
as_daddu(dest, dest, src);
MOZ_ASSERT(src != ScratchRegister);
as_daddu(ScratchRegister, src, src);
as_daddu(dest, ScratchRegister, src);
}
void
@ -706,8 +707,12 @@ MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label)
void
MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
{
Instruction* inst = (Instruction*) patchAt;
InstImm* i0 = (InstImm*) inst;
InstImm* i1 = (InstImm*) i0->next();
// Replace with new value
Assembler::UpdateLoad64Value((Instruction*) patchAt, limit);
AssemblerMIPSShared::UpdateLuiOriValue(i0, i1, limit);
}
//}}} check_macroassembler_style
@ -721,9 +726,8 @@ inline void
MacroAssembler::cmpPtrSet(Assembler::Condition cond, Address lhs, ImmPtr rhs,
Register dest)
{
loadPtr(lhs, ScratchRegister);
movePtr(rhs, SecondScratchReg);
cmpPtrSet(cond, ScratchRegister, SecondScratchReg, dest);
loadPtr(lhs, SecondScratchReg);
cmpPtrSet(cond, SecondScratchReg, rhs, dest);
}
template<>


@ -258,36 +258,45 @@ MacroAssemblerMIPS64::ma_li(Register dest, ImmWord imm)
{
int64_t value = imm.value;
if (value >= INT16_MIN && value <= INT16_MAX) {
if (-1 == (value >> 15) || 0 == (value >> 15)) {
as_addiu(dest, zero, value);
} else if (imm.value <= UINT16_MAX) {
as_ori(dest, zero, Imm16::Lower(Imm32(value)).encode());
} else if (value >= INT32_MIN && value <= INT32_MAX) {
as_lui(dest, Imm16::Upper(Imm32(value)).encode());
if (value & 0xffff)
as_ori(dest, dest, Imm16::Lower(Imm32(value)).encode());
} else if (imm.value <= UINT32_MAX) {
as_lui(dest, Imm16::Upper(Imm32(value)).encode());
if (value & 0xffff)
as_ori(dest, dest, Imm16::Lower(Imm32(value)).encode());
as_dinsu(dest, zero, 32, 32);
} else {
uint64_t high = imm.value >> 32;
return;
}
if (0 == (value >> 16)) {
as_ori(dest, zero, value);
return;
}
if (imm.value >> 48) {
as_lui(dest, Imm16::Upper(Imm32(high)).encode());
if (high & 0xffff)
as_ori(dest, dest, Imm16::Lower(Imm32(high)).encode());
if (-1 == (value >> 31) || 0 == (value >> 31)) {
as_lui(dest, uint16_t(value >> 16));
} else if (0 == (value >> 32)) {
as_lui(dest, uint16_t(value >> 16));
as_dinsu(dest, zero, 32, 32);
} else if (-1 == (value >> 47) || 0 == (value >> 47)) {
as_lui(dest, uint16_t(value >> 32));
if (uint16_t(value >> 16))
as_ori(dest, dest, uint16_t(value >> 16));
as_dsll(dest, dest, 16);
} else if (0 == (value >> 48)) {
as_lui(dest, uint16_t(value >> 32));
as_dinsu(dest, zero, 32, 32);
if (uint16_t(value >> 16))
as_ori(dest, dest, uint16_t(value >> 16));
as_dsll(dest, dest, 16);
} else {
as_lui(dest, uint16_t(value >> 48));
if (uint16_t(value >> 32))
as_ori(dest, dest, uint16_t(value >> 32));
if (uint16_t(value >> 16)) {
as_dsll(dest, dest, 16);
as_ori(dest, dest, uint16_t(value >> 16));
as_dsll(dest, dest, 16);
} else {
as_lui(dest, Imm16::Lower(Imm32(high)).encode());
as_dsll32(dest, dest, 32);
}
if ((imm.value >> 16) & 0xffff)
as_ori(dest, dest, Imm16::Upper(Imm32(value)).encode());
as_dsll(dest, dest, 16);
if (value & 0xffff)
as_ori(dest, dest, Imm16::Lower(Imm32(value)).encode());
}
if (uint16_t(value))
as_ori(dest, dest, uint16_t(value));
}
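// Worked example (illustration only) of the lui/ori/dsll pattern used above to
// materialize a full 64-bit constant, here 0x123456789abcdef0; shorter values
// take the cheaper fast paths earlier in the function.
//   lui   dest, 0x1234         ; dest = 0x0000000012340000
//   ori   dest, dest, 0x5678   ; dest = 0x0000000012345678
//   dsll  dest, dest, 16       ; dest = 0x0000123456780000
//   ori   dest, dest, 0x9abc   ; dest = 0x0000123456789abc
//   dsll  dest, dest, 16       ; dest = 0x123456789abc0000
//   ori   dest, dest, 0xdef0   ; dest = 0x123456789abcdef0
#include <cstdint>
static_assert((((((0x1234ull << 16 | 0x5678) << 16) | 0x9abc) << 16) | 0xdef0) ==
              0x123456789abcdef0ull,
              "lui/ori/dsll expansion of a 64-bit immediate");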
// This method generates lui, dsll and ori instruction block that can be modified
@ -488,7 +497,7 @@ void
MacroAssemblerMIPS64::ma_addTestOverflow(Register rd, Register rs, Imm32 imm, L overflow)
{
// Check for signed range because of as_daddiu
if (Imm16::IsInSignedRange(imm.value) && Imm16::IsInUnsignedRange(imm.value)) {
if (Imm16::IsInSignedRange(imm.value)) {
as_daddiu(SecondScratchReg, rs, imm.value);
as_addiu(rd, rs, imm.value);
ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual);
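// Illustrative sketch of the overflow test above: addiu produces the
// sign-extended 32-bit sum while daddiu produces the full 64-bit sum; the two
// differ exactly when the 32-bit addition overflowed.
#include <cstdint>

static bool addOverflows32(int32_t rs, int16_t imm) {
    int64_t full    = int64_t(rs) + imm;                              // daddiu
    int64_t wrapped = int32_t(uint32_t(rs) + uint32_t(int32_t(imm))); // addiu
    return full != wrapped;                                           // bne -> overflow
}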
@ -710,14 +719,8 @@ MacroAssemblerMIPS64::ma_push(Register r)
void
MacroAssemblerMIPS64::ma_b(Register lhs, ImmWord imm, Label* label, Condition c, JumpKind jumpKind)
{
MOZ_ASSERT(c != Overflow);
if (imm.value == 0) {
if (c == Always || c == AboveOrEqual)
ma_b(label, jumpKind);
else if (c == Below)
; // This condition is always false. No branch required.
else
branchWithCode(getBranchCode(lhs, c), label, jumpKind);
if (imm.value <= INT32_MAX) {
ma_b(lhs, Imm32(uint32_t(imm.value)), label, c, jumpKind);
} else {
MOZ_ASSERT(lhs != ScratchRegister);
ma_li(ScratchRegister, imm);
@ -747,128 +750,21 @@ MacroAssemblerMIPS64::ma_b(Address addr, ImmGCPtr imm, Label* label, Condition c
ma_b(SecondScratchReg, imm, label, c, jumpKind);
}
void
MacroAssemblerMIPS64::ma_bal(Label* label, DelaySlotFill delaySlotFill)
{
if (label->bound()) {
// Generate the long jump for calls because return address has to be
// the address after the reserved block.
addLongJump(nextOffset());
ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
as_jalr(ScratchRegister);
if (delaySlotFill == FillDelaySlot)
as_nop();
return;
}
// Second word holds a pointer to the next branch in label's chain.
uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
// Make the whole branch continuous in the buffer; the 6 instructions
// below (including the delay slot) are written as one block.
m_buffer.ensureSpace(6 * sizeof(uint32_t));
BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
writeInst(nextInChain);
if (!oom())
label->use(bo.getOffset());
// Leave space for long jump.
as_nop();
as_nop();
as_nop();
if (delaySlotFill == FillDelaySlot)
as_nop();
}
void
MacroAssemblerMIPS64::branchWithCode(InstImm code, Label* label, JumpKind jumpKind)
{
MOZ_ASSERT(code.encode() != InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
if (label->bound()) {
int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
if (BOffImm16::IsInRange(offset))
jumpKind = ShortJump;
if (jumpKind == ShortJump) {
MOZ_ASSERT(BOffImm16::IsInRange(offset));
code.setBOffImm16(BOffImm16(offset));
writeInst(code.encode());
as_nop();
return;
}
if (code.encode() == inst_beq.encode()) {
// Handle long jump
addLongJump(nextOffset());
ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
as_jr(ScratchRegister);
as_nop();
return;
}
// Handle a long conditional branch: invert the condition so the branch
// skips over the long-jump sequence and nop emitted below.
writeInst(invertBranch(code, BOffImm16(7 * sizeof(uint32_t))).encode());
// No need for a "nop" here because we can clobber scratch.
addLongJump(nextOffset());
ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
as_jr(ScratchRegister);
as_nop();
return;
}
// Generate open jump and link it to a label.
// Second word holds a pointer to the next branch in label's chain.
uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
if (jumpKind == ShortJump) {
// Make the whole branch continuous in the buffer.
m_buffer.ensureSpace(2 * sizeof(uint32_t));
// Indicate that this is short jump with offset 4.
code.setBOffImm16(BOffImm16(4));
BufferOffset bo = writeInst(code.encode());
writeInst(nextInChain);
if (!oom())
label->use(bo.getOffset());
return;
}
bool conditional = code.encode() != inst_beq.encode();
// Make the whole branch continuous in the buffer; the 7 instructions
// below (including the conditional nop) are written as one block.
m_buffer.ensureSpace(7 * sizeof(uint32_t));
BufferOffset bo = writeInst(code.encode());
writeInst(nextInChain);
if (!oom())
label->use(bo.getOffset());
// Leave space for potential long jump.
as_nop();
as_nop();
as_nop();
as_nop();
if (conditional)
as_nop();
}
void
MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmWord imm, Condition c)
{
ma_li(ScratchRegister, imm);
ma_cmp_set(rd, rs, ScratchRegister, c);
if (imm.value <= INT32_MAX) {
ma_cmp_set(rd, rs, Imm32(uint32_t(imm.value)), c);
} else {
ma_li(ScratchRegister, imm);
ma_cmp_set(rd, rs, ScratchRegister, c);
}
}
void
MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmPtr imm, Condition c)
{
ma_li(ScratchRegister, ImmWord(uintptr_t(imm.value)));
ma_cmp_set(rd, rs, ScratchRegister, c);
ma_cmp_set(rd, rs, ImmWord(uintptr_t(imm.value)), c);
}
// fp instructions
@ -877,6 +773,10 @@ MacroAssemblerMIPS64::ma_lid(FloatRegister dest, double value)
{
ImmWord imm(mozilla::BitwiseCast<uint64_t>(value));
if (imm.value == 0) {
moveToDouble(zero, dest);
return;
}
ma_li(ScratchRegister, imm);
moveToDouble(ScratchRegister, dest);
}
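// Note (illustration): the new zero fast path relies on +0.0 having an
// all-zero IEEE-754 bit pattern, so the integer $zero register can be moved
// straight into the FPU register without materializing a constant first.
// (-0.0 and other doubles still go through ma_li + moveToDouble.)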
@ -1149,21 +1049,21 @@ MacroAssemblerMIPS64Compat::loadDouble(const BaseIndex& src, FloatRegister dest)
}
void
MacroAssemblerMIPS64Compat::loadUnalignedDouble(const BaseIndex& src, Register temp,
FloatRegister dest)
MacroAssemblerMIPS64Compat::loadUnalignedDouble(const wasm::MemoryAccessDesc& access,
const BaseIndex& src, Register temp, FloatRegister dest)
{
computeScaledAddress(src, SecondScratchReg);
BufferOffset load;
if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 7)) {
as_ldl(temp, SecondScratchReg, src.offset + 7);
load = as_ldl(temp, SecondScratchReg, src.offset + 7);
as_ldr(temp, SecondScratchReg, src.offset);
} else {
ma_li(ScratchRegister, Imm32(src.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
as_ldl(temp, ScratchRegister, 7);
load = as_ldl(temp, ScratchRegister, 7);
as_ldr(temp, ScratchRegister, 0);
}
append(access, load.getOffset(), asMasm().framePushed());
moveToDouble(temp, dest);
}
@ -1195,21 +1095,21 @@ MacroAssemblerMIPS64Compat::loadFloat32(const BaseIndex& src, FloatRegister dest
}
void
MacroAssemblerMIPS64Compat::loadUnalignedFloat32(const BaseIndex& src, Register temp,
FloatRegister dest)
MacroAssemblerMIPS64Compat::loadUnalignedFloat32(const wasm::MemoryAccessDesc& access,
const BaseIndex& src, Register temp, FloatRegister dest)
{
computeScaledAddress(src, SecondScratchReg);
BufferOffset load;
if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 3)) {
as_lwl(temp, SecondScratchReg, src.offset + 3);
load = as_lwl(temp, SecondScratchReg, src.offset + 3);
as_lwr(temp, SecondScratchReg, src.offset);
} else {
ma_li(ScratchRegister, Imm32(src.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
as_lwl(temp, ScratchRegister, 3);
load = as_lwl(temp, ScratchRegister, 3);
as_lwr(temp, ScratchRegister, 0);
}
append(access, load.getOffset(), asMasm().framePushed());
moveToFloat32(temp, dest);
}
@ -1279,6 +1179,10 @@ MacroAssemblerMIPS64Compat::store32(Register src, const Address& address)
void
MacroAssemblerMIPS64Compat::store32(Imm32 src, const Address& address)
{
if (src.value == 0) {
ma_store(zero, address, SizeWord);
return;
}
move32(src, SecondScratchReg);
ma_store(SecondScratchReg, address, SizeWord);
}
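// Note (illustration): storing Imm32(0) now goes through the hard-wired $zero
// register, avoiding the move32 that would otherwise materialize the constant
// in SecondScratchReg before the store.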
@ -1347,39 +1251,42 @@ MacroAssemblerMIPS64Compat::storePtr(Register src, AbsoluteAddress dest)
}
void
MacroAssemblerMIPS64Compat::storeUnalignedFloat32(FloatRegister src, Register temp,
const BaseIndex& dest)
MacroAssemblerMIPS64Compat::storeUnalignedFloat32(const wasm::MemoryAccessDesc& access,
FloatRegister src, Register temp, const BaseIndex& dest)
{
computeScaledAddress(dest, SecondScratchReg);
moveFromFloat32(src, temp);
BufferOffset store;
if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 3)) {
as_swl(temp, SecondScratchReg, dest.offset + 3);
store = as_swl(temp, SecondScratchReg, dest.offset + 3);
as_swr(temp, SecondScratchReg, dest.offset);
} else {
ma_li(ScratchRegister, Imm32(dest.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
as_swl(temp, ScratchRegister, 3);
store = as_swl(temp, ScratchRegister, 3);
as_swr(temp, ScratchRegister, 0);
}
append(access, store.getOffset(), asMasm().framePushed());
}
void
MacroAssemblerMIPS64Compat::storeUnalignedDouble(FloatRegister src, Register temp,
const BaseIndex& dest)
MacroAssemblerMIPS64Compat::storeUnalignedDouble(const wasm::MemoryAccessDesc& access,
FloatRegister src, Register temp, const BaseIndex& dest)
{
computeScaledAddress(dest, SecondScratchReg);
moveFromDouble(src, temp);
BufferOffset store;
if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 7)) {
as_sdl(temp, SecondScratchReg, dest.offset + 7);
store = as_sdl(temp, SecondScratchReg, dest.offset + 7);
as_sdr(temp, SecondScratchReg, dest.offset);
} else {
ma_li(ScratchRegister, Imm32(dest.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
as_sdl(temp, ScratchRegister, 7);
store = as_sdl(temp, ScratchRegister, 7);
as_sdr(temp, ScratchRegister, 0);
}
append(access, store.getOffset(), asMasm().framePushed());
}
// Note: this function clobbers the input register.
@ -1455,22 +1362,52 @@ MacroAssemblerMIPS64Compat::testUndefinedSet(Condition cond, const ValueOperand&
void
MacroAssemblerMIPS64Compat::unboxNonDouble(const ValueOperand& operand, Register dest)
{
Label isInt32, done;
Register tag = splitTagForTest(operand);
asMasm().branchTestInt32(Assembler::Equal, tag, &isInt32);
ma_dext(dest, operand.valueReg(), Imm32(0), Imm32(JSVAL_TAG_SHIFT));
jump(&done);
bind(&isInt32);
ma_sll(dest, operand.valueReg(), Imm32(0));
bind(&done);
}
void
MacroAssemblerMIPS64Compat::unboxNonDouble(const Address& src, Register dest)
{
Label isInt32, done;
loadPtr(Address(src.base, src.offset), dest);
splitTag(dest, SecondScratchReg);
asMasm().branchTestInt32(Assembler::Equal, SecondScratchReg, &isInt32);
ma_dext(dest, dest, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
jump(&done);
bind(&isInt32);
ma_sll(dest, dest, Imm32(0));
bind(&done);
}
void
MacroAssemblerMIPS64Compat::unboxNonDouble(const BaseIndex& src, Register dest)
{
Label isInt32, done;
computeScaledAddress(src, SecondScratchReg);
loadPtr(Address(SecondScratchReg, src.offset), dest);
splitTag(dest, SecondScratchReg);
asMasm().branchTestInt32(Assembler::Equal, SecondScratchReg, &isInt32);
ma_dext(dest, dest, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
jump(&done);
bind(&isInt32);
ma_sll(dest, dest, Imm32(0));
bind(&done);
}
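// Illustrative sketch of the unboxing convention assumed above (JSVAL_TAG_SHIFT
// is 47 on 64-bit): Int32/Boolean payloads are kept sign-extended to 64 bits,
// so they are unboxed with a 32-bit sign extension (sll reg, reg, 0), while
// other non-double payloads are unboxed by extracting the low tag-shift bits.
#include <cstdint>
constexpr unsigned kTagShift = 47;  // JSVAL_TAG_SHIFT (assumption)

static int64_t unboxInt32(uint64_t boxed) {
    return int32_t(uint32_t(boxed));                  // sll dest, dest, 0
}
static uint64_t unboxOther(uint64_t boxed) {
    return boxed & ((uint64_t(1) << kTagShift) - 1);  // dext dest, dest, 0, 47
}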
void
@ -1823,13 +1760,12 @@ MacroAssemblerMIPS64Compat::jumpWithPatch(RepatchLabel* label, Label* documentat
{
// Only one branch per label.
MOZ_ASSERT(!label->used());
uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;
BufferOffset bo = nextOffset();
label->use(bo.getOffset());
addLongJump(bo);
ma_liPatchable(ScratchRegister, ImmWord(dest));
as_jr(ScratchRegister);
if (label->bound())
addMixedJump(bo, label->offset(), MixedJumpPatch::PATCHABLE);
as_j(JOffImm26(0));
as_nop();
return CodeOffsetJump(bo.getOffset());
}
@ -1878,7 +1814,11 @@ MacroAssemblerMIPS64Compat::storeValue(JSValueType type, Register reg, Address d
ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
ma_dsll(SecondScratchReg, SecondScratchReg, Imm32(JSVAL_TAG_SHIFT));
ma_dins(SecondScratchReg, reg, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
ma_dins(SecondScratchReg, reg, Imm32(0), Imm32(32));
} else {
ma_dins(SecondScratchReg, reg, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
}
storePtr(SecondScratchReg, Address(dest.base, dest.offset));
}
@ -1925,8 +1865,13 @@ void
MacroAssemblerMIPS64Compat::tagValue(JSValueType type, Register payload, ValueOperand dest)
{
MOZ_ASSERT(dest.valueReg() != ScratchRegister);
if (payload != dest.valueReg())
ma_move(dest.valueReg(), payload);
if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
ma_dext(dest.valueReg(), payload, Imm32(0), Imm32(32));
} else {
if (payload != dest.valueReg()) {
ma_move(dest.valueReg(), payload);
}
}
ma_li(ScratchRegister, ImmTag(JSVAL_TYPE_TO_TAG(type)));
ma_dins(dest.valueReg(), ScratchRegister, Imm32(JSVAL_TAG_SHIFT), Imm32(64 - JSVAL_TAG_SHIFT));
}
@ -2051,9 +1996,9 @@ MacroAssemblerMIPS64Compat::handleFailureWithHandlerTail(void* handler)
loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
// We're going to be returning by the ion calling convention
ma_pop(ra);
as_ld(ra, StackPointer, 0);
as_jr(ra);
as_nop();
as_daddiu(StackPointer, StackPointer, sizeof(intptr_t)); // in delay slot.
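// Note (illustration): on MIPS the instruction in a jump's delay slot executes
// before control transfers, so the stack-pointer adjustment above runs even
// though it is written after as_jr(ra); this saves a separate instruction
// compared with the old ma_pop(ra) sequence.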
// If we found a catch handler, this must be a baseline frame. Restore
// state and jump to the catch block.
@ -2075,8 +2020,9 @@ MacroAssemblerMIPS64Compat::handleFailureWithHandlerTail(void* handler)
loadPtr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp);
pushValue(BooleanValue(true));
pushValue(exception);
jump(a0);
as_daddiu(StackPointer, StackPointer, -sizeof(intptr_t));
as_jr(a0);
as_sd(exception.valueReg(), StackPointer, 0); // In delay slot
// Only used in debug mode. Return BaselineFrame->returnValue() to the
// caller.


@ -115,20 +115,17 @@ class MacroAssemblerMIPS64 : public MacroAssemblerMIPSShared
void ma_pop(Register r);
void ma_push(Register r);
void branchWithCode(InstImm code, Label* label, JumpKind jumpKind);
// branches when done from within mips-specific code
void ma_b(Register lhs, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
void ma_b(Register lhs, Address addr, Label* l, Condition c, JumpKind jumpKind = LongJump);
void ma_b(Address addr, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
void ma_b(Address addr, Register rhs, Label* l, Condition c, JumpKind jumpKind = LongJump) {
void ma_b(Register lhs, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
void ma_b(Register lhs, Address addr, Label* l, Condition c, JumpKind jumpKind = MixedJump);
void ma_b(Address addr, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
void ma_b(Address addr, Register rhs, Label* l, Condition c, JumpKind jumpKind = MixedJump) {
MOZ_ASSERT(rhs != ScratchRegister);
ma_load(ScratchRegister, addr, SizeDouble);
ma_b(ScratchRegister, rhs, l, c, jumpKind);
}
void ma_bal(Label* l, DelaySlotFill delaySlotFill = FillDelaySlot);
// fp instructions
void ma_lid(FloatRegister dest, double value);
@ -473,7 +470,12 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
JSValueTag tag = (JSValueTag)JSVAL_TYPE_TO_TAG(type);
ma_li(dest, Imm32(tag));
ma_dsll(dest, dest, Imm32(JSVAL_TAG_SHIFT));
ma_dins(dest, src, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
ma_dins(dest, src, Imm32(0), Imm32(32));
} else {
ma_dins(dest, src, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
}
}
void storeValue(ValueOperand val, Operand dst);
@ -905,7 +907,8 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
void loadDouble(const Address& addr, FloatRegister dest);
void loadDouble(const BaseIndex& src, FloatRegister dest);
void loadUnalignedDouble(const BaseIndex& src, Register temp, FloatRegister dest);
void loadUnalignedDouble(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
Register temp, FloatRegister dest);
// Load a float value into a register, then expand it to a double.
void loadFloatAsDouble(const Address& addr, FloatRegister dest);
@ -913,7 +916,8 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
void loadFloat32(const Address& addr, FloatRegister dest);
void loadFloat32(const BaseIndex& src, FloatRegister dest);
void loadUnalignedFloat32(const BaseIndex& src, Register temp, FloatRegister dest);
void loadUnalignedFloat32(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
Register temp, FloatRegister dest);
void store8(Register src, const Address& address);
void store8(Imm32 imm, const Address& address);
@ -952,8 +956,10 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
void storePtr(Register src, const BaseIndex& address);
void storePtr(Register src, AbsoluteAddress dest);
void storeUnalignedFloat32(FloatRegister src, Register temp, const BaseIndex& dest);
void storeUnalignedDouble(FloatRegister src, Register temp, const BaseIndex& dest);
void storeUnalignedFloat32(const wasm::MemoryAccessDesc& access, FloatRegister src,
Register temp, const BaseIndex& dest);
void storeUnalignedDouble(const wasm::MemoryAccessDesc& access, FloatRegister src,
Register temp, const BaseIndex& dest);
void moveDouble(FloatRegister src, FloatRegister dest) {
as_movd(dest, src);
@ -1009,12 +1015,6 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
as_nop();
}
BufferOffset ma_BoundsCheck(Register bounded) {
BufferOffset bo = m_buffer.nextOffset();
ma_liPatchable(bounded, ImmWord(0));
return bo;
}
void moveFloat32(FloatRegister src, FloatRegister dest) {
as_movs(dest, src);
}


@ -747,7 +747,6 @@ class MemoryAccessDesc
TrapOffset trapOffset() const { return *trapOffset_; }
bool isAtomic() const { return (barrierBefore_ | barrierAfter_) != jit::MembarNobits; }
bool isSimd() const { return Scalar::isSimdType(type_); }
bool isUnaligned() const { return align() && align() < byteSize(); }
bool isPlainAsmJS() const { return !hasTrap(); }
void clearOffset() { offset_ = 0; }


@ -1772,12 +1772,14 @@ elif test "$CPU_ARCH" = "arm"; then
dnl ARM platforms may trap on unaligned accesses; catch the signal and
dnl recover.
elif test "$CPU_ARCH" = "mips32"; then
AC_DEFINE(JS_CODEGEN_MIPS32)
JS_CODEGEN_MIPS32=1
elif test "$CPU_ARCH" = "mips64"; then
AC_DEFINE(JS_CODEGEN_MIPS64)
JS_CODEGEN_MIPS64=1
elif test "$CPU_ARCH" = "mips" || test "$CPU_ARCH" = "mips32" || test "$CPU_ARCH" = "mips64"; then
if test ! "$HAVE_64BIT_BUILD"; then
AC_DEFINE(JS_CODEGEN_MIPS32)
JS_CODEGEN_MIPS32=1
else
AC_DEFINE(JS_CODEGEN_MIPS64)
JS_CODEGEN_MIPS64=1
fi
fi
AC_SUBST(JS_SIMULATOR)


@ -3206,7 +3206,7 @@ class BaseCompiler
// This is the temp register passed as the last argument to load()
MOZ_MUST_USE size_t loadStoreTemps(MemoryAccessDesc& access) {
#if defined(JS_CODEGEN_ARM)
if (access.isUnaligned()) {
if (IsUnaligned(access)) {
switch (access.type()) {
case Scalar::Float32:
return 1;
@ -3391,7 +3391,7 @@ class BaseCompiler
#ifdef JS_CODEGEN_ARM
void
loadI32(MemoryAccessDesc access, bool isSigned, RegI32 ptr, Register rt) {
if (access.byteSize() > 1 && access.isUnaligned()) {
if (access.byteSize() > 1 && IsUnaligned(access)) {
masm.add32(HeapReg, ptr.reg);
SecondScratchRegisterScope scratch(*this);
masm.emitUnalignedLoad(isSigned, access.byteSize(), ptr.reg, scratch, rt, 0);
@ -3405,7 +3405,7 @@ class BaseCompiler
void
storeI32(MemoryAccessDesc access, RegI32 ptr, Register rt) {
if (access.byteSize() > 1 && access.isUnaligned()) {
if (access.byteSize() > 1 && IsUnaligned(access)) {
masm.add32(HeapReg, ptr.reg);
masm.emitUnalignedStore(access.byteSize(), ptr.reg, rt, 0);
} else {
@ -3419,7 +3419,7 @@ class BaseCompiler
void
loadI64(MemoryAccessDesc access, RegI32 ptr, RegI64 dest) {
if (access.isUnaligned()) {
if (IsUnaligned(access)) {
masm.add32(HeapReg, ptr.reg);
SecondScratchRegisterScope scratch(*this);
masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, dest.reg.low,
@ -3440,7 +3440,7 @@ class BaseCompiler
void
storeI64(MemoryAccessDesc access, RegI32 ptr, RegI64 src) {
if (access.isUnaligned()) {
if (IsUnaligned(access)) {
masm.add32(HeapReg, ptr.reg);
masm.emitUnalignedStore(ByteSize(4), ptr.reg, src.reg.low, 0);
masm.emitUnalignedStore(ByteSize(4), ptr.reg, src.reg.high, 4);
@ -3459,7 +3459,7 @@ class BaseCompiler
void
loadF32(MemoryAccessDesc access, RegI32 ptr, RegF32 dest, RegI32 tmp1) {
masm.add32(HeapReg, ptr.reg);
if (access.isUnaligned()) {
if (IsUnaligned(access)) {
SecondScratchRegisterScope scratch(*this);
masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, tmp1.reg, 0);
masm.ma_vxfer(tmp1.reg, dest.reg);
@ -3473,7 +3473,7 @@ class BaseCompiler
void
storeF32(MemoryAccessDesc access, RegI32 ptr, RegF32 src, RegI32 tmp1) {
masm.add32(HeapReg, ptr.reg);
if (access.isUnaligned()) {
if (IsUnaligned(access)) {
masm.ma_vxfer(src.reg, tmp1.reg);
masm.emitUnalignedStore(ByteSize(4), ptr.reg, tmp1.reg, 0);
} else {
@ -3486,7 +3486,7 @@ class BaseCompiler
void
loadF64(MemoryAccessDesc access, RegI32 ptr, RegF64 dest, RegI32 tmp1, RegI32 tmp2) {
masm.add32(HeapReg, ptr.reg);
if (access.isUnaligned()) {
if (IsUnaligned(access)) {
SecondScratchRegisterScope scratch(*this);
masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, tmp1.reg, 0);
masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, tmp2.reg, 4);
@ -3501,7 +3501,7 @@ class BaseCompiler
void
storeF64(MemoryAccessDesc access, RegI32 ptr, RegF64 src, RegI32 tmp1, RegI32 tmp2) {
masm.add32(HeapReg, ptr.reg);
if (access.isUnaligned()) {
if (IsUnaligned(access)) {
masm.ma_vxfer(src.reg, tmp1.reg, tmp2.reg);
masm.emitUnalignedStore(ByteSize(4), ptr.reg, tmp1.reg, 0);
masm.emitUnalignedStore(ByteSize(4), ptr.reg, tmp2.reg, 4);
@ -5991,7 +5991,7 @@ BaseCompiler::emitLoad(ValType type, Scalar::Type viewType)
case ValType::I32: {
RegI32 rp = popI32();
#ifdef JS_CODEGEN_ARM
RegI32 rv = access.isUnaligned() ? needI32() : rp;
RegI32 rv = IsUnaligned(access) ? needI32() : rp;
#else
RegI32 rv = rp;
#endif


@ -51,7 +51,7 @@ typedef struct r_malloc_chunk_ {
#endif
UCHAR type;
UINT4 size;
UCHAR memory[1];
UINT8 memory[1];
} r_malloc_chunk;
#define CHUNK_MEMORY_OFFSET offsetof(struct r_malloc_chunk_, memory)


@ -114,11 +114,6 @@
#define ARCH_CPU_LITTLE_ENDIAN 1
#elif defined(__pnacl__)
#define ARCH_CPU_32_BITS 1
#elif defined(__MIPSEL__)
#define ARCH_CPU_MIPS_FAMILY 1
#define ARCH_CPU_MIPSEL 1
#define ARCH_CPU_32_BITS 1
#define ARCH_CPU_LITTLE_ENDIAN 1
#elif defined(__powerpc64__)
#define ARCH_CPU_PPC_FAMILY 1
#define ARCH_CPU_PPC64 1
@ -140,7 +135,16 @@
#elif defined(__mips__)
#define ARCH_CPU_MIPS_FAMILY 1
#define ARCH_CPU_MIPS 1
#if defined(__LP64__)
#define ARCH_CPU_64_BITS 1
#else
#define ARCH_CPU_32_BITS 1
#endif
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define ARCH_CPU_LITTLE_ENDIAN 1
#else
#define ARCH_CPU_BIG_ENDIAN 1
#endif
#elif defined(__hppa__)
#define ARCH_CPU_HPPA 1
#define ARCH_CPU_32_BITS 1


@ -615,6 +615,11 @@ int VP8EncoderImpl::SetCpuSpeed(int width, int height) {
// On mobile platform, always set to -12 to leverage between cpu usage
// and video quality.
return -12;
#elif defined(WEBRTC_ARCH_MIPS)
// On the mips platform, temporarily set to -12 to balance cpu usage
// and video quality.
// TODO: Once coding performance has improved, restore the complexity setting.
return -12;
#else
// For non-ARM, increase encoding complexity (i.e., use lower speed setting)
// if resolution is below CIF. Otherwise, keep the default/user setting
@ -1103,7 +1108,7 @@ int VP8DecoderImpl::InitDecode(const VideoCodec* inst,
cfg.h = cfg.w = 0; // set after decode
vpx_codec_flags_t flags = 0;
#ifndef WEBRTC_ARCH_ARM
#if !defined(WEBRTC_ARCH_ARM) && !defined(WEBRTC_ARCH_MIPS)
flags = VPX_CODEC_USE_POSTPROC;
#ifdef INDEPENDENT_PARTITIONS
flags |= VPX_CODEC_USE_INPUT_PARTITION;


@ -37,6 +37,8 @@
#define _PR_SI_ARCHITECTURE "sparc"
#elif defined(__i386__)
#define _PR_SI_ARCHITECTURE "x86"
#elif defined(__mips64__)
#define _PR_SI_ARCHITECTURE "mips64"
#elif defined(__mips__)
#define _PR_SI_ARCHITECTURE "mips"
#elif defined(__arm__)


@ -25,6 +25,8 @@
#define _PR_SI_ARCHITECTURE "sparc64"
#elif defined(__sparc__)
#define _PR_SI_ARCHITECTURE "sparc"
#elif defined(__mips64__)
#define _PR_SI_ARCHITECTURE "mips64"
#elif defined(__mips__)
#define _PR_SI_ARCHITECTURE "mips"
#elif defined(__arm32__) || defined(__arm__) || defined(__armel__) \


@ -67,10 +67,10 @@ NESTED(_NS_InvokeByIndex, FRAMESZ, ra)
jal invoke_copy_to_stack
REG_L t3, 8(sp) # get previous a0
REG_L sp, 0(sp) # get orig sp back
REG_L s0, 0(sp) # get orig sp back and save away our stack pointer
REG_L a0, A0OFF(sp) # a0 - that
REG_L a1, A1OFF(sp) # a1 - methodIndex
REG_L a0, A0OFF(s0) # a0 - that
REG_L a1, A1OFF(s0) # a1 - methodIndex
# t1 = methodIndex * pow(2, PTRLOG)
# (use shift instead of mult)
@ -105,13 +105,12 @@ NESTED(_NS_InvokeByIndex, FRAMESZ, ra)
l.d $f18, 40(t1)
l.d $f19, 48(t1)
# save away our stack pointer and create
# the stack pointer for the function
move s0, sp
# create the stack pointer for the function
move sp, t3
jalr t9
## restore stack pointer.
move sp, s0
RESTORE_GP64