author     machenbach@chromium.org <machenbach@chromium.org>    2014-09-23 12:44:49 +0000
committer  machenbach@chromium.org <machenbach@chromium.org>    2014-09-23 12:44:49 +0000
commit     2800ec31a4cd20c4d8a40805da863925ed90a062 (patch)
tree       33ac5230f74659f222daca27bbe5d9869ce90c0b
parent     6002d7a5d938c309ebbdcedf98f06fd28696ec21 (diff)
download   v8-2800ec31a4cd20c4d8a40805da863925ed90a062.tar.gz
Version 3.29.84 (based on bleeding_edge revision r24096)
Performance and stability improvements on all platforms.
git-svn-id: https://v8.googlecode.com/svn/trunk@24149 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
138 files changed, 1767 insertions, 2872 deletions
@@ -1,3 +1,8 @@
+2014-09-23: Version 3.29.84
+
+        Performance and stability improvements on all platforms.
+
+
 2014-09-23: Version 3.29.83
 
         Performance and stability improvements on all platforms.
 
diff --git a/PRESUBMIT.py b/PRESUBMIT.py
index 3a9895db8..43d6b5b8e 100644
--- a/PRESUBMIT.py
+++ b/PRESUBMIT.py
@@ -34,32 +34,6 @@ for more details about the presubmit API built into gcl.
 
 import sys
 
-_EXCLUDED_PATHS = (
-    r"^test[\\\/].*",
-    r"^testing[\\\/].*",
-    r"^third_party[\\\/].*",
-    r"^tools[\\\/].*",
-)
-
-
-# Regular expression that matches code only used for test binaries
-# (best effort).
-_TEST_CODE_EXCLUDED_PATHS = (
-    r'.+-unittest\.cc',
-    # Has a method VisitForTest().
-    r'src[\\\/]compiler[\\\/]ast-graph-builder\.cc',
-    # Test extension.
-    r'src[\\\/]extensions[\\\/]gc-extension\.cc',
-)
-
-
-_TEST_ONLY_WARNING = (
-    'You might be calling functions intended only for testing from\n'
-    'production code. It is OK to ignore this warning if you know what\n'
-    'you are doing, as the heuristics used to detect the situation are\n'
-    'not perfect. The commit queue will not block on this warning.')
-
-
 def _V8PresubmitChecks(input_api, output_api):
   """Runs the V8 presubmit checks."""
   import sys
@@ -139,49 +113,6 @@ def _CheckUnwantedDependencies(input_api, output_api):
   return results
 
 
-def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
-  """Attempts to prevent use of functions intended only for testing in
-  non-testing code. For now this is just a best-effort implementation
-  that ignores header files and may have some false positives. A
-  better implementation would probably need a proper C++ parser.
-  """
-  # We only scan .cc files, as the declaration of for-testing functions in
-  # header files are hard to distinguish from calls to such functions without a
-  # proper C++ parser.
-  file_inclusion_pattern = r'.+\.cc'
-
-  base_function_pattern = r'[ :]test::[^\s]+|ForTest(ing)?|for_test(ing)?'
-  inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern)
-  comment_pattern = input_api.re.compile(r'//.*(%s)' % base_function_pattern)
-  exclusion_pattern = input_api.re.compile(
-      r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (
-          base_function_pattern, base_function_pattern))
-
-  def FilterFile(affected_file):
-    black_list = (_EXCLUDED_PATHS +
-                  _TEST_CODE_EXCLUDED_PATHS +
-                  input_api.DEFAULT_BLACK_LIST)
-    return input_api.FilterSourceFile(
-        affected_file,
-        white_list=(file_inclusion_pattern, ),
-        black_list=black_list)
-
-  problems = []
-  for f in input_api.AffectedSourceFiles(FilterFile):
-    local_path = f.LocalPath()
-    for line_number, line in f.ChangedContents():
-      if (inclusion_pattern.search(line) and
-          not comment_pattern.search(line) and
-          not exclusion_pattern.search(line)):
-        problems.append(
-            '%s:%d\n    %s' % (local_path, line_number, line.strip()))
-
-  if problems:
-    return [output_api.PresubmitPromptOrNotify(_TEST_ONLY_WARNING, problems)]
-  else:
-    return []
-
-
 def _CommonChecks(input_api, output_api):
   """Checks common to both upload and commit."""
   results = []
@@ -191,8 +122,6 @@ def _CommonChecks(input_api, output_api):
       input_api, output_api))
   results.extend(_V8PresubmitChecks(input_api, output_api))
   results.extend(_CheckUnwantedDependencies(input_api, output_api))
-  results.extend(
-      _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
   return results
 
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 25270d15f..d879f29af 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -3465,8 +3465,8 @@ void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
   __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
 
-  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
-  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
+  __ JumpIfNotUniqueName(tmp1, &miss);
+  __ JumpIfNotUniqueName(tmp2, &miss);
 
   // Unique names are compared by identity.
   __ cmp(left, right);
@@ -3698,7 +3698,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
     __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
     __ ldrb(entity_name, FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
-    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
+    __ JumpIfNotUniqueName(entity_name, miss);
     __ bind(&good);
 
     // Restore the properties.
@@ -3868,7 +3868,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
       __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
      __ ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
-      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
+      __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
     }
   }
 
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index a06ed7344..11b170b73 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -319,26 +319,30 @@ bool LCodeGen::GenerateJumpTable() {
   // Each entry in the jump table generates one instruction and inlines one
   // 32bit data after it.
   if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
-                jump_table_.length() * 7)) {
+                deopt_jump_table_.length() * 7)) {
     Abort(kGeneratedCodeIsTooLarge);
   }
 
-  if (jump_table_.length() > 0) {
+  if (deopt_jump_table_.length() > 0) {
     Label needs_frame, call_deopt_entry;
 
     Comment(";;; -------------------- Jump table --------------------");
-    Address base = jump_table_[0].address;
+    Address base = deopt_jump_table_[0].address;
 
     Register entry_offset = scratch0();
 
-    int length = jump_table_.length();
+    int length = deopt_jump_table_.length();
     for (int i = 0; i < length; i++) {
-      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+      Deoptimizer::JumpTableEntry* table_entry = &deopt_jump_table_[i];
       __ bind(&table_entry->label);
 
-      DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
+      Deoptimizer::BailoutType type = table_entry->bailout_type;
+      DCHECK(type == deopt_jump_table_[0].bailout_type);
       Address entry = table_entry->address;
-      DeoptComment(table_entry->reason);
+      int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
+      DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id);
+      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+      DeoptComment(table_entry->mnemonic, table_entry->reason);
 
       // Second-level deopt table entries are contiguous and small, so instead
       // of loading the full, absolute address of each one, load an immediate
@@ -842,7 +846,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
 
 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
-                            const char* detail,
+                            const char* reason,
                             Deoptimizer::BailoutType bailout_type) {
   LEnvironment* environment = instr->environment();
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -895,35 +899,35 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
     __ stop("trap_on_deopt", condition);
   }
 
-  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
-                             instr->Mnemonic(), detail);
   DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to handle condition, build frame, or
   // restore caller doubles.
   if (condition == al && frame_is_built_ &&
       !info()->saves_caller_doubles()) {
-    DeoptComment(reason);
+    DeoptComment(instr->Mnemonic(), reason);
     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
-    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
-                                            !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
-    if (jump_table_.is_empty() ||
-        !table_entry.IsEquivalentTo(jump_table_.last())) {
-      jump_table_.Add(table_entry, zone());
+    if (deopt_jump_table_.is_empty() ||
+        (deopt_jump_table_.last().address != entry) ||
+        (deopt_jump_table_.last().bailout_type != bailout_type) ||
+        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
+      Deoptimizer::JumpTableEntry table_entry(entry, instr->Mnemonic(), reason,
+                                              bailout_type, !frame_is_built_);
+      deopt_jump_table_.Add(table_entry, zone());
     }
-    __ b(condition, &jump_table_.last().label);
+    __ b(condition, &deopt_jump_table_.last().label);
   }
 }
 
 
 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
-                            const char* detail) {
+                            const char* reason) {
   Deoptimizer::BailoutType bailout_type = info()->IsStub() ?
       Deoptimizer::LAZY : Deoptimizer::EAGER;
-  DeoptimizeIf(condition, instr, detail, bailout_type);
+  DeoptimizeIf(condition, instr, reason, bailout_type);
 }
 
@@ -4972,22 +4976,26 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
     __ bind(&check_false);
     __ LoadRoot(ip, Heap::kFalseValueRootIndex);
     __ cmp(scratch2, Operand(ip));
-    DeoptimizeIf(ne, instr, "cannot truncate");
+    __ RecordComment("Deferred TaggedToI: cannot truncate");
+    DeoptimizeIf(ne, instr);
     __ mov(input_reg, Operand::Zero());
   } else {
-    DeoptimizeIf(ne, instr, "not a heap number");
+    __ RecordComment("Deferred TaggedToI: not a heap number");
+    DeoptimizeIf(ne, instr);
 
     __ sub(ip, scratch2, Operand(kHeapObjectTag));
     __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
     __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
-    DeoptimizeIf(ne, instr, "lost precision or NaN");
+    __ RecordComment("Deferred TaggedToI: lost precision or NaN");
+    DeoptimizeIf(ne, instr);
 
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       __ cmp(input_reg, Operand::Zero());
       __ b(ne, &done);
       __ VmovHigh(scratch1, double_scratch2);
       __ tst(scratch1, Operand(HeapNumber::kSignMask));
-      DeoptimizeIf(ne, instr, "minus zero");
+      __ RecordComment("Deferred TaggedToI: minus zero");
+      DeoptimizeIf(ne, instr);
     }
   }
   __ bind(&done);
 
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index cb137d1c9..ca8c56302 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -26,7 +26,7 @@ class LCodeGen: public LCodeGenBase {
   LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
       : LCodeGenBase(chunk, assembler, info),
         deoptimizations_(4, info->zone()),
-        jump_table_(4, info->zone()),
+        deopt_jump_table_(4, info->zone()),
         deoptimization_literals_(8, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
@@ -235,9 +235,9 @@ class LCodeGen: public LCodeGenBase {
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                             Safepoint::DeoptMode mode);
   void DeoptimizeIf(Condition condition, LInstruction* instr,
-                    const char* detail, Deoptimizer::BailoutType bailout_type);
+                    const char* reason, Deoptimizer::BailoutType bailout_type);
   void DeoptimizeIf(Condition condition, LInstruction* instr,
-                    const char* detail = NULL);
+                    const char* reason = NULL);
 
   void AddToTranslation(LEnvironment* environment,
                         Translation* translation,
@@ -332,7 +332,7 @@ class LCodeGen: public LCodeGenBase {
   void EmitVectorLoadICRegisters(T* instr);
 
   ZoneList<LEnvironment*> deoptimizations_;
-  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
+  ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
   int inlined_function_count_;
   Scope* const scope_;
 
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index c845a3d7e..92615e1bb 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -3199,8 +3199,8 @@ void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
 }
 
 
-void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
-                                                     Label* not_unique_name) {
+void MacroAssembler::JumpIfNotUniqueName(Register reg,
+                                         Label* not_unique_name) {
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   Label succeed;
   tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
 
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 030b87a0d..057591af3 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -1340,7 +1340,7 @@ class MacroAssembler: public Assembler {
   void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                 Label* failure);
 
-  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
+  void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
 
   void EmitSeqStringSetCharCheck(Register string,
                                  Register index,
 
diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index 1702e468d..93b0e2867 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -3370,8 +3370,8 @@ void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
 
   // To avoid a miss, each instance type should be either SYMBOL_TYPE or it
   // should have kInternalizedTag set.
-  __ JumpIfNotUniqueNameInstanceType(lhs_instance_type, &miss);
-  __ JumpIfNotUniqueNameInstanceType(rhs_instance_type, &miss);
+  __ JumpIfNotUniqueName(lhs_instance_type, &miss);
+  __ JumpIfNotUniqueName(rhs_instance_type, &miss);
 
   // Unique names are compared by identity.
   STATIC_ASSERT(EQUAL == 0);
@@ -4488,7 +4488,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
     __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
     __ Ldrb(entity_name, FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
-    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
+    __ JumpIfNotUniqueName(entity_name, miss);
 
     __ Bind(&good);
   }
@@ -4575,7 +4575,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
       // Check if the entry name is not a unique name.
       __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
       __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
-      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
+      __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
     }
   }
 
diff --git a/src/arm64/lithium-codegen-arm64.cc b/src/arm64/lithium-codegen-arm64.cc
index 40c5c4238..a7ea29b9f 100644
--- a/src/arm64/lithium-codegen-arm64.cc
+++ b/src/arm64/lithium-codegen-arm64.cc
@@ -839,8 +839,12 @@ bool LCodeGen::GenerateJumpTable() {
     Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
     __ Bind(&table_entry->label);
 
+    Deoptimizer::BailoutType type = table_entry->bailout_type;
     Address entry = table_entry->address;
-    DeoptComment(table_entry->reason);
+    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
+    DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id);
+    Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+    DeoptComment(table_entry->mnemonic, table_entry->reason);
 
     // Second-level deopt table entries are contiguous and small, so instead
     // of loading the full, absolute address of each one, load the base
@@ -989,7 +993,7 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
 
 void LCodeGen::DeoptimizeBranch(
-    LInstruction* instr, const char* detail, BranchType branch_type,
+    LInstruction* instr, const char* reason, BranchType branch_type,
     Register reg, int bit,
     Deoptimizer::BailoutType* override_bailout_type) {
   LEnvironment* environment = instr->environment();
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -1040,22 +1044,21 @@ void LCodeGen::DeoptimizeBranch(
     __ Bind(&dont_trap);
   }
 
-  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
-                             instr->Mnemonic(), detail);
   DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to build frame, or restore caller doubles.
   if (branch_type == always &&
       frame_is_built_ && !info()->saves_caller_doubles()) {
-    DeoptComment(reason);
+    DeoptComment(instr->Mnemonic(), reason);
     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
-    Deoptimizer::JumpTableEntry* table_entry =
-        new (zone()) Deoptimizer::JumpTableEntry(entry, reason, bailout_type,
-                                                 !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
-    if (jump_table_.is_empty() ||
-        !table_entry->IsEquivalentTo(*jump_table_.last())) {
+    if (jump_table_.is_empty() || (jump_table_.last()->address != entry) ||
+        (jump_table_.last()->bailout_type != bailout_type) ||
+        (jump_table_.last()->needs_frame != !frame_is_built_)) {
+      Deoptimizer::JumpTableEntry* table_entry =
+          new (zone()) Deoptimizer::JumpTableEntry(
+              entry, instr->Mnemonic(), reason, bailout_type, !frame_is_built_);
       jump_table_.Add(table_entry, zone());
     }
     __ B(&jump_table_.last()->label, branch_type, reg, bit);
@@ -1065,78 +1068,78 @@ void LCodeGen::DeoptimizeBranch(
 
 void LCodeGen::Deoptimize(LInstruction* instr,
                           Deoptimizer::BailoutType* override_bailout_type,
-                          const char* detail) {
-  DeoptimizeBranch(instr, detail, always, NoReg, -1, override_bailout_type);
+                          const char* reason) {
+  DeoptimizeBranch(instr, reason, always, NoReg, -1, override_bailout_type);
 }
 
 
 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
-                            const char* detail) {
-  DeoptimizeBranch(instr, detail, static_cast<BranchType>(cond));
+                            const char* reason) {
+  DeoptimizeBranch(instr, reason, static_cast<BranchType>(cond));
 }
 
 
 void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
-                                const char* detail) {
-  DeoptimizeBranch(instr, detail, reg_zero, rt);
+                                const char* reason) {
+  DeoptimizeBranch(instr, reason, reg_zero, rt);
 }
 
 
 void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
-                                   const char* detail) {
-  DeoptimizeBranch(instr, detail, reg_not_zero, rt);
+                                   const char* reason) {
+  DeoptimizeBranch(instr, reason, reg_not_zero, rt);
 }
 
 
 void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
-                                    const char* detail) {
+                                    const char* reason) {
   int sign_bit = rt.Is64Bits() ?
       kXSignBit : kWSignBit;
-  DeoptimizeIfBitSet(rt, sign_bit, instr, detail);
+  DeoptimizeIfBitSet(rt, sign_bit, instr, reason);
 }
 
 
 void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
-                               const char* detail) {
-  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, detail);
+                               const char* reason) {
+  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, reason);
 }
 
 
 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
-                                  const char* detail) {
-  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, detail);
+                                  const char* reason) {
+  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, reason);
 }
 
 
 void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
-                                LInstruction* instr, const char* detail) {
+                                LInstruction* instr, const char* reason) {
   __ CompareRoot(rt, index);
-  DeoptimizeIf(eq, instr, detail);
+  DeoptimizeIf(eq, instr, reason);
 }
 
 
 void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
-                                   LInstruction* instr, const char* detail) {
+                                   LInstruction* instr, const char* reason) {
   __ CompareRoot(rt, index);
-  DeoptimizeIf(ne, instr, detail);
+  DeoptimizeIf(ne, instr, reason);
 }
 
 
 void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
-                                     const char* detail) {
+                                     const char* reason) {
   __ TestForMinusZero(input);
-  DeoptimizeIf(vs, instr, detail);
+  DeoptimizeIf(vs, instr, reason);
 }
 
 
 void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
-                                  const char* detail) {
-  DeoptimizeBranch(instr, detail, reg_bit_set, rt, bit);
+                                  const char* reason) {
+  DeoptimizeBranch(instr, reason, reg_bit_set, rt, bit);
 }
 
 
 void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
-                                    const char* detail) {
-  DeoptimizeBranch(instr, detail, reg_bit_clear, rt, bit);
+                                    const char* reason) {
+  DeoptimizeBranch(instr, reason, reg_bit_clear, rt, bit);
 }
 
 
@@ -5626,20 +5629,22 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
     Register output = ToRegister32(instr->result());
     DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
 
-    DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, instr,
-                        "not a heap number");
+    __ RecordComment("Deferred TaggedToI: not a heap number");
+    DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, instr);
 
     // A heap number: load value and convert to int32 using non-truncating
     // function. If the result is out of range, branch to deoptimize.
     __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
     __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
-    DeoptimizeIf(ne, instr, "lost precision or NaN");
+    __ RecordComment("Deferred TaggedToI: lost precision or NaN");
+    DeoptimizeIf(ne, instr);
 
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       __ Cmp(output, 0);
       __ B(ne, &done);
       __ Fmov(scratch1, dbl_scratch1);
-      DeoptimizeIfNegative(scratch1, instr, "minus zero");
+      __ RecordComment("Deferred TaggedToI: minus zero");
+      DeoptimizeIfNegative(scratch1, instr);
     }
   }
   __ Bind(&done);
 
diff --git a/src/arm64/lithium-codegen-arm64.h b/src/arm64/lithium-codegen-arm64.h
index e24b03136..40fbc381c 100644
--- a/src/arm64/lithium-codegen-arm64.h
+++ b/src/arm64/lithium-codegen-arm64.h
@@ -213,35 +213,35 @@ class LCodeGen: public LCodeGenBase {
                                     Register temp, LOperand* index,
                                     String::Encoding encoding);
-  void DeoptimizeBranch(LInstruction* instr, const char* detail,
+  void DeoptimizeBranch(LInstruction* instr, const char* reason,
                         BranchType branch_type, Register reg = NoReg,
                         int bit = -1,
                         Deoptimizer::BailoutType* override_bailout_type = NULL);
   void Deoptimize(LInstruction* instr,
                   Deoptimizer::BailoutType* override_bailout_type = NULL,
-                  const char* detail = NULL);
+                  const char* reason = NULL);
   void DeoptimizeIf(Condition cond, LInstruction* instr,
-                    const char* detail = NULL);
+                    const char* reason = NULL);
   void DeoptimizeIfZero(Register rt, LInstruction* instr,
-                        const char* detail = NULL);
+                        const char* reason = NULL);
   void DeoptimizeIfNotZero(Register rt, LInstruction* instr,
-                           const char* detail = NULL);
+                           const char* reason = NULL);
   void DeoptimizeIfNegative(Register rt, LInstruction* instr,
-                            const char* detail = NULL);
+                            const char* reason = NULL);
   void DeoptimizeIfSmi(Register rt, LInstruction* instr,
-                       const char* detail = NULL);
+                       const char* reason = NULL);
   void DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
-                          const char* detail = NULL);
+                          const char* reason = NULL);
   void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
-                        LInstruction* instr, const char* detail = NULL);
+                        LInstruction* instr, const char* reason = NULL);
   void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
-                           LInstruction* instr, const char* detail = NULL);
+                           LInstruction* instr, const char* reason = NULL);
   void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
-                             const char* detail = NULL);
+                             const char* reason = NULL);
   void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
-                          const char* detail = NULL);
+                          const char* reason = NULL);
   void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
-                            const char* detail = NULL);
+                            const char* reason = NULL);
 
   MemOperand PrepareKeyedExternalArrayOperand(Register key, Register base,
 
diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc
index 394bb36c6..f78efd289 100644
--- a/src/arm64/macro-assembler-arm64.cc
+++ b/src/arm64/macro-assembler-arm64.cc
@@ -2768,8 +2768,8 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
 }
 
 
-void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
-                                                     Label* not_unique_name) {
+void MacroAssembler::JumpIfNotUniqueName(Register type,
+                                         Label* not_unique_name) {
   STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
   //   if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
   //     continue
 
diff --git a/src/arm64/macro-assembler-arm64.h b/src/arm64/macro-assembler-arm64.h
index 33ef43927..e73fc2cec 100644
--- a/src/arm64/macro-assembler-arm64.h
+++ b/src/arm64/macro-assembler-arm64.h
@@ -1074,7 +1074,7 @@ class MacroAssembler : public Assembler {
       Register first_object_instance_type, Register second_object_instance_type,
       Register scratch1, Register scratch2, Label* failure);
 
-  void JumpIfNotUniqueNameInstanceType(Register type, Label* not_unique_name);
+  void JumpIfNotUniqueName(Register type, Label* not_unique_name);
 
   // ---- Calling / Jumping helpers ----
 
diff --git a/src/arm64/simulator-arm64.cc b/src/arm64/simulator-arm64.cc
index 129252b49..277275c87 100644
--- a/src/arm64/simulator-arm64.cc
+++ b/src/arm64/simulator-arm64.cc
@@ -1855,12 +1855,9 @@ void Simulator::LoadStoreWriteBack(unsigned addr_reg,
 void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) {
   if ((address >= stack_limit_) && (address < stack)) {
     fprintf(stream_, "ACCESS BELOW STACK POINTER:\n");
-    fprintf(stream_, "  sp is here:          0x%016" PRIx64 "\n",
-            static_cast<uint64_t>(stack));
-    fprintf(stream_, "  access was here:     0x%016" PRIx64 "\n",
-            static_cast<uint64_t>(address));
-    fprintf(stream_, "  stack limit is here: 0x%016" PRIx64 "\n",
-            static_cast<uint64_t>(stack_limit_));
+    fprintf(stream_, "  sp is here:          0x%016" PRIx64 "\n", stack);
+    fprintf(stream_, "  access was here:     0x%016" PRIx64 "\n", address);
+    fprintf(stream_, "  stack limit is here: 0x%016" PRIx64 "\n", stack_limit_);
     fprintf(stream_, "\n");
     FATAL("ACCESS BELOW STACK POINTER");
   }
 
diff --git a/src/base/macros.h b/src/base/macros.h
index cef088cb8..7a3561878 100644
--- a/src/base/macros.h
+++ b/src/base/macros.h
@@ -230,7 +230,7 @@ struct CompileAssert {};
 // WARNING: if Dest or Source is a non-POD type, the result of the memcpy
 // is likely to surprise you.
 template <class Dest, class Source>
-V8_INLINE Dest bit_cast(Source const& source) {
+inline Dest bit_cast(const Source& source) {
   COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), VerifySizesAreEqual);
 
   Dest dest;
 
diff --git a/src/builtins.cc b/src/builtins.cc
index d0c19e504..4a393cb27 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1289,6 +1289,11 @@ static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
 }
 
 
+static void Generate_KeyedLoadIC_SloppyArguments(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateSloppyArguments(masm);
+}
+
+
 static void Generate_StoreIC_Miss(MacroAssembler* masm) {
   StoreIC::GenerateMiss(masm);
 }
 
diff --git a/src/builtins.h b/src/builtins.h
index c1ed91df4..f9409da79 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -89,6 +89,7 @@ enum BuiltinExtraArguments {
                                     kNoExtraICState)                          \
   V(KeyedLoadIC_Generic, KEYED_LOAD_IC, GENERIC, kNoExtraICState)             \
   V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, kNoExtraICState)          \
+  V(KeyedLoadIC_SloppyArguments, KEYED_LOAD_IC, MONOMORPHIC, kNoExtraICState) \
                                                                               \
   V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, StoreIC::kStrictModeState)\
                                                                               \
 
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 1c43049cb..3a9688a89 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -71,8 +71,6 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
     MULTIPLE
   };
 
-  HValue* UnmappedCase(HValue* elements, HValue* key);
-
   HValue* BuildArrayConstructor(ElementsKind kind,
                                 AllocationSiteOverrideMode override_mode,
                                 ArgumentClass argument_class);
@@ -602,122 +600,6 @@ HValue* CodeStubGraphBuilder<LoadConstantStub>::BuildCodeStub() {
 Handle<Code> LoadConstantStub::GenerateCode() { return DoGenerateCode(this); }
 
 
-HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key) {
-  HValue* result;
-  HInstruction* backing_store = Add<HLoadKeyed>(
-      elements, graph()->GetConstant1(), static_cast<HValue*>(NULL),
-      FAST_ELEMENTS, ALLOW_RETURN_HOLE);
-  Add<HCheckMaps>(backing_store, isolate()->factory()->fixed_array_map());
-  HValue* backing_store_length =
-      Add<HLoadNamedField>(backing_store, static_cast<HValue*>(NULL),
-                           HObjectAccess::ForFixedArrayLength());
-  IfBuilder in_unmapped_range(this);
-  in_unmapped_range.If<HCompareNumericAndBranch>(key, backing_store_length,
-                                                 Token::LT);
-  in_unmapped_range.Then();
-  {
-    result = Add<HLoadKeyed>(backing_store, key, static_cast<HValue*>(NULL),
-                             FAST_HOLEY_ELEMENTS, NEVER_RETURN_HOLE);
-  }
-  in_unmapped_range.ElseDeopt("Outside of range");
-  in_unmapped_range.End();
-  return result;
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<KeyedLoadSloppyArgumentsStub>::BuildCodeStub() {
-  HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
-  HValue* key = GetParameter(LoadDescriptor::kNameIndex);
-
-  // Mapped arguments are actual arguments. Unmapped arguments are values added
-  // to the arguments object after it was created for the call. Mapped arguments
-  // are stored in the context at indexes given by elements[key + 2]. Unmapped
-  // arguments are stored as regular indexed properties in the arguments array,
-  // held at elements[1]. See NewSloppyArguments() in runtime.cc for a detailed
-  // look at argument object construction.
-  //
-  // The sloppy arguments elements array has a special format:
-  //
-  // 0: context
-  // 1: unmapped arguments array
-  // 2: mapped_index0,
-  // 3: mapped_index1,
-  // ...
-  //
-  // length is 2 + min(number_of_actual_arguments, number_of_formal_arguments).
-  // If key + 2 >= elements.length then attempt to look in the unmapped
-  // arguments array (given by elements[1]) and return the value at key, missing
-  // to the runtime if the unmapped arguments array is not a fixed array or if
-  // key >= unmapped_arguments_array.length.
-  //
-  // Otherwise, t = elements[key + 2]. If t is the hole, then look up the value
-  // in the unmapped arguments array, as described above. Otherwise, t is a Smi
-  // index into the context array given at elements[0]. Return the value at
-  // context[t].
-
-  key = AddUncasted<HForceRepresentation>(key, Representation::Smi());
-  IfBuilder positive_smi(this);
-  positive_smi.If<HCompareNumericAndBranch>(key, graph()->GetConstant0(),
-                                            Token::LT);
-  positive_smi.ThenDeopt("key is negative");
-  positive_smi.End();
-
-  HValue* constant_two = Add<HConstant>(2);
-  HValue* elements = AddLoadElements(receiver, static_cast<HValue*>(NULL));
-  HValue* elements_length =
-      Add<HLoadNamedField>(elements, static_cast<HValue*>(NULL),
-                           HObjectAccess::ForFixedArrayLength());
-  HValue* adjusted_length = AddUncasted<HSub>(elements_length, constant_two);
-  IfBuilder in_range(this);
-  in_range.If<HCompareNumericAndBranch>(key, adjusted_length, Token::LT);
-  in_range.Then();
-  {
-    HValue* index = AddUncasted<HAdd>(key, constant_two);
-    HInstruction* mapped_index =
-        Add<HLoadKeyed>(elements, index, static_cast<HValue*>(NULL),
-                        FAST_HOLEY_ELEMENTS, ALLOW_RETURN_HOLE);
-
-    IfBuilder is_valid(this);
-    is_valid.IfNot<HCompareObjectEqAndBranch>(mapped_index,
-                                              graph()->GetConstantHole());
-    is_valid.Then();
-    {
-      // TODO(mvstanton): I'd like to assert from this point, that if the
-      // mapped_index is not the hole that it is indeed, a smi. An unnecessary
-      // smi check is being emitted.
-      HValue* the_context =
-          Add<HLoadKeyed>(elements, graph()->GetConstant0(),
-                          static_cast<HValue*>(NULL), FAST_ELEMENTS);
-      DCHECK(Context::kHeaderSize == FixedArray::kHeaderSize);
-      HValue* result =
-          Add<HLoadKeyed>(the_context, mapped_index, static_cast<HValue*>(NULL),
-                          FAST_ELEMENTS, ALLOW_RETURN_HOLE);
-      environment()->Push(result);
-    }
-    is_valid.Else();
-    {
-      HValue* result = UnmappedCase(elements, key);
-      environment()->Push(result);
-    }
-    is_valid.End();
-  }
-  in_range.Else();
-  {
-    HValue* result = UnmappedCase(elements, key);
-    environment()->Push(result);
-  }
-  in_range.End();
-
-  return environment()->Pop();
-}
-
-
-Handle<Code> KeyedLoadSloppyArgumentsStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
 void CodeStubGraphBuilderBase::BuildStoreNamedField(
     HValue* object, HValue* value, FieldIndex index,
     Representation representation) {
@@ -1210,6 +1092,7 @@ Handle<Code> ToBooleanStub::GenerateCode() {
 template <>
 HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
   StoreGlobalStub* stub = casted_stub();
+  Handle<Object> hole(isolate()->heap()->the_hole_value(), isolate());
   Handle<Object> placeholer_value(Smi::FromInt(0), isolate());
   Handle<PropertyCell> placeholder_cell =
       isolate()->factory()->NewPropertyCell(placeholer_value);
@@ -1241,7 +1124,7 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
     // property has been deleted and that the store must be handled by the
     // runtime.
     IfBuilder builder(this);
-    HValue* hole_value = graph()->GetConstantHole();
+    HValue* hole_value = Add<HConstant>(hole);
     builder.If<HCompareObjectEqAndBranch>(cell_contents, hole_value);
     builder.Then();
     builder.Deopt("Unexpected cell contents in global store");
 
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 5c9e1a2b8..96460c56f 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -586,14 +586,12 @@ void KeyedLoadGenericStub::InitializeDescriptor(
 
 void HandlerStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   if (kind() == Code::STORE_IC) {
     descriptor->Initialize(FUNCTION_ADDR(StoreIC_MissFromStubFailure));
-  } else if (kind() == Code::KEYED_LOAD_IC) {
-    descriptor->Initialize(FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
   }
 }
 
 
 CallInterfaceDescriptor HandlerStub::GetCallInterfaceDescriptor() {
-  if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
+  if (kind() == Code::LOAD_IC) {
     return LoadDescriptor(isolate());
   } else {
     DCHECK_EQ(Code::STORE_IC, kind());
 
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 3b31399a1..f9016f180 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -82,7 +82,6 @@ namespace internal {
   /* IC Handler stubs */        \
   V(LoadConstant)               \
   V(LoadField)                  \
-  V(KeyedLoadSloppyArguments)   \
   V(StoreField)                 \
   V(StoreGlobal)                \
   V(StringLength)
@@ -915,20 +914,6 @@ class LoadFieldStub: public HandlerStub {
 };
 
 
-class KeyedLoadSloppyArgumentsStub : public HandlerStub {
- public:
-  explicit KeyedLoadSloppyArgumentsStub(Isolate* isolate)
-      : HandlerStub(isolate) {}
-
- protected:
-  virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
-  virtual Code::StubType GetStubType() { return Code::FAST; }
-
- private:
-  DEFINE_HANDLER_CODE_STUB(KeyedLoadSloppyArguments, HandlerStub);
-};
-
-
 class LoadConstantStub : public HandlerStub {
  public:
   LoadConstantStub(Isolate* isolate, int constant_index)
 
diff --git a/src/compiler.cc b/src/compiler.cc
index 68918d684..685009ec5 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -325,6 +325,10 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
   DCHECK(info()->IsOptimizing());
   DCHECK(!info()->IsCompilingForDebugging());
 
+  // We should never arrive here if there is no code object on the
+  // shared function object.
+  DCHECK(info()->shared_info()->code()->kind() == Code::FUNCTION);
+
   // We should never arrive here if optimization has been disabled on the
   // shared function info.
   DCHECK(!info()->shared_info()->optimization_disabled());
@@ -392,8 +396,7 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
     DCHECK(info()->shared_info()->has_deoptimization_support());
 
     // Check the whitelist for TurboFan.
-    if ((FLAG_turbo_asm && info()->shared_info()->asm_function()) ||
-        info()->closure()->PassesFilter(FLAG_turbo_filter)) {
+    if (info()->closure()->PassesFilter(FLAG_turbo_filter)) {
       compiler::Pipeline pipeline(info());
       pipeline.GenerateCode();
       if (!info()->code().is_null()) {
@@ -701,117 +704,6 @@ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
 }
 
 
-MUST_USE_RESULT static MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
-    Handle<JSFunction> function, BailoutId osr_ast_id) {
-  if (FLAG_cache_optimized_code) {
-    Handle<SharedFunctionInfo> shared(function->shared());
-    // Bound functions are not cached.
-    if (shared->bound()) return MaybeHandle<Code>();
-    DisallowHeapAllocation no_gc;
-    int index = shared->SearchOptimizedCodeMap(
-        function->context()->native_context(), osr_ast_id);
-    if (index > 0) {
-      if (FLAG_trace_opt) {
-        PrintF("[found optimized code for ");
-        function->ShortPrint();
-        if (!osr_ast_id.IsNone()) {
-          PrintF(" at OSR AST id %d", osr_ast_id.ToInt());
-        }
-        PrintF("]\n");
-      }
-      FixedArray* literals = shared->GetLiteralsFromOptimizedCodeMap(index);
-      if (literals != NULL) function->set_literals(literals);
-      return Handle<Code>(shared->GetCodeFromOptimizedCodeMap(index));
-    }
-  }
-  return MaybeHandle<Code>();
-}
-
-
-static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
-  Handle<Code> code = info->code();
-  if (code->kind() != Code::OPTIMIZED_FUNCTION) return;  // Nothing to do.
-
-  // Context specialization folds-in the context, so no sharing can occur.
-  if (code->is_turbofanned() && info->is_context_specializing()) return;
-
-  // Cache optimized code.
-  if (FLAG_cache_optimized_code) {
-    Handle<JSFunction> function = info->closure();
-    Handle<SharedFunctionInfo> shared(function->shared());
-    // Do not cache bound functions.
-    if (shared->bound()) return;
-    Handle<FixedArray> literals(function->literals());
-    Handle<Context> native_context(function->context()->native_context());
-    SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
-                                              literals, info->osr_ast_id());
-  }
-}
-
-
-static bool CompileOptimizedPrologue(CompilationInfo* info) {
-  if (!Parser::Parse(info)) return false;
-  if (!Rewriter::Rewrite(info)) return false;
-  if (!Scope::Analyze(info)) return false;
-  DCHECK(info->scope() != NULL);
-  return true;
-}
-
-
-static bool GetOptimizedCodeNow(CompilationInfo* info) {
-  if (!CompileOptimizedPrologue(info)) return false;
-
-  TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
-
-  OptimizedCompileJob job(info);
-  if (job.CreateGraph() != OptimizedCompileJob::SUCCEEDED) return false;
-  if (job.OptimizeGraph() != OptimizedCompileJob::SUCCEEDED) return false;
-  if (job.GenerateCode() != OptimizedCompileJob::SUCCEEDED) return false;
-
-  // Success!
-  DCHECK(!info->isolate()->has_pending_exception());
-  InsertCodeIntoOptimizedCodeMap(info);
-  RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info,
-                            info->shared_info());
-  return true;
-}
-
-
-static bool GetOptimizedCodeLater(CompilationInfo* info) {
-  Isolate* isolate = info->isolate();
-  if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
-    if (FLAG_trace_concurrent_recompilation) {
-      PrintF("  ** Compilation queue full, will retry optimizing ");
-      info->closure()->PrintName();
-      PrintF(" later.\n");
-    }
-    return false;
-  }
-
-  CompilationHandleScope handle_scope(info);
-  if (!CompileOptimizedPrologue(info)) return false;
-  info->SaveHandles();  // Copy handles to the compilation handle scope.
-
-  TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
-
-  OptimizedCompileJob* job = new (info->zone()) OptimizedCompileJob(info);
-  OptimizedCompileJob::Status status = job->CreateGraph();
-  if (status != OptimizedCompileJob::SUCCEEDED) return false;
-  isolate->optimizing_compiler_thread()->QueueForOptimization(job);
-
-  if (FLAG_trace_concurrent_recompilation) {
-    PrintF("  ** Queued ");
-    info->closure()->PrintName();
-    if (info->is_osr()) {
-      PrintF(" for concurrent OSR at %d.\n", info->osr_ast_id().ToInt());
-    } else {
-      PrintF(" for concurrent optimization.\n");
-    }
-  }
-  return true;
-}
-
-
 MaybeHandle<Code> Compiler::GetUnoptimizedCode(Handle<JSFunction> function) {
   DCHECK(!function->GetIsolate()->has_pending_exception());
   DCHECK(!function->is_compiled());
@@ -838,14 +730,29 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
     VMState<COMPILER> state(info.isolate());
     PostponeInterruptsScope postpone(info.isolate());
 
+    if (FLAG_trace_opt) {
+      // TODO(titzer): record and report full stats here.
+ PrintF("[optimizing asm "); + function->ShortPrint(); + PrintF("]\n"); + } + + if (!Parser::Parse(&info)) return MaybeHandle<Code>(); + if (!Rewriter::Rewrite(&info)) return MaybeHandle<Code>(); + if (!Scope::Analyze(&info)) return MaybeHandle<Code>(); + if (FLAG_turbo_deoptimization && !EnsureDeoptimizationSupport(&info)) { + return MaybeHandle<Code>(); + } + info.SetOptimizing(BailoutId::None(), Handle<Code>(function->shared()->code())); info.MarkAsContextSpecializing(); info.MarkAsTypingEnabled(); info.MarkAsInliningDisabled(); - - if (GetOptimizedCodeNow(&info)) return info.code(); + compiler::Pipeline pipeline(&info); + pipeline.GenerateCode(); + if (!info.code().is_null()) return info.code(); } if (function->shared()->is_compiled()) { @@ -1170,7 +1077,6 @@ Handle<SharedFunctionInfo> Compiler::CompileScript( if (FLAG_serialize_toplevel && compile_options == ScriptCompiler::kConsumeCodeCache && !isolate->debug()->is_loaded()) { - HistogramTimerScope timer(isolate->counters()->compile_deserialize()); return CodeSerializer::Deserialize(isolate, *cached_data, source); } else { maybe_result = compilation_cache->LookupScript( @@ -1217,8 +1123,6 @@ Handle<SharedFunctionInfo> Compiler::CompileScript( compilation_cache->PutScript(source, context, result); if (FLAG_serialize_toplevel && compile_options == ScriptCompiler::kProduceCodeCache) { - HistogramTimerScope histogram_timer( - isolate->counters()->compile_serialize()); *cached_data = CodeSerializer::Serialize(isolate, result, source); if (FLAG_profile_deserialization) { PrintF("[Compiling and serializing %d bytes took %0.3f ms]\n", @@ -1305,6 +1209,118 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo( } +MUST_USE_RESULT static MaybeHandle<Code> GetCodeFromOptimizedCodeMap( + Handle<JSFunction> function, + BailoutId osr_ast_id) { + if (FLAG_cache_optimized_code) { + Handle<SharedFunctionInfo> shared(function->shared()); + // Bound functions are not cached. + if (shared->bound()) return MaybeHandle<Code>(); + DisallowHeapAllocation no_gc; + int index = shared->SearchOptimizedCodeMap( + function->context()->native_context(), osr_ast_id); + if (index > 0) { + if (FLAG_trace_opt) { + PrintF("[found optimized code for "); + function->ShortPrint(); + if (!osr_ast_id.IsNone()) { + PrintF(" at OSR AST id %d", osr_ast_id.ToInt()); + } + PrintF("]\n"); + } + FixedArray* literals = shared->GetLiteralsFromOptimizedCodeMap(index); + if (literals != NULL) function->set_literals(literals); + return Handle<Code>(shared->GetCodeFromOptimizedCodeMap(index)); + } + } + return MaybeHandle<Code>(); +} + + +static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) { + Handle<Code> code = info->code(); + if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do. + + // Context specialization folds-in the context, so no sharing can occur. + if (code->is_turbofanned() && info->is_context_specializing()) return; + + // Cache optimized code. + if (FLAG_cache_optimized_code) { + Handle<JSFunction> function = info->closure(); + Handle<SharedFunctionInfo> shared(function->shared()); + // Do not cache bound functions. 
+    if (shared->bound()) return;
+    Handle<FixedArray> literals(function->literals());
+    Handle<Context> native_context(function->context()->native_context());
+    SharedFunctionInfo::AddToOptimizedCodeMap(
+        shared, native_context, code, literals, info->osr_ast_id());
+  }
+}
+
+
+static bool CompileOptimizedPrologue(CompilationInfo* info) {
+  if (!Parser::Parse(info)) return false;
+  if (!Rewriter::Rewrite(info)) return false;
+  if (!Scope::Analyze(info)) return false;
+  DCHECK(info->scope() != NULL);
+  return true;
+}
+
+
+static bool GetOptimizedCodeNow(CompilationInfo* info) {
+  if (!CompileOptimizedPrologue(info)) return false;
+
+  TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
+
+  OptimizedCompileJob job(info);
+  if (job.CreateGraph() != OptimizedCompileJob::SUCCEEDED) return false;
+  if (job.OptimizeGraph() != OptimizedCompileJob::SUCCEEDED) return false;
+  if (job.GenerateCode() != OptimizedCompileJob::SUCCEEDED) return false;
+
+  // Success!
+  DCHECK(!info->isolate()->has_pending_exception());
+  InsertCodeIntoOptimizedCodeMap(info);
+  RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info,
+                            info->shared_info());
+  return true;
+}
+
+
+static bool GetOptimizedCodeLater(CompilationInfo* info) {
+  Isolate* isolate = info->isolate();
+  if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
+    if (FLAG_trace_concurrent_recompilation) {
+      PrintF("  ** Compilation queue full, will retry optimizing ");
+      info->closure()->PrintName();
+      PrintF(" later.\n");
+    }
+    return false;
+  }
+
+  CompilationHandleScope handle_scope(info);
+  if (!CompileOptimizedPrologue(info)) return false;
+  info->SaveHandles();  // Copy handles to the compilation handle scope.
+
+  TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
+
+  OptimizedCompileJob* job = new(info->zone()) OptimizedCompileJob(info);
+  OptimizedCompileJob::Status status = job->CreateGraph();
+  if (status != OptimizedCompileJob::SUCCEEDED) return false;
+  isolate->optimizing_compiler_thread()->QueueForOptimization(job);
+
+  if (FLAG_trace_concurrent_recompilation) {
+    PrintF("  ** Queued ");
+    info->closure()->PrintName();
+    if (info->is_osr()) {
+      PrintF(" for concurrent OSR at %d.\n", info->osr_ast_id().ToInt());
+    } else {
+      PrintF(" for concurrent optimization.\n");
+    }
+  }
+  return true;
+}
+
+
 MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
                                              Handle<Code> current_code,
                                              ConcurrencyMode mode,
 
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index c995d11b1..403568222 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -136,8 +136,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   ArmOperandConverter i(this, instr);
 
   switch (ArchOpcodeField::decode(instr->opcode())) {
+    case kArchCallAddress: {
+      DirectCEntryStub stub(isolate());
+      stub.GenerateCall(masm(), i.InputRegister(0));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
     case kArchCallCodeObject: {
-      EnsureSpaceForLazyDeopt();
       if (instr->InputAt(0)->IsImmediate()) {
         __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                 RelocInfo::CODE_TARGET);
@@ -151,7 +156,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       break;
     }
     case kArchCallJSFunction: {
-      EnsureSpaceForLazyDeopt();
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
@@ -165,6 +169,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kArchDrop: {
+      int words = MiscField::decode(instr->opcode());
+      __ Drop(words);
+      DCHECK_LT(0, words);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
     case kArchJmp:
       __ b(code_->GetLabel(i.InputBlock(0)));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -845,27 +856,6 @@ void CodeGenerator::AddNopForSmiCodeInlining() {
   // On 32-bit ARM we do not insert nops for inlined Smi code.
 }
 
-
-void CodeGenerator::EnsureSpaceForLazyDeopt() {
-  int space_needed = Deoptimizer::patch_size();
-  if (!linkage()->info()->IsStub()) {
-    // Ensure that we have enough space after the previous lazy-bailout
-    // instruction for patching the code here.
-    int current_pc = masm()->pc_offset();
-    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
-      // Block literal pool emission for duration of padding.
-      v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
-      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
-      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
-      while (padding_size > 0) {
-        __ nop();
-        padding_size -= v8::internal::Assembler::kInstrSize;
-      }
-    }
-  }
-  MarkLazyDeoptSite();
-}
-
 #undef __
 
 }  // namespace compiler
 
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index a37ebf21d..c645fb734 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -68,8 +68,10 @@ class ArmOperandGenerator FINAL : public OperandGenerator {
       case kArmStrh:
         return value >= -255 && value <= 255;
 
+      case kArchCallAddress:
       case kArchCallCodeObject:
       case kArchCallJSFunction:
+      case kArchDrop:
       case kArchJmp:
       case kArchNop:
      case kArchRet:
@@ -801,6 +803,9 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
       opcode = kArchCallCodeObject;
       break;
     }
+    case CallDescriptor::kCallAddress:
+      opcode = kArchCallAddress;
+      break;
     case CallDescriptor::kCallJSFunction:
       opcode = kArchCallJSFunction;
       break;
@@ -820,6 +825,13 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
     DCHECK(continuation != NULL);
     call_instr->MarkAsControl();
   }
+
+  // Caller clean up of stack for C-style calls.
+  if (descriptor->kind() == CallDescriptor::kCallAddress &&
+      !buffer.pushed_nodes.empty()) {
+    DCHECK(deoptimization == NULL && continuation == NULL);
+    Emit(kArchDrop | MiscField::encode(buffer.pushed_nodes.size()), NULL);
+  }
 }
 
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index 4a9893f3b..c8d5f26d0 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -131,8 +131,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   Arm64OperandConverter i(this, instr);
   InstructionCode opcode = instr->opcode();
   switch (ArchOpcodeField::decode(opcode)) {
+    case kArchCallAddress: {
+      DirectCEntryStub stub(isolate());
+      stub.GenerateCall(masm(), i.InputRegister(0));
+      break;
+    }
     case kArchCallCodeObject: {
-      EnsureSpaceForLazyDeopt();
       if (instr->InputAt(0)->IsImmediate()) {
         __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                 RelocInfo::CODE_TARGET);
@@ -145,7 +149,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       break;
     }
     case kArchCallJSFunction: {
-      EnsureSpaceForLazyDeopt();
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
@@ -160,6 +163,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       AddSafepointAndDeopt(instr);
       break;
     }
+    case kArchDrop: {
+      int words = MiscField::decode(instr->opcode());
+      __ Drop(words);
+      break;
+    }
     case kArchJmp:
       __ B(code_->GetLabel(i.InputBlock(0)));
       break;
@@ -846,29 +854,6 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
 
 void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
 
-
-void CodeGenerator::EnsureSpaceForLazyDeopt() {
-  int space_needed = Deoptimizer::patch_size();
-  if (!linkage()->info()->IsStub()) {
-    // Ensure that we have enough space after the previous lazy-bailout
-    // instruction for patching the code here.
-    intptr_t current_pc = masm()->pc_offset();
-
-    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
-      intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
-      DCHECK((padding_size % kInstructionSize) == 0);
-      InstructionAccurateScope instruction_accurate(
-          masm(), padding_size / kInstructionSize);
-
-      while (padding_size > 0) {
-        __ nop();
-        padding_size -= kInstructionSize;
-      }
-    }
-  }
-  MarkLazyDeoptSite();
-}
-
 #undef __
 
 }  // namespace compiler
 
diff --git a/src/compiler/arm64/instruction-selector-arm64-unittest.cc b/src/compiler/arm64/instruction-selector-arm64-unittest.cc
index b5562c233..9903a5277 100644
--- a/src/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/src/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -32,26 +32,6 @@ std::ostream& operator<<(std::ostream& os, const MachInst<T>& mi) {
 }
 
 
-// Helper to build Int32Constant or Int64Constant depending on the given
-// machine type.
-Node* BuildConstant(InstructionSelectorTest::StreamBuilder& m, MachineType type,
-                    int64_t value) {
-  switch (type) {
-    case kMachInt32:
-      return m.Int32Constant(value);
-      break;
-
-    case kMachInt64:
-      return m.Int64Constant(value);
-      break;
-
-    default:
-      UNIMPLEMENTED();
-  }
-  return NULL;
-}
-
-
 // ARM64 logical instructions.
 static const MachInst2 kLogicalInstructions[] = {
     {&RawMachineAssembler::Word32And, "Word32And", kArm64And32, kMachInt32},
@@ -306,13 +286,13 @@ TEST_P(InstructionSelectorAddSubTest, ImmediateOnRight) {
   const MachineType type = dpi.machine_type;
   TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
     StreamBuilder m(this, type, type);
-    m.Return((m.*dpi.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
+    m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
     EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
     ASSERT_EQ(2U, s[0]->InputCount());
     EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
-    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
     EXPECT_EQ(1U, s[0]->OutputCount());
   }
 }
@@ -324,7 +304,7 @@ TEST_P(InstructionSelectorAddSubTest, ImmediateOnLeft) {
 
   TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
     StreamBuilder m(this, type, type);
-    m.Return((m.*dpi.constructor)(BuildConstant(m, type, imm), m.Parameter(0)));
+    m.Return((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)));
     Stream s = m.Build();
 
     // Add can support an immediate on the left by commuting, but Sub can't
@@ -334,7 +314,7 @@ TEST_P(InstructionSelectorAddSubTest, ImmediateOnLeft) {
     EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
     ASSERT_EQ(2U, s[0]->InputCount());
     EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
-    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
     EXPECT_EQ(1U, s[0]->OutputCount());
   }
 }
@@ -1024,35 +1004,38 @@ TEST_P(InstructionSelectorComparisonTest, WithParameters) {
 TEST_P(InstructionSelectorComparisonTest, WithImmediate) {
   const MachInst2 cmp = GetParam();
   const MachineType type = cmp.machine_type;
-  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-    // Compare with 0 are turned into tst instruction.
-    if (imm == 0) continue;
-    StreamBuilder m(this, type, type);
-    m.Return((m.*cmp.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
-    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
-  }
-  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-    // Compare with 0 are turned into tst instruction.
-    if (imm == 0) continue;
-    StreamBuilder m(this, type, type);
-    m.Return((m.*cmp.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
-    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  // TODO(all): Add support for testing 64-bit immediates.
+  if (type == kMachInt32) {
+    TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+      // Compare with 0 are turned into tst instruction.
+      if (imm == 0) continue;
+      StreamBuilder m(this, type, type);
+      m.Return((m.*cmp.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+      ASSERT_EQ(2U, s[0]->InputCount());
+      ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+      EXPECT_EQ(kEqual, s[0]->flags_condition());
+    }
+    TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+      // Compare with 0 are turned into tst instruction.
+      if (imm == 0) continue;
+      StreamBuilder m(this, type, type);
+      m.Return((m.*cmp.constructor)(m.Int32Constant(imm), m.Parameter(0)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+      ASSERT_EQ(2U, s[0]->InputCount());
+      ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+      EXPECT_EQ(kEqual, s[0]->flags_condition());
+    }
   }
 }
 
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index eac1ec63e..7c88ee9a6 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -37,13 +37,9 @@ class Arm64OperandGenerator FINAL : public OperandGenerator {
   }
 
   bool CanBeImmediate(Node* node, ImmediateMode mode) {
-    int64_t value;
-    if (node->opcode() == IrOpcode::kInt32Constant)
-      value = OpParameter<int32_t>(node);
-    else if (node->opcode() == IrOpcode::kInt64Constant)
-      value = OpParameter<int64_t>(node);
-    else
-      return false;
+    Int32Matcher m(node);
+    if (!m.HasValue()) return false;
+    int64_t value = m.Value();
     unsigned ignored;
     switch (mode) {
       case kLogical32Imm:
@@ -111,12 +107,11 @@ static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
 
 
 // Shared routine for multiple binary operations.
-template <typename Matcher>
 static void VisitBinop(InstructionSelector* selector, Node* node,
                        InstructionCode opcode, ImmediateMode operand_mode,
                        FlagsContinuation* cont) {
   Arm64OperandGenerator g(selector);
-  Matcher m(node);
+  Int32BinopMatcher m(node);
   InstructionOperand* inputs[4];
   size_t input_count = 0;
   InstructionOperand* outputs[2];
@@ -147,11 +142,10 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
 
 
 // Shared routine for multiple binary operations.
-template <typename Matcher> static void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode, ImmediateMode operand_mode) { FlagsContinuation cont; - VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont); + VisitBinop(selector, node, opcode, operand_mode, &cont); } @@ -268,22 +262,22 @@ void InstructionSelector::VisitStore(Node* node) { void InstructionSelector::VisitWord32And(Node* node) { - VisitBinop<Int32BinopMatcher>(this, node, kArm64And32, kLogical32Imm); + VisitBinop(this, node, kArm64And32, kLogical32Imm); } void InstructionSelector::VisitWord64And(Node* node) { - VisitBinop<Int64BinopMatcher>(this, node, kArm64And, kLogical64Imm); + VisitBinop(this, node, kArm64And, kLogical64Imm); } void InstructionSelector::VisitWord32Or(Node* node) { - VisitBinop<Int32BinopMatcher>(this, node, kArm64Or32, kLogical32Imm); + VisitBinop(this, node, kArm64Or32, kLogical32Imm); } void InstructionSelector::VisitWord64Or(Node* node) { - VisitBinop<Int64BinopMatcher>(this, node, kArm64Or, kLogical64Imm); + VisitBinop(this, node, kArm64Or, kLogical64Imm); } @@ -293,7 +287,7 @@ void InstructionSelector::VisitWord32Xor(Node* node) { if (m.right().Is(-1)) { Emit(kArm64Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node())); } else { - VisitBinop<Int32BinopMatcher>(this, node, kArm64Xor32, kLogical32Imm); + VisitBinop(this, node, kArm64Xor32, kLogical32Imm); } } @@ -304,7 +298,7 @@ void InstructionSelector::VisitWord64Xor(Node* node) { if (m.right().Is(-1)) { Emit(kArm64Not, g.DefineAsRegister(node), g.UseRegister(m.left().node())); } else { - VisitBinop<Int64BinopMatcher>(this, node, kArm64Xor, kLogical32Imm); + VisitBinop(this, node, kArm64Xor, kLogical32Imm); } } @@ -350,12 +344,12 @@ void InstructionSelector::VisitWord64Ror(Node* node) { void InstructionSelector::VisitInt32Add(Node* node) { - VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm); + VisitBinop(this, node, kArm64Add32, kArithmeticImm); } void InstructionSelector::VisitInt64Add(Node* node) { - VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm); + VisitBinop(this, node, kArm64Add, kArithmeticImm); } @@ -366,7 +360,7 @@ void InstructionSelector::VisitInt32Sub(Node* node) { Emit(kArm64Neg32, g.DefineAsRegister(node), g.UseRegister(m.right().node())); } else { - VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm); + VisitBinop(this, node, kArm64Sub32, kArithmeticImm); } } @@ -377,7 +371,7 @@ void InstructionSelector::VisitInt64Sub(Node* node) { if (m.left().Is(0)) { Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node())); } else { - VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm); + VisitBinop(this, node, kArm64Sub, kArithmeticImm); } } @@ -508,13 +502,13 @@ void InstructionSelector::VisitFloat64Mod(Node* node) { void InstructionSelector::VisitInt32AddWithOverflow(Node* node, FlagsContinuation* cont) { - VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, cont); + VisitBinop(this, node, kArm64Add32, kArithmeticImm, cont); } void InstructionSelector::VisitInt32SubWithOverflow(Node* node, FlagsContinuation* cont) { - VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, cont); + VisitBinop(this, node, kArm64Sub32, kArithmeticImm, cont); } @@ -630,8 +624,12 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation, InitializeCallBuffer(call, &buffer, true, false); // Push the arguments to the stack. 
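// Aside: the Xor-with-minus-one peephole above (VisitWord32Xor/VisitWord64Xor)
// rests on the identity x ^ ~0 == ~x, which lets the selector emit a single
// not/mvn instead of a general xor. Checkable in isolation:
#include <cstdint>
static inline uint32_t XorAllOnes(uint32_t x) { return x ^ 0xFFFFFFFFu; }
// XorAllOnes(x) == ~x for every x, so Word32Xor(x, -1) lowers to kArm64Not32.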
+ bool is_c_frame = descriptor->kind() == CallDescriptor::kCallAddress; bool pushed_count_uneven = buffer.pushed_nodes.size() & 1; int aligned_push_count = buffer.pushed_nodes.size(); + if (is_c_frame && pushed_count_uneven) { + aligned_push_count++; + } // TODO(dcarney): claim and poke probably take small immediates, // loop here or whatever. // Bump the stack pointer(s). @@ -646,7 +644,8 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation, // Emit the uneven pushes. if (pushed_count_uneven) { Node* input = buffer.pushed_nodes[slot]; - Emit(kArm64Poke | MiscField::encode(slot), NULL, g.UseRegister(input)); + ArchOpcode opcode = is_c_frame ? kArm64PokePairZero : kArm64Poke; + Emit(opcode | MiscField::encode(slot), NULL, g.UseRegister(input)); slot--; } // Now all pushes can be done in pairs. @@ -664,6 +663,9 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation, opcode = kArchCallCodeObject; break; } + case CallDescriptor::kCallAddress: + opcode = kArchCallAddress; + break; case CallDescriptor::kCallJSFunction: opcode = kArchCallJSFunction; break; @@ -683,6 +685,12 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation, DCHECK(continuation != NULL); call_instr->MarkAsControl(); } + + // Caller clean up of stack for C-style calls. + if (is_c_frame && aligned_push_count > 0) { + DCHECK(deoptimization == NULL && continuation == NULL); + Emit(kArchDrop | MiscField::encode(aligned_push_count), NULL); + } } } // namespace compiler diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc index f22c47978..ae0e10248 100644 --- a/src/compiler/code-generator.cc +++ b/src/compiler/code-generator.cc @@ -21,8 +21,7 @@ CodeGenerator::CodeGenerator(InstructionSequence* code) safepoints_(code->zone()), deoptimization_states_(code->zone()), deoptimization_literals_(code->zone()), - translations_(code->zone()), - last_lazy_deopt_pc_(0) {} + translations_(code->zone()) {} Handle<Code> CodeGenerator::GenerateCode() { @@ -243,7 +242,6 @@ void CodeGenerator::AddSafepointAndDeopt(Instruction* instr) { } if (needs_frame_state) { - MarkLazyDeoptSite(); // If the frame state is present, it starts at argument 1 // (just after the code address). 
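// Aside: the AArch64 ABI keeps sp 16-byte aligned at all times, so a C-style
// call that pushes an odd number of 8-byte words pads the count to even (the
// last push is paired with a zero via kArm64PokePairZero), and the caller
// later pops the padded count via kArchDrop. The padding arithmetic above,
// restated as a standalone sketch:
static int AlignedPushCount(int pushed_words, bool is_c_frame) {
  // An even number of 8-byte words keeps the stack pointer 16-byte aligned.
  if (is_c_frame && (pushed_words & 1) != 0) return pushed_words + 1;
  return pushed_words;
}
// The cleanup is then equivalent to: add sp, sp, #(8 * aligned_push_count).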
InstructionOperandConverter converter(this, instr); @@ -389,7 +387,8 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation, isolate()->factory()->NewNumberFromInt(constant.ToInt32()); break; case Constant::kFloat64: - constant_object = isolate()->factory()->NewNumber(constant.ToFloat64()); + constant_object = + isolate()->factory()->NewHeapNumber(constant.ToFloat64()); break; case Constant::kHeapObject: constant_object = constant.ToHeapObject(); @@ -404,11 +403,6 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation, } } - -void CodeGenerator::MarkLazyDeoptSite() { - last_lazy_deopt_pc_ = masm()->pc_offset(); -} - #if !V8_TURBOFAN_BACKEND void CodeGenerator::AssembleArchInstruction(Instruction* instr) { diff --git a/src/compiler/code-generator.h b/src/compiler/code-generator.h index ddc2f9adb..dfc98cd1e 100644 --- a/src/compiler/code-generator.h +++ b/src/compiler/code-generator.h @@ -98,10 +98,8 @@ class CodeGenerator FINAL : public GapResolver::Assembler { void AddTranslationForOperand(Translation* translation, Instruction* instr, InstructionOperand* op); void AddNopForSmiCodeInlining(); - void EnsureSpaceForLazyDeopt(); - void MarkLazyDeoptSite(); - // =========================================================================== + struct DeoptimizationState : ZoneObject { public: BailoutId bailout_id() const { return bailout_id_; } @@ -128,7 +126,6 @@ class CodeGenerator FINAL : public GapResolver::Assembler { ZoneDeque<DeoptimizationState*> deoptimization_states_; ZoneDeque<Handle<Object> > deoptimization_literals_; TranslationBuffer translations_; - int last_lazy_deopt_pc_; }; } // namespace compiler diff --git a/src/compiler/common-operator-unittest.cc b/src/compiler/common-operator-unittest.cc index 5001770dd..128f8dded 100644 --- a/src/compiler/common-operator-unittest.cc +++ b/src/compiler/common-operator-unittest.cc @@ -4,8 +4,6 @@ #include "src/compiler/common-operator.h" -#include <limits> - #include "src/compiler/operator-properties-inl.h" #include "src/test/test-utils.h" @@ -134,26 +132,9 @@ class CommonOperatorTest : public TestWithZone { const int kArguments[] = {1, 5, 6, 42, 100, 10000, kMaxInt}; -const float kFloat32Values[] = { - std::numeric_limits<float>::min(), -1.0f, -0.0f, 0.0f, 1.0f, - std::numeric_limits<float>::max()}; - } // namespace -TEST_F(CommonOperatorTest, Float32Constant) { - TRACED_FOREACH(float, value, kFloat32Values) { - const Operator* op = common()->Float32Constant(value); - EXPECT_FLOAT_EQ(value, OpParameter<float>(op)); - EXPECT_EQ(0, OperatorProperties::GetValueInputCount(op)); - EXPECT_EQ(0, OperatorProperties::GetTotalInputCount(op)); - EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op)); - EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op)); - EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op)); - } -} - - TEST_F(CommonOperatorTest, ValueEffect) { TRACED_FOREACH(int, arguments, kArguments) { const Operator* op = common()->ValueEffect(arguments); diff --git a/src/compiler/common-operator.cc b/src/compiler/common-operator.cc index 19792bd1d..903484306 100644 --- a/src/compiler/common-operator.cc +++ b/src/compiler/common-operator.cc @@ -137,13 +137,6 @@ const Operator* CommonOperatorBuilder::Int64Constant(int64_t value) { } -const Operator* CommonOperatorBuilder::Float32Constant(volatile float value) { - return new (zone()) - Operator1<float>(IrOpcode::kFloat32Constant, Operator::kPure, 0, 1, - "Float32Constant", value); -} - - const Operator* CommonOperatorBuilder::Float64Constant(volatile 
double value) { return new (zone()) Operator1<double>(IrOpcode::kFloat64Constant, Operator::kPure, 0, 1, diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h index a3659adfc..52c0af29f 100644 --- a/src/compiler/common-operator.h +++ b/src/compiler/common-operator.h @@ -84,7 +84,6 @@ class CommonOperatorBuilder FINAL { const Operator* Int32Constant(int32_t); const Operator* Int64Constant(int64_t); - const Operator* Float32Constant(volatile float); const Operator* Float64Constant(volatile double); const Operator* ExternalConstant(const ExternalReference&); const Operator* NumberConstant(volatile double); diff --git a/src/compiler/graph-unittest.cc b/src/compiler/graph-unittest.cc index 58adb112c..f7faa6de2 100644 --- a/src/compiler/graph-unittest.cc +++ b/src/compiler/graph-unittest.cc @@ -44,12 +44,7 @@ Node* GraphTest::Parameter(int32_t index) { } -Node* GraphTest::Float32Constant(volatile float value) { - return graph()->NewNode(common()->Float32Constant(value)); -} - - -Node* GraphTest::Float64Constant(volatile double value) { +Node* GraphTest::Float64Constant(double value) { return graph()->NewNode(common()->Float64Constant(value)); } @@ -64,7 +59,7 @@ Node* GraphTest::Int64Constant(int64_t value) { } -Node* GraphTest::NumberConstant(volatile double value) { +Node* GraphTest::NumberConstant(double value) { return graph()->NewNode(common()->NumberConstant(value)); } @@ -669,12 +664,6 @@ Matcher<Node*> IsInt64Constant(const Matcher<int64_t>& value_matcher) { } -Matcher<Node*> IsFloat32Constant(const Matcher<float>& value_matcher) { - return MakeMatcher( - new IsConstantMatcher<float>(IrOpcode::kFloat32Constant, value_matcher)); -} - - Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher) { return MakeMatcher( new IsConstantMatcher<double>(IrOpcode::kFloat64Constant, value_matcher)); diff --git a/src/compiler/graph-unittest.h b/src/compiler/graph-unittest.h index 3cda9b7b1..42e4dd9bd 100644 --- a/src/compiler/graph-unittest.h +++ b/src/compiler/graph-unittest.h @@ -31,11 +31,10 @@ class GraphTest : public TestWithContext, public TestWithZone { protected: Node* Parameter(int32_t index); - Node* Float32Constant(volatile float value); - Node* Float64Constant(volatile double value); + Node* Float64Constant(double value); Node* Int32Constant(int32_t value); Node* Int64Constant(int64_t value); - Node* NumberConstant(volatile double value); + Node* NumberConstant(double value); Node* HeapConstant(const Unique<HeapObject>& value); Node* FalseConstant(); Node* TrueConstant(); @@ -66,7 +65,6 @@ Matcher<Node*> IsExternalConstant( const Matcher<ExternalReference>& value_matcher); Matcher<Node*> IsHeapConstant( const Matcher<Unique<HeapObject> >& value_matcher); -Matcher<Node*> IsFloat32Constant(const Matcher<float>& value_matcher); Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher); Matcher<Node*> IsInt32Constant(const Matcher<int32_t>& value_matcher); Matcher<Node*> IsInt64Constant(const Matcher<int64_t>& value_matcher); diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc index 200dcb660..c476eaa9e 100644 --- a/src/compiler/ia32/code-generator-ia32.cc +++ b/src/compiler/ia32/code-generator-ia32.cc @@ -111,8 +111,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) { IA32OperandConverter i(this, instr); switch (ArchOpcodeField::decode(instr->opcode())) { + case kArchCallAddress: + if (HasImmediateInput(instr, 0)) { + // TODO(dcarney): wire up EXTERNAL_REFERENCE instead of 
RUNTIME_ENTRY. + __ call(reinterpret_cast<byte*>(i.InputInt32(0)), + RelocInfo::RUNTIME_ENTRY); + } else { + __ call(i.InputRegister(0)); + } + break; case kArchCallCodeObject: { - EnsureSpaceForLazyDeopt(); if (HasImmediateInput(instr, 0)) { Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0)); __ call(code, RelocInfo::CODE_TARGET); @@ -124,7 +132,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) { break; } case kArchCallJSFunction: { - EnsureSpaceForLazyDeopt(); Register func = i.InputRegister(0); if (FLAG_debug_code) { // Check the function's context matches the context argument. @@ -135,6 +142,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) { AddSafepointAndDeopt(instr); break; } + case kArchDrop: { + int words = MiscField::decode(instr->opcode()); + __ add(esp, Immediate(kPointerSize * words)); + break; + } case kArchJmp: __ jmp(code()->GetLabel(i.InputBlock(0))); break; @@ -934,21 +946,6 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); } - -void CodeGenerator::EnsureSpaceForLazyDeopt() { - int space_needed = Deoptimizer::patch_size(); - if (!linkage()->info()->IsStub()) { - // Ensure that we have enough space after the previous lazy-bailout - // instruction for patching the code here. - int current_pc = masm()->pc_offset(); - if (current_pc < last_lazy_deopt_pc_ + space_needed) { - int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; - __ Nop(padding_size); - } - } - MarkLazyDeoptSite(); -} - #undef __ } // namespace compiler diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc index ce8cb0f63..42702c172 100644 --- a/src/compiler/ia32/instruction-selector-ia32.cc +++ b/src/compiler/ia32/instruction-selector-ia32.cc @@ -531,6 +531,9 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation, opcode = kArchCallCodeObject; break; } + case CallDescriptor::kCallAddress: + opcode = kArchCallAddress; + break; case CallDescriptor::kCallJSFunction: opcode = kArchCallJSFunction; break; @@ -550,6 +553,13 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation, DCHECK(continuation != NULL); call_instr->MarkAsControl(); } + + // Caller clean up of stack for C-style calls. + if (descriptor->kind() == CallDescriptor::kCallAddress && + buffer.pushed_nodes.size() > 0) { + DCHECK(deoptimization == NULL && continuation == NULL); + Emit(kArchDrop | MiscField::encode(buffer.pushed_nodes.size()), NULL); + } } } // namespace compiler diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h index 2d921bd64..669316a43 100644 --- a/src/compiler/instruction-codes.h +++ b/src/compiler/instruction-codes.h @@ -29,8 +29,10 @@ namespace compiler { // Target-specific opcodes that specify which assembly sequence to emit. // Most opcodes specify a single instruction. 
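// Aside: the EnsureSpaceForLazyDeopt deleted above guaranteed at least
// Deoptimizer::patch_size() bytes of machine code between consecutive
// lazy-bailout sites, so a later lazy deopt can patch a call sequence in
// place without clobbering the next site. Its padding computation, restated
// as a standalone function:
static int LazyDeoptPadding(int current_pc, int last_deopt_pc,
                            int patch_size) {
  int earliest_safe_pc = last_deopt_pc + patch_size;
  return (current_pc < earliest_safe_pc) ? earliest_safe_pc - current_pc : 0;
}
// A nonzero result was emitted as that many bytes of nops before the call.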
#define ARCH_OPCODE_LIST(V) \ + V(ArchCallAddress) \ V(ArchCallCodeObject) \ V(ArchCallJSFunction) \ + V(ArchDrop) \ V(ArchJmp) \ V(ArchNop) \ V(ArchRet) \ diff --git a/src/compiler/instruction-selector-unittest.h b/src/compiler/instruction-selector-unittest.h index 4e12dabbe..c236853f0 100644 --- a/src/compiler/instruction-selector-unittest.h +++ b/src/compiler/instruction-selector-unittest.h @@ -147,10 +147,6 @@ class InstructionSelectorTest : public TestWithContext, public TestWithZone { return ToConstant(operand).ToInt32(); } - int64_t ToInt64(const InstructionOperand* operand) const { - return ToConstant(operand).ToInt64(); - } - int ToVreg(const InstructionOperand* operand) const { if (operand->IsConstant()) return operand->index(); EXPECT_EQ(InstructionOperand::UNALLOCATED, operand->kind()); diff --git a/src/compiler/js-builtin-reducer.cc b/src/compiler/js-builtin-reducer.cc index 83161e14a..42becb307 100644 --- a/src/compiler/js-builtin-reducer.cc +++ b/src/compiler/js-builtin-reducer.cc @@ -34,49 +34,34 @@ class JSCallReduction { // constant callee being a well-known builtin with a BuiltinFunctionId. bool HasBuiltinFunctionId() { if (node_->opcode() != IrOpcode::kJSCallFunction) return false; - HeapObjectMatcher<Object> m(NodeProperties::GetValueInput(node_, 0)); - if (!m.HasValue() || !m.Value().handle()->IsJSFunction()) return false; - Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value().handle()); - return function->shared()->HasBuiltinFunctionId(); + HeapObjectMatcher<JSFunction> m(NodeProperties::GetValueInput(node_, 0)); + return m.HasValue() && m.Value().handle()->shared()->HasBuiltinFunctionId(); } // Retrieves the BuiltinFunctionId as described above. BuiltinFunctionId GetBuiltinFunctionId() { DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode()); - HeapObjectMatcher<Object> m(NodeProperties::GetValueInput(node_, 0)); - Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value().handle()); - return function->shared()->builtin_function_id(); + HeapObjectMatcher<JSFunction> m(NodeProperties::GetValueInput(node_, 0)); + return m.Value().handle()->shared()->builtin_function_id(); } - // Determines whether the call takes zero inputs. - bool InputsMatchZero() { return GetJSCallArity() == 0; } - // Determines whether the call takes one input of the given type. - bool InputsMatchOne(Type* t1) { + bool InputsMatch(Type* t1) { return GetJSCallArity() == 1 && NodeProperties::GetBounds(GetJSCallInput(0)).upper->Is(t1); } // Determines whether the call takes two inputs of the given types. - bool InputsMatchTwo(Type* t1, Type* t2) { + bool InputsMatch(Type* t1, Type* t2) { return GetJSCallArity() == 2 && NodeProperties::GetBounds(GetJSCallInput(0)).upper->Is(t1) && NodeProperties::GetBounds(GetJSCallInput(1)).upper->Is(t2); } - // Determines whether the call takes inputs all of the given type. - bool InputsMatchAll(Type* t) { - for (int i = 0; i < GetJSCallArity(); i++) { - if (!NodeProperties::GetBounds(GetJSCallInput(i)).upper->Is(t)) { - return false; - } - } - return true; - } - Node* left() { return GetJSCallInput(0); } Node* right() { return GetJSCallInput(1); } + protected: int GetJSCallArity() { DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode()); // Skip first (i.e. callee) and second (i.e. receiver) operand. @@ -95,42 +80,10 @@ class JSCallReduction { }; -// ECMA-262, section 15.8.2.11. 
-Reduction JSBuiltinReducer::ReduceMathMax(Node* node) { - JSCallReduction r(node); - if (r.InputsMatchZero()) { - // Math.max() -> -Infinity - return Replace(jsgraph()->Constant(-V8_INFINITY)); - } - if (r.InputsMatchOne(Type::Number())) { - // Math.max(a:number) -> a - return Replace(r.left()); - } - if (r.InputsMatchAll(Type::Integral32())) { - // Math.max(a:int32, b:int32, ...) - Node* value = r.GetJSCallInput(0); - for (int i = 1; i < r.GetJSCallArity(); i++) { - Node* p = r.GetJSCallInput(i); - Node* control = graph()->start(); - Node* tag = graph()->NewNode(simplified()->NumberLessThan(), value, p); - - Node* branch = graph()->NewNode(common()->Branch(), tag, control); - Node* if_true = graph()->NewNode(common()->IfTrue(), branch); - Node* if_false = graph()->NewNode(common()->IfFalse(), branch); - Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false); - - value = graph()->NewNode(common()->Phi(kMachNone, 2), p, value, merge); - } - return Replace(value); - } - return NoChange(); -} - - // ES6 draft 08-24-14, section 20.2.2.19. Reduction JSBuiltinReducer::ReduceMathImul(Node* node) { JSCallReduction r(node); - if (r.InputsMatchTwo(Type::Integral32(), Type::Integral32())) { + if (r.InputsMatch(Type::Integral32(), Type::Integral32())) { // Math.imul(a:int32, b:int32) -> Int32Mul(a, b) Node* value = graph()->NewNode(machine()->Int32Mul(), r.left(), r.right()); return Replace(value); @@ -145,8 +98,6 @@ Reduction JSBuiltinReducer::Reduce(Node* node) { // Dispatch according to the BuiltinFunctionId if present. if (!r.HasBuiltinFunctionId()) return NoChange(); switch (r.GetBuiltinFunctionId()) { - case kMathMax: - return ReplaceWithPureReduction(node, ReduceMathMax(node)); case kMathImul: return ReplaceWithPureReduction(node, ReduceMathImul(node)); default: diff --git a/src/compiler/js-builtin-reducer.h b/src/compiler/js-builtin-reducer.h index 92c7b4ae6..bbff3ef5e 100644 --- a/src/compiler/js-builtin-reducer.h +++ b/src/compiler/js-builtin-reducer.h @@ -24,13 +24,11 @@ class JSBuiltinReducer FINAL : public Reducer { virtual Reduction Reduce(Node* node) OVERRIDE; private: - JSGraph* jsgraph() const { return jsgraph_; } - Graph* graph() const { return jsgraph_->graph(); } - CommonOperatorBuilder* common() const { return jsgraph_->common(); } - MachineOperatorBuilder* machine() const { return jsgraph_->machine(); } + Graph* graph() { return jsgraph_->graph(); } + CommonOperatorBuilder* common() { return jsgraph_->common(); } + MachineOperatorBuilder* machine() { return jsgraph_->machine(); } SimplifiedOperatorBuilder* simplified() { return &simplified_; } - Reduction ReduceMathMax(Node* node); Reduction ReduceMathImul(Node* node); JSGraph* jsgraph_; diff --git a/src/compiler/js-graph.h b/src/compiler/js-graph.h index 2b2dfd114..a907bc5bc 100644 --- a/src/compiler/js-graph.h +++ b/src/compiler/js-graph.h @@ -65,9 +65,6 @@ class JSGraph : public ZoneObject { // Creates a Int32Constant node, usually canonicalized. Node* Int32Constant(int32_t value); - Node* Uint32Constant(uint32_t value) { - return Int32Constant(bit_cast<int32_t>(value)); - } // Creates a Float64Constant node, usually canonicalized. 
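// Aside: the Math.imul reduction above is sound because ES6 Math.imul is
// plain 32-bit two's-complement multiplication, so for Integral32-typed
// inputs a single machine Int32Mul reproduces it exactly:
#include <cstdint>
static int32_t Imul(int32_t a, int32_t b) {
  // Multiply modulo 2^32 in unsigned arithmetic (no signed-overflow UB),
  // then reinterpret the low 32 bits as signed.
  return static_cast<int32_t>(static_cast<uint32_t>(a) *
                              static_cast<uint32_t>(b));
}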
Node* Float64Constant(double value); @@ -112,7 +109,6 @@ class JSGraph : public ZoneObject { Factory* factory() { return isolate()->factory(); } }; - } // namespace compiler } // namespace internal } // namespace v8 diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc index 130c5cbcb..bc3ea82bc 100644 --- a/src/compiler/js-typed-lowering.cc +++ b/src/compiler/js-typed-lowering.cc @@ -571,14 +571,13 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) { // TODO(mstarzinger): This lowering is not correct if: // a) The typed array turns external (i.e. MaterializeArrayBuffer) // b) The typed array or it's buffer is neutered. + // c) The index is out of bounds if (key_type->Is(Type::Integral32()) && base_type->IsConstant() && base_type->AsConstant()->Value()->IsJSTypedArray()) { // JSStoreProperty(typed-array, int32, value) JSTypedArray* array = JSTypedArray::cast(*base_type->AsConstant()->Value()); ElementsKind elements_kind = array->map()->elements_kind(); ExternalArrayType type = array->type(); - uint32_t length; - CHECK(array->length()->ToUint32(&length)); ElementAccess element_access; Node* elements = graph()->NewNode( simplified()->LoadField(AccessBuilder::ForJSObjectElements()), base, @@ -592,24 +591,11 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) { DCHECK(IsFixedTypedArrayElementsKind(elements_kind)); element_access = AccessBuilder::ForTypedArrayElement(type, false); } - - Node* check = graph()->NewNode(machine()->Uint32LessThan(), key, - jsgraph()->Uint32Constant(length)); - Node* branch = graph()->NewNode(common()->Branch(), check, - NodeProperties::GetControlInput(node)); - - Node* if_true = graph()->NewNode(common()->IfTrue(), branch); - Node* store = graph()->NewNode( - simplified()->StoreElement(element_access), elements, key, value, - NodeProperties::GetEffectInput(node), if_true); - - Node* if_false = graph()->NewNode(common()->IfFalse(), branch); - - Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false); - Node* phi = graph()->NewNode(common()->EffectPhi(2), store, - NodeProperties::GetEffectInput(node), merge); - - return ReplaceWith(phi); + Node* store = + graph()->NewNode(simplified()->StoreElement(element_access), elements, + key, value, NodeProperties::GetEffectInput(node), + NodeProperties::GetControlInput(node)); + return ReplaceEagerly(node, store); } return NoChange(); } diff --git a/src/compiler/machine-operator-reducer-unittest.cc b/src/compiler/machine-operator-reducer-unittest.cc index f3073ab79..616f5d4eb 100644 --- a/src/compiler/machine-operator-reducer-unittest.cc +++ b/src/compiler/machine-operator-reducer-unittest.cc @@ -46,43 +46,6 @@ class MachineOperatorReducerTestWithParam namespace { -static const float kFloat32Values[] = { - -std::numeric_limits<float>::infinity(), -2.70497e+38f, -1.4698e+37f, - -1.22813e+35f, -1.20555e+35f, -1.34584e+34f, - -1.0079e+32f, -6.49364e+26f, -3.06077e+25f, - -1.46821e+25f, -1.17658e+23f, -1.9617e+22f, - -2.7357e+20f, -1.48708e+13f, -1.89633e+12f, - -4.66622e+11f, -2.22581e+11f, -1.45381e+10f, - -1.3956e+09f, -1.32951e+09f, -1.30721e+09f, - -1.19756e+09f, -9.26822e+08f, -6.35647e+08f, - -4.00037e+08f, -1.81227e+08f, -5.09256e+07f, - -964300.0f, -192446.0f, -28455.0f, - -27194.0f, -26401.0f, -20575.0f, - -17069.0f, -9167.0f, -960.178f, - -113.0f, -62.0f, -15.0f, - -7.0f, -0.0256635f, -4.60374e-07f, - -3.63759e-10f, -4.30175e-14f, -5.27385e-15f, - -1.48084e-15f, -1.05755e-19f, -3.2995e-21f, - -1.67354e-23f, -1.11885e-23f, -1.78506e-30f, - -5.07594e-31f, 
-3.65799e-31f, -1.43718e-34f, - -1.27126e-38f, -0.0f, 0.0f, - 1.17549e-38f, 1.56657e-37f, 4.08512e-29f, - 3.31357e-28f, 6.25073e-22f, 4.1723e-13f, - 1.44343e-09f, 5.27004e-08f, 9.48298e-08f, - 5.57888e-07f, 4.89988e-05f, 0.244326f, - 12.4895f, 19.0f, 47.0f, - 106.0f, 538.324f, 564.536f, - 819.124f, 7048.0f, 12611.0f, - 19878.0f, 20309.0f, 797056.0f, - 1.77219e+09f, 1.51116e+11f, 4.18193e+13f, - 3.59167e+16f, 3.38211e+19f, 2.67488e+20f, - 1.78831e+21f, 9.20914e+21f, 8.35654e+23f, - 1.4495e+24f, 5.94015e+25f, 4.43608e+30f, - 2.44502e+33f, 2.61152e+33f, 1.38178e+37f, - 1.71306e+37f, 3.31899e+38f, 3.40282e+38f, - std::numeric_limits<float>::infinity()}; - - static const double kFloat64Values[] = { -V8_INFINITY, -4.23878e+275, -5.82632e+265, -6.60355e+220, -6.26172e+212, -2.56222e+211, -4.82408e+201, -1.84106e+157, -1.63662e+127, -1.55772e+100, @@ -202,7 +165,7 @@ static const uint32_t kUint32Values[] = { namespace { struct UnaryOperator { - const Operator* (MachineOperatorBuilder::*constructor)(); + const Operator* (MachineOperatorBuilder::*constructor)() const; const char* constructor_name; }; @@ -243,20 +206,6 @@ INSTANTIATE_TEST_CASE_P(MachineOperatorReducerTest, // ----------------------------------------------------------------------------- -// ChangeFloat64ToFloat32 - - -TEST_F(MachineOperatorReducerTest, ChangeFloat64ToFloat32WithConstant) { - TRACED_FOREACH(float, x, kFloat32Values) { - Reduction reduction = Reduce(graph()->NewNode( - machine()->ChangeFloat32ToFloat64(), Float32Constant(x))); - ASSERT_TRUE(reduction.Changed()); - EXPECT_THAT(reduction.replacement(), IsFloat64Constant(x)); - } -} - - -// ----------------------------------------------------------------------------- // ChangeFloat64ToInt32 @@ -366,31 +315,6 @@ TEST_F(MachineOperatorReducerTest, ChangeUint32ToUint64WithConstant) { // ----------------------------------------------------------------------------- -// TruncateFloat64ToFloat32 - - -TEST_F(MachineOperatorReducerTest, - TruncateFloat64ToFloat32WithChangeFloat32ToFloat64) { - Node* value = Parameter(0); - Reduction reduction = Reduce(graph()->NewNode( - machine()->TruncateFloat64ToFloat32(), - graph()->NewNode(machine()->ChangeFloat32ToFloat64(), value))); - ASSERT_TRUE(reduction.Changed()); - EXPECT_EQ(value, reduction.replacement()); -} - - -TEST_F(MachineOperatorReducerTest, TruncateFloat64ToFloat32WithConstant) { - TRACED_FOREACH(double, x, kFloat64Values) { - Reduction reduction = Reduce(graph()->NewNode( - machine()->TruncateFloat64ToFloat32(), Float64Constant(x))); - ASSERT_TRUE(reduction.Changed()); - EXPECT_THAT(reduction.replacement(), IsFloat32Constant(DoubleToFloat32(x))); - } -} - - -// ----------------------------------------------------------------------------- // TruncateFloat64ToInt32 diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc index 53ee81075..936deca8b 100644 --- a/src/compiler/machine-operator-reducer.cc +++ b/src/compiler/machine-operator-reducer.cc @@ -21,11 +21,6 @@ MachineOperatorReducer::MachineOperatorReducer(JSGraph* jsgraph) MachineOperatorReducer::~MachineOperatorReducer() {} -Node* MachineOperatorReducer::Float32Constant(volatile float value) { - return graph()->NewNode(common()->Float32Constant(value)); -} - - Node* MachineOperatorReducer::Float64Constant(volatile double value) { return jsgraph()->Float64Constant(value); } @@ -388,11 +383,6 @@ Reduction MachineOperatorReducer::Reduce(Node* node) { } break; } - case IrOpcode::kChangeFloat32ToFloat64: { - Float32Matcher 
m(node->InputAt(0)); - if (m.HasValue()) return ReplaceFloat64(m.Value()); - break; - } case IrOpcode::kChangeFloat64ToInt32: { Float64Matcher m(node->InputAt(0)); if (m.HasValue()) return ReplaceInt32(FastD2I(m.Value())); @@ -437,12 +427,6 @@ Reduction MachineOperatorReducer::Reduce(Node* node) { if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0)); break; } - case IrOpcode::kTruncateFloat64ToFloat32: { - Float64Matcher m(node->InputAt(0)); - if (m.HasValue()) return ReplaceFloat32(DoubleToFloat32(m.Value())); - if (m.IsChangeFloat32ToFloat64()) return Replace(m.node()->InputAt(0)); - break; - } // TODO(turbofan): strength-reduce and fold floating point operations. default: break; diff --git a/src/compiler/machine-operator-reducer.h b/src/compiler/machine-operator-reducer.h index c79ceae20..57fcdee8e 100644 --- a/src/compiler/machine-operator-reducer.h +++ b/src/compiler/machine-operator-reducer.h @@ -27,15 +27,11 @@ class MachineOperatorReducer FINAL : public Reducer { virtual Reduction Reduce(Node* node) OVERRIDE; private: - Node* Float32Constant(volatile float value); Node* Float64Constant(volatile double value); Node* Int32Constant(int32_t value); Node* Int64Constant(int64_t value); Reduction ReplaceBool(bool value) { return ReplaceInt32(value ? 1 : 0); } - Reduction ReplaceFloat32(volatile float value) { - return Replace(Float32Constant(value)); - } Reduction ReplaceFloat64(volatile double value) { return Replace(Float64Constant(value)); } diff --git a/src/compiler/machine-operator-unittest.cc b/src/compiler/machine-operator-unittest.cc index 6aaf06f75..584287240 100644 --- a/src/compiler/machine-operator-unittest.cc +++ b/src/compiler/machine-operator-unittest.cc @@ -169,7 +169,7 @@ INSTANTIATE_TEST_CASE_P( namespace { struct PureOperator { - const Operator* (MachineOperatorBuilder::*constructor)(); + const Operator* (MachineOperatorBuilder::*constructor)() const; IrOpcode::Value opcode; int value_input_count; int value_output_count; @@ -187,33 +187,32 @@ const PureOperator kPureOperators[] = { &MachineOperatorBuilder::Name, IrOpcode::k##Name, input_count, \ output_count \ } - PURE(Word32And, 2, 1), PURE(Word32Or, 2, 1), - PURE(Word32Xor, 2, 1), PURE(Word32Shl, 2, 1), - PURE(Word32Shr, 2, 1), PURE(Word32Sar, 2, 1), - PURE(Word32Ror, 2, 1), PURE(Word32Equal, 2, 1), - PURE(Word64And, 2, 1), PURE(Word64Or, 2, 1), - PURE(Word64Xor, 2, 1), PURE(Word64Shl, 2, 1), - PURE(Word64Shr, 2, 1), PURE(Word64Sar, 2, 1), - PURE(Word64Ror, 2, 1), PURE(Word64Equal, 2, 1), - PURE(Int32Add, 2, 1), PURE(Int32AddWithOverflow, 2, 2), - PURE(Int32Sub, 2, 1), PURE(Int32SubWithOverflow, 2, 2), - PURE(Int32Mul, 2, 1), PURE(Int32Div, 2, 1), - PURE(Int32UDiv, 2, 1), PURE(Int32Mod, 2, 1), - PURE(Int32UMod, 2, 1), PURE(Int32LessThan, 2, 1), - PURE(Int32LessThanOrEqual, 2, 1), PURE(Uint32LessThan, 2, 1), - PURE(Uint32LessThanOrEqual, 2, 1), PURE(Int64Add, 2, 1), - PURE(Int64Sub, 2, 1), PURE(Int64Mul, 2, 1), - PURE(Int64Div, 2, 1), PURE(Int64UDiv, 2, 1), - PURE(Int64Mod, 2, 1), PURE(Int64UMod, 2, 1), - PURE(Int64LessThan, 2, 1), PURE(Int64LessThanOrEqual, 2, 1), - PURE(ChangeFloat32ToFloat64, 1, 1), PURE(ChangeFloat64ToInt32, 1, 1), - PURE(ChangeFloat64ToUint32, 1, 1), PURE(ChangeInt32ToInt64, 1, 1), - PURE(ChangeUint32ToFloat64, 1, 1), PURE(ChangeUint32ToUint64, 1, 1), - PURE(TruncateFloat64ToFloat32, 1, 1), PURE(TruncateFloat64ToInt32, 1, 1), - PURE(TruncateInt64ToInt32, 1, 1), PURE(Float64Add, 2, 1), - PURE(Float64Sub, 2, 1), PURE(Float64Mul, 2, 1), - PURE(Float64Div, 2, 1), PURE(Float64Mod, 2, 1), 
- PURE(Float64Equal, 2, 1), PURE(Float64LessThan, 2, 1), + PURE(Word32And, 2, 1), PURE(Word32Or, 2, 1), + PURE(Word32Xor, 2, 1), PURE(Word32Shl, 2, 1), + PURE(Word32Shr, 2, 1), PURE(Word32Sar, 2, 1), + PURE(Word32Ror, 2, 1), PURE(Word32Equal, 2, 1), + PURE(Word64And, 2, 1), PURE(Word64Or, 2, 1), + PURE(Word64Xor, 2, 1), PURE(Word64Shl, 2, 1), + PURE(Word64Shr, 2, 1), PURE(Word64Sar, 2, 1), + PURE(Word64Ror, 2, 1), PURE(Word64Equal, 2, 1), + PURE(Int32Add, 2, 1), PURE(Int32AddWithOverflow, 2, 2), + PURE(Int32Sub, 2, 1), PURE(Int32SubWithOverflow, 2, 2), + PURE(Int32Mul, 2, 1), PURE(Int32Div, 2, 1), + PURE(Int32UDiv, 2, 1), PURE(Int32Mod, 2, 1), + PURE(Int32UMod, 2, 1), PURE(Int32LessThan, 2, 1), + PURE(Int32LessThanOrEqual, 2, 1), PURE(Uint32LessThan, 2, 1), + PURE(Uint32LessThanOrEqual, 2, 1), PURE(Int64Add, 2, 1), + PURE(Int64Sub, 2, 1), PURE(Int64Mul, 2, 1), + PURE(Int64Div, 2, 1), PURE(Int64UDiv, 2, 1), + PURE(Int64Mod, 2, 1), PURE(Int64UMod, 2, 1), + PURE(Int64LessThan, 2, 1), PURE(Int64LessThanOrEqual, 2, 1), + PURE(ChangeFloat64ToInt32, 1, 1), PURE(ChangeFloat64ToUint32, 1, 1), + PURE(ChangeInt32ToInt64, 1, 1), PURE(ChangeUint32ToFloat64, 1, 1), + PURE(ChangeUint32ToUint64, 1, 1), PURE(TruncateFloat64ToInt32, 1, 1), + PURE(TruncateInt64ToInt32, 1, 1), PURE(Float64Add, 2, 1), + PURE(Float64Sub, 2, 1), PURE(Float64Mul, 2, 1), + PURE(Float64Div, 2, 1), PURE(Float64Mod, 2, 1), + PURE(Float64Equal, 2, 1), PURE(Float64LessThan, 2, 1), PURE(Float64LessThanOrEqual, 2, 1) #undef PURE }; diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc index eb3e948dd..9dea0373a 100644 --- a/src/compiler/machine-operator.cc +++ b/src/compiler/machine-operator.cc @@ -97,14 +97,12 @@ struct StaticParameterTraits<LoadRepresentation> { V(Int64UMod, Operator::kNoProperties, 2, 1) \ V(Int64LessThan, Operator::kNoProperties, 2, 1) \ V(Int64LessThanOrEqual, Operator::kNoProperties, 2, 1) \ - V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 1) \ V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 1) \ V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 1) \ V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 1) \ V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 1) \ V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 1) \ V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 1) \ - V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 1) \ V(TruncateFloat64ToInt32, Operator::kNoProperties, 1, 1) \ V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 1) \ V(Float64Add, Operator::kCommutative, 2, 1) \ @@ -196,12 +194,14 @@ MachineOperatorBuilder::MachineOperatorBuilder(MachineType word) #define PURE(Name, properties, input_count, output_count) \ - const Operator* MachineOperatorBuilder::Name() { return &impl_.k##Name; } + const Operator* MachineOperatorBuilder::Name() const { \ + return &impl_.k##Name; \ + } PURE_OP_LIST(PURE) #undef PURE -const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) { +const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) const { switch (rep) { #define LOAD(Type) \ case k##Type: \ @@ -217,7 +217,7 @@ const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) { } -const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) { +const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) const { switch (rep.machine_type()) { #define STORE(Type) \ case k##Type: \ diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h index 23b7ef645..ec911f49a 100644 --- 
a/src/compiler/machine-operator.h +++ b/src/compiler/machine-operator.h @@ -62,84 +62,84 @@ class MachineOperatorBuilder FINAL { public: explicit MachineOperatorBuilder(MachineType word = kMachPtr); - const Operator* Word32And(); - const Operator* Word32Or(); - const Operator* Word32Xor(); - const Operator* Word32Shl(); - const Operator* Word32Shr(); - const Operator* Word32Sar(); - const Operator* Word32Ror(); - const Operator* Word32Equal(); - - const Operator* Word64And(); - const Operator* Word64Or(); - const Operator* Word64Xor(); - const Operator* Word64Shl(); - const Operator* Word64Shr(); - const Operator* Word64Sar(); - const Operator* Word64Ror(); - const Operator* Word64Equal(); - - const Operator* Int32Add(); - const Operator* Int32AddWithOverflow(); - const Operator* Int32Sub(); - const Operator* Int32SubWithOverflow(); - const Operator* Int32Mul(); - const Operator* Int32Div(); - const Operator* Int32UDiv(); - const Operator* Int32Mod(); - const Operator* Int32UMod(); - const Operator* Int32LessThan(); - const Operator* Int32LessThanOrEqual(); - const Operator* Uint32LessThan(); - const Operator* Uint32LessThanOrEqual(); - - const Operator* Int64Add(); - const Operator* Int64Sub(); - const Operator* Int64Mul(); - const Operator* Int64Div(); - const Operator* Int64UDiv(); - const Operator* Int64Mod(); - const Operator* Int64UMod(); - const Operator* Int64LessThan(); - const Operator* Int64LessThanOrEqual(); - - // These operators change the representation of numbers while preserving the - // value of the number. Narrowing operators assume the input is representable - // in the target type and are *not* defined for other inputs. - // Use narrowing change operators only when there is a static guarantee that - // the input value is representable in the target value. - const Operator* ChangeFloat32ToFloat64(); - const Operator* ChangeFloat64ToInt32(); // narrowing - const Operator* ChangeFloat64ToUint32(); // narrowing - const Operator* ChangeInt32ToFloat64(); - const Operator* ChangeInt32ToInt64(); - const Operator* ChangeUint32ToFloat64(); - const Operator* ChangeUint32ToUint64(); - - // These operators truncate numbers, both changing the representation of - // the number and mapping multiple input values onto the same output value. - const Operator* TruncateFloat64ToFloat32(); - const Operator* TruncateFloat64ToInt32(); // JavaScript semantics. 
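// Aside: "JavaScript semantics" for TruncateFloat64ToInt32 above means
// ECMA-262 ToInt32: NaN and the infinities map to 0, everything else
// truncates toward zero and wraps modulo 2^32. A portable reference sketch:
#include <cmath>
#include <cstdint>
static int32_t JsToInt32(double d) {
  if (!std::isfinite(d)) return 0;         // NaN, +/-Infinity -> 0
  double t = std::trunc(d);                // round toward zero
  double m = std::fmod(t, 4294967296.0);   // wrap modulo 2^32 (fmod is exact)
  if (m < 0) m += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}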
- const Operator* TruncateInt64ToInt32(); + const Operator* Word32And() const WARN_UNUSED_RESULT; + const Operator* Word32Or() const WARN_UNUSED_RESULT; + const Operator* Word32Xor() const WARN_UNUSED_RESULT; + const Operator* Word32Shl() const WARN_UNUSED_RESULT; + const Operator* Word32Shr() const WARN_UNUSED_RESULT; + const Operator* Word32Sar() const WARN_UNUSED_RESULT; + const Operator* Word32Ror() const WARN_UNUSED_RESULT; + const Operator* Word32Equal() const WARN_UNUSED_RESULT; + + const Operator* Word64And() const WARN_UNUSED_RESULT; + const Operator* Word64Or() const WARN_UNUSED_RESULT; + const Operator* Word64Xor() const WARN_UNUSED_RESULT; + const Operator* Word64Shl() const WARN_UNUSED_RESULT; + const Operator* Word64Shr() const WARN_UNUSED_RESULT; + const Operator* Word64Sar() const WARN_UNUSED_RESULT; + const Operator* Word64Ror() const WARN_UNUSED_RESULT; + const Operator* Word64Equal() const WARN_UNUSED_RESULT; + + const Operator* Int32Add() const WARN_UNUSED_RESULT; + const Operator* Int32AddWithOverflow() const WARN_UNUSED_RESULT; + const Operator* Int32Sub() const WARN_UNUSED_RESULT; + const Operator* Int32SubWithOverflow() const WARN_UNUSED_RESULT; + const Operator* Int32Mul() const WARN_UNUSED_RESULT; + const Operator* Int32Div() const WARN_UNUSED_RESULT; + const Operator* Int32UDiv() const WARN_UNUSED_RESULT; + const Operator* Int32Mod() const WARN_UNUSED_RESULT; + const Operator* Int32UMod() const WARN_UNUSED_RESULT; + const Operator* Int32LessThan() const WARN_UNUSED_RESULT; + const Operator* Int32LessThanOrEqual() const WARN_UNUSED_RESULT; + const Operator* Uint32LessThan() const WARN_UNUSED_RESULT; + const Operator* Uint32LessThanOrEqual() const WARN_UNUSED_RESULT; + + const Operator* Int64Add() const WARN_UNUSED_RESULT; + const Operator* Int64Sub() const WARN_UNUSED_RESULT; + const Operator* Int64Mul() const WARN_UNUSED_RESULT; + const Operator* Int64Div() const WARN_UNUSED_RESULT; + const Operator* Int64UDiv() const WARN_UNUSED_RESULT; + const Operator* Int64Mod() const WARN_UNUSED_RESULT; + const Operator* Int64UMod() const WARN_UNUSED_RESULT; + const Operator* Int64LessThan() const WARN_UNUSED_RESULT; + const Operator* Int64LessThanOrEqual() const WARN_UNUSED_RESULT; + + // Convert representation of integers between float64 and int32/uint32. + // The precise rounding mode and handling of out of range inputs are *not* + // defined for these operators, since they are intended only for use with + // integers. + const Operator* ChangeInt32ToFloat64() const WARN_UNUSED_RESULT; + const Operator* ChangeUint32ToFloat64() const WARN_UNUSED_RESULT; + const Operator* ChangeFloat64ToInt32() const WARN_UNUSED_RESULT; + const Operator* ChangeFloat64ToUint32() const WARN_UNUSED_RESULT; + + // Sign/zero extend int32/uint32 to int64/uint64. + const Operator* ChangeInt32ToInt64() const WARN_UNUSED_RESULT; + const Operator* ChangeUint32ToUint64() const WARN_UNUSED_RESULT; + + // Truncate double to int32 using JavaScript semantics. + const Operator* TruncateFloat64ToInt32() const WARN_UNUSED_RESULT; + + // Truncate the high order bits and convert the remaining bits to int32. + const Operator* TruncateInt64ToInt32() const WARN_UNUSED_RESULT; // Floating point operators always operate with IEEE 754 round-to-nearest. 
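// Aside on the round-to-nearest comment above: Float64Mod in particular has
// C fmod semantics (remainder of truncated division, result takes the
// dividend's sign), which is also what JavaScript's % does on doubles:
#include <cmath>
static double Float64ModRef(double a, double b) { return std::fmod(a, b); }
// e.g. Float64ModRef(5.5, 2.0) == 1.5 and Float64ModRef(-5.5, 2.0) == -1.5.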
- const Operator* Float64Add(); - const Operator* Float64Sub(); - const Operator* Float64Mul(); - const Operator* Float64Div(); - const Operator* Float64Mod(); + const Operator* Float64Add() const WARN_UNUSED_RESULT; + const Operator* Float64Sub() const WARN_UNUSED_RESULT; + const Operator* Float64Mul() const WARN_UNUSED_RESULT; + const Operator* Float64Div() const WARN_UNUSED_RESULT; + const Operator* Float64Mod() const WARN_UNUSED_RESULT; // Floating point comparisons complying to IEEE 754. - const Operator* Float64Equal(); - const Operator* Float64LessThan(); - const Operator* Float64LessThanOrEqual(); + const Operator* Float64Equal() const WARN_UNUSED_RESULT; + const Operator* Float64LessThan() const WARN_UNUSED_RESULT; + const Operator* Float64LessThanOrEqual() const WARN_UNUSED_RESULT; // load [base + index] - const Operator* Load(LoadRepresentation rep); + const Operator* Load(LoadRepresentation rep) const WARN_UNUSED_RESULT; // store [base + index], value - const Operator* Store(StoreRepresentation rep); + const Operator* Store(StoreRepresentation rep) const WARN_UNUSED_RESULT; // Target machine word-size assumed by this builder. bool Is32() const { return word() == kRepWord32; } @@ -167,7 +167,7 @@ class MachineOperatorBuilder FINAL { V(Int, LessThan) \ V(Int, LessThanOrEqual) #define PSEUDO_OP(Prefix, Suffix) \ - const Operator* Prefix##Suffix() { \ + const Operator* Prefix##Suffix() const { \ return Is32() ? Prefix##32##Suffix() : Prefix##64##Suffix(); \ } PSEUDO_OP_LIST(PSEUDO_OP) diff --git a/src/compiler/node-matchers.h b/src/compiler/node-matchers.h index e62eaeea8..d73a9268b 100644 --- a/src/compiler/node-matchers.h +++ b/src/compiler/node-matchers.h @@ -90,7 +90,6 @@ struct FloatMatcher FINAL : public ValueMatcher<T, kOpcode> { bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); } }; -typedef FloatMatcher<float, IrOpcode::kFloat32Constant> Float32Matcher; typedef FloatMatcher<double, IrOpcode::kFloat64Constant> Float64Matcher; typedef FloatMatcher<double, IrOpcode::kNumberConstant> NumberMatcher; diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h index dabf5c57e..f295eac09 100644 --- a/src/compiler/opcodes.h +++ b/src/compiler/opcodes.h @@ -25,7 +25,6 @@ #define LEAF_OP_LIST(V) \ V(Int32Constant) \ V(Int64Constant) \ - V(Float32Constant) \ V(Float64Constant) \ V(ExternalConstant) \ V(NumberConstant) \ @@ -162,64 +161,62 @@ V(StoreElement) // Opcodes for Machine-level operators. 
-#define MACHINE_OP_LIST(V) \ - V(Load) \ - V(Store) \ - V(Word32And) \ - V(Word32Or) \ - V(Word32Xor) \ - V(Word32Shl) \ - V(Word32Shr) \ - V(Word32Sar) \ - V(Word32Ror) \ - V(Word32Equal) \ - V(Word64And) \ - V(Word64Or) \ - V(Word64Xor) \ - V(Word64Shl) \ - V(Word64Shr) \ - V(Word64Sar) \ - V(Word64Ror) \ - V(Word64Equal) \ - V(Int32Add) \ - V(Int32AddWithOverflow) \ - V(Int32Sub) \ - V(Int32SubWithOverflow) \ - V(Int32Mul) \ - V(Int32Div) \ - V(Int32UDiv) \ - V(Int32Mod) \ - V(Int32UMod) \ - V(Int32LessThan) \ - V(Int32LessThanOrEqual) \ - V(Uint32LessThan) \ - V(Uint32LessThanOrEqual) \ - V(Int64Add) \ - V(Int64Sub) \ - V(Int64Mul) \ - V(Int64Div) \ - V(Int64UDiv) \ - V(Int64Mod) \ - V(Int64UMod) \ - V(Int64LessThan) \ - V(Int64LessThanOrEqual) \ - V(ChangeFloat32ToFloat64) \ - V(ChangeFloat64ToInt32) \ - V(ChangeFloat64ToUint32) \ - V(ChangeInt32ToFloat64) \ - V(ChangeInt32ToInt64) \ - V(ChangeUint32ToFloat64) \ - V(ChangeUint32ToUint64) \ - V(TruncateFloat64ToFloat32) \ - V(TruncateFloat64ToInt32) \ - V(TruncateInt64ToInt32) \ - V(Float64Add) \ - V(Float64Sub) \ - V(Float64Mul) \ - V(Float64Div) \ - V(Float64Mod) \ - V(Float64Equal) \ - V(Float64LessThan) \ +#define MACHINE_OP_LIST(V) \ + V(Load) \ + V(Store) \ + V(Word32And) \ + V(Word32Or) \ + V(Word32Xor) \ + V(Word32Shl) \ + V(Word32Shr) \ + V(Word32Sar) \ + V(Word32Ror) \ + V(Word32Equal) \ + V(Word64And) \ + V(Word64Or) \ + V(Word64Xor) \ + V(Word64Shl) \ + V(Word64Shr) \ + V(Word64Sar) \ + V(Word64Ror) \ + V(Word64Equal) \ + V(Int32Add) \ + V(Int32AddWithOverflow) \ + V(Int32Sub) \ + V(Int32SubWithOverflow) \ + V(Int32Mul) \ + V(Int32Div) \ + V(Int32UDiv) \ + V(Int32Mod) \ + V(Int32UMod) \ + V(Int32LessThan) \ + V(Int32LessThanOrEqual) \ + V(Uint32LessThan) \ + V(Uint32LessThanOrEqual) \ + V(Int64Add) \ + V(Int64Sub) \ + V(Int64Mul) \ + V(Int64Div) \ + V(Int64UDiv) \ + V(Int64Mod) \ + V(Int64UMod) \ + V(Int64LessThan) \ + V(Int64LessThanOrEqual) \ + V(ChangeInt32ToFloat64) \ + V(ChangeUint32ToFloat64) \ + V(ChangeFloat64ToInt32) \ + V(ChangeFloat64ToUint32) \ + V(ChangeInt32ToInt64) \ + V(ChangeUint32ToUint64) \ + V(TruncateFloat64ToInt32) \ + V(TruncateInt64ToInt32) \ + V(Float64Add) \ + V(Float64Sub) \ + V(Float64Mul) \ + V(Float64Div) \ + V(Float64Mod) \ + V(Float64Equal) \ + V(Float64LessThan) \ V(Float64LessThanOrEqual) #define VALUE_OP_LIST(V) \ diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc index 31d53e44e..1bd87b3be 100644 --- a/src/compiler/pipeline.cc +++ b/src/compiler/pipeline.cc @@ -269,14 +269,13 @@ Handle<Code> Pipeline::GenerateCode() { SourcePositionTable::Scope pos(&source_positions, SourcePosition::Unknown()); Linkage linkage(info()); - // TODO(turbofan): Value numbering disabled for now. - // ValueNumberingReducer vn_reducer(zone()); + ValueNumberingReducer vn_reducer(zone()); SimplifiedOperatorReducer simple_reducer(&jsgraph); ChangeLowering lowering(&jsgraph, &linkage); MachineOperatorReducer mach_reducer(&jsgraph); GraphReducer graph_reducer(&graph); // TODO(titzer): Figure out if we should run all reducers at once here. 
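// Aside: value numbering, re-enabled in the pipeline change here, merges
// structurally identical pure nodes (same operator, same inputs) into a
// single node. The core of the technique is a lookup keyed on
// (operator, inputs); a toy standalone version with integer node ids:
#include <map>
#include <tuple>
class ToyValueNumbering {
 public:
  // Returns the canonical id for (opcode, left, right); a new id is created
  // only the first time a combination is seen.
  int Canonicalize(int opcode, int left, int right) {
    auto key = std::make_tuple(opcode, left, right);
    auto it = table_.find(key);
    if (it != table_.end()) return it->second;  // reuse the existing node
    int id = next_id_++;
    table_.emplace(key, id);
    return id;
  }
 private:
  std::map<std::tuple<int, int, int>, int> table_;
  int next_id_ = 0;
};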
- // graph_reducer.AddReducer(&vn_reducer); + graph_reducer.AddReducer(&vn_reducer); graph_reducer.AddReducer(&simple_reducer); graph_reducer.AddReducer(&lowering); graph_reducer.AddReducer(&mach_reducer); diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h index a4af55a68..9cb161327 100644 --- a/src/compiler/raw-machine-assembler.h +++ b/src/compiler/raw-machine-assembler.h @@ -5,6 +5,12 @@ #ifndef V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_ #define V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_ +#ifdef USE_SIMULATOR +#define MACHINE_ASSEMBLER_SUPPORTS_CALL_C 0 +#else +#define MACHINE_ASSEMBLER_SUPPORTS_CALL_C 1 +#endif + #include "src/v8.h" #include "src/compiler/common-operator.h" @@ -369,6 +375,21 @@ class RawMachineAssembler : public GraphBuilder { return NewNode(machine()->TruncateInt64ToInt32(), a); } +#ifdef MACHINE_ASSEMBLER_SUPPORTS_CALL_C + // Call to C. + Node* CallC(Node* function_address, MachineType return_type, + MachineType* arg_types, Node** args, int n_args) { + CallDescriptor* descriptor = + Linkage::GetSimplifiedCDescriptor(zone(), machine_sig()); + Node** passed_args = zone()->NewArray<Node*>(n_args + 1); + passed_args[0] = function_address; + for (int i = 0; i < n_args; ++i) { + passed_args[i + 1] = args[i]; + } + return NewNode(common()->Call(descriptor), n_args + 1, passed_args); + } +#endif + // Parameters. Node* Parameter(size_t index); diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc index bfecdef61..c4af35e16 100644 --- a/src/compiler/typer.cc +++ b/src/compiler/typer.cc @@ -255,12 +255,6 @@ Bounds Typer::Visitor::TypeInt64Constant(Node* node) { } -Bounds Typer::Visitor::TypeFloat32Constant(Node* node) { - // TODO(titzer): only call Type::Of() if the type is not already known. - return Bounds(Type::Of(OpParameter<float>(node), zone())); -} - - Bounds Typer::Visitor::TypeFloat64Constant(Node* node) { // TODO(titzer): only call Type::Of() if the type is not already known. return Bounds(Type::Of(OpParameter<double>(node), zone())); diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc index 4d078b759..cb10477a3 100644 --- a/src/compiler/x64/code-generator-x64.cc +++ b/src/compiler/x64/code-generator-x64.cc @@ -205,7 +205,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) { switch (ArchOpcodeField::decode(instr->opcode())) { case kArchCallCodeObject: { - EnsureSpaceForLazyDeopt(); if (HasImmediateInput(instr, 0)) { Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0)); __ Call(code, RelocInfo::CODE_TARGET); @@ -217,8 +216,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) { AddSafepointAndDeopt(instr); break; } + case kArchCallAddress: + if (HasImmediateInput(instr, 0)) { + Immediate64 imm = i.InputImmediate64(0); + DCHECK_EQ(kImm64Value, imm.type); + __ Call(reinterpret_cast<byte*>(imm.value), RelocInfo::NONE64); + } else { + __ call(i.InputRegister(0)); + } + break; case kArchCallJSFunction: { - EnsureSpaceForLazyDeopt(); Register func = i.InputRegister(0); if (FLAG_debug_code) { // Check the function's context matches the context argument. 
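// Aside on the CallC guard above: MACHINE_ASSEMBLER_SUPPORTS_CALL_C is
// defined to either 0 or 1, so the intent is presumably a value test. Note
// that "#ifdef MACHINE_ASSEMBLER_SUPPORTS_CALL_C" is true in both
// configurations, since the macro exists either way; only a value test
// actually excludes the simulator build:
#if MACHINE_ASSEMBLER_SUPPORTS_CALL_C
// ... CallC compiled in only when the value is nonzero ...
#endif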
@@ -229,6 +236,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) { AddSafepointAndDeopt(instr); break; } + case kArchDrop: { + int words = MiscField::decode(instr->opcode()); + __ addq(rsp, Immediate(kPointerSize * words)); + break; + } case kArchJmp: __ jmp(code_->GetLabel(i.InputBlock(0))); break; @@ -993,21 +1005,6 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); } - -void CodeGenerator::EnsureSpaceForLazyDeopt() { - int space_needed = Deoptimizer::patch_size(); - if (!linkage()->info()->IsStub()) { - // Ensure that we have enough space after the previous lazy-bailout - // instruction for patching the code here. - int current_pc = masm()->pc_offset(); - if (current_pc < last_lazy_deopt_pc_ + space_needed) { - int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; - __ Nop(padding_size); - } - } - MarkLazyDeoptSite(); -} - #undef __ } // namespace internal diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc index 96501e686..e041a7484 100644 --- a/src/compiler/x64/instruction-selector-x64.cc +++ b/src/compiler/x64/instruction-selector-x64.cc @@ -691,6 +691,9 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation, opcode = kArchCallCodeObject; break; } + case CallDescriptor::kCallAddress: + opcode = kArchCallAddress; + break; case CallDescriptor::kCallJSFunction: opcode = kArchCallJSFunction; break; @@ -710,6 +713,15 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation, DCHECK(continuation != NULL); call_instr->MarkAsControl(); } + + // Caller clean up of stack for C-style calls. + if (descriptor->kind() == CallDescriptor::kCallAddress && + !buffer.pushed_nodes.empty()) { + DCHECK(deoptimization == NULL && continuation == NULL); + Emit(kArchDrop | + MiscField::encode(static_cast<int>(buffer.pushed_nodes.size())), + NULL); + } } } // namespace compiler diff --git a/src/conversions-inl.h b/src/conversions-inl.h index ae87dc4d3..bac157682 100644 --- a/src/conversions-inl.h +++ b/src/conversions-inl.h @@ -67,14 +67,6 @@ inline unsigned int FastD2UI(double x) { } -inline float DoubleToFloat32(double x) { - // TODO(yanggou): This static_cast is implementation-defined behaviour in C++, - // so we may need to do the conversion manually instead to match the spec. - volatile float f = static_cast<float>(x); - return f; -} - - inline double DoubleToInteger(double x) { if (std::isnan(x)) return 0; if (!std::isfinite(x) || x == 0) return x; diff --git a/src/conversions.h b/src/conversions.h index 6a28b5f1e..1b76ac5b6 100644 --- a/src/conversions.h +++ b/src/conversions.h @@ -77,10 +77,6 @@ inline double FastUI2D(unsigned x) { } -// This function should match the exact semantics of ECMA-262 20.2.2.17. -inline float DoubleToFloat32(double x); - - // This function should match the exact semantics of ECMA-262 9.4. inline double DoubleToInteger(double x); diff --git a/src/counters.h b/src/counters.h index 651cf54be..f97b9d2be 100644 --- a/src/counters.h +++ b/src/counters.h @@ -311,11 +311,7 @@ class HistogramTimerScope BASE_EMBEDDED { HT(pre_parse, V8.PreParse) \ /* Total compilation times. 
*/ \ HT(compile, V8.Compile) \ - HT(compile_eval, V8.CompileEval) \ - /* Serialization as part of compilation (code caching) */ \ - HT(compile_serialize, V8.CompileSerialize) \ - HT(compile_deserialize, V8.CompileDeserialize) - + HT(compile_eval, V8.CompileEval) #define HISTOGRAM_PERCENTAGE_LIST(HP) \ /* Heap fragmentation. */ \ diff --git a/src/deoptimizer.h b/src/deoptimizer.h index 612d5f6ec..a0961fc36 100644 --- a/src/deoptimizer.h +++ b/src/deoptimizer.h @@ -101,41 +101,20 @@ class Deoptimizer : public Malloced { static const int kBailoutTypesWithCodeEntry = SOFT + 1; - struct Reason { - Reason(int r, const char* m, const char* d) - : raw_position(r), mnemonic(m), detail(d) {} - - bool operator==(const Reason& other) const { - return raw_position == other.raw_position && - CStringEquals(mnemonic, other.mnemonic) && - CStringEquals(detail, other.detail); - } - - bool operator!=(const Reason& other) const { return !(*this == other); } - - int raw_position; - const char* mnemonic; - const char* detail; - }; - struct JumpTableEntry : public ZoneObject { - inline JumpTableEntry(Address entry, const Reason& the_reason, - Deoptimizer::BailoutType type, bool frame) + inline JumpTableEntry(Address entry, const char* the_mnemonic, + const char* the_reason, Deoptimizer::BailoutType type, + bool frame) : label(), address(entry), + mnemonic(the_mnemonic), reason(the_reason), bailout_type(type), needs_frame(frame) {} - - bool IsEquivalentTo(const JumpTableEntry& other) const { - return address == other.address && bailout_type == other.bailout_type && - needs_frame == other.needs_frame && - (!FLAG_trace_deopt || reason == other.reason); - } - Label label; Address address; - Reason reason; + const char* mnemonic; + const char* reason; Deoptimizer::BailoutType bailout_type; bool needs_frame; }; diff --git a/src/elements-kind.h b/src/elements-kind.h index fb973411e..b48a5dfe0 100644 --- a/src/elements-kind.h +++ b/src/elements-kind.h @@ -87,11 +87,6 @@ inline bool IsDictionaryElementsKind(ElementsKind kind) { } -inline bool IsSloppyArgumentsElements(ElementsKind kind) { - return kind == SLOPPY_ARGUMENTS_ELEMENTS; -} - - inline bool IsExternalArrayElementsKind(ElementsKind kind) { return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND && kind <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND; diff --git a/src/heap-snapshot-generator-inl.h b/src/heap-snapshot-generator-inl.h index 3f7e62216..f7d87aa31 100644 --- a/src/heap-snapshot-generator-inl.h +++ b/src/heap-snapshot-generator-inl.h @@ -43,6 +43,25 @@ HeapGraphEdge** HeapEntry::children_arr() { } +SnapshotObjectId HeapObjectsMap::GetNthGcSubrootId(int delta) { + return kGcRootsFirstSubrootId + delta * kObjectIdStep; +} + + +HeapObject* V8HeapExplorer::GetNthGcSubrootObject(int delta) { + return reinterpret_cast<HeapObject*>( + reinterpret_cast<char*>(kFirstGcSubrootObject) + + delta * HeapObjectsMap::kObjectIdStep); +} + + +int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) { + return static_cast<int>( + (reinterpret_cast<char*>(subroot) - + reinterpret_cast<char*>(kFirstGcSubrootObject)) / + HeapObjectsMap::kObjectIdStep); +} + } } // namespace v8::internal #endif // V8_HEAP_SNAPSHOT_GENERATOR_INL_H_ diff --git a/src/heap-snapshot-generator.cc b/src/heap-snapshot-generator.cc index 4a4c914e4..cb9edaf2b 100644 --- a/src/heap-snapshot-generator.cc +++ b/src/heap-snapshot-generator.cc @@ -188,6 +188,7 @@ HeapSnapshot::HeapSnapshot(HeapProfiler* profiler, uid_(uid), root_index_(HeapEntry::kNoEntry), gc_roots_index_(HeapEntry::kNoEntry), + 
natives_root_index_(HeapEntry::kNoEntry), max_snapshot_js_object_id_(0) { STATIC_ASSERT( sizeof(HeapGraphEdge) == @@ -216,18 +217,6 @@ void HeapSnapshot::RememberLastJSObjectId() { } -void HeapSnapshot::AddSyntheticRootEntries() { - AddRootEntry(); - AddGcRootsEntry(); - SnapshotObjectId id = HeapObjectsMap::kGcRootsFirstSubrootId; - for (int tag = 0; tag < VisitorSynchronization::kNumberOfSyncTags; tag++) { - AddGcSubrootEntry(tag, id); - id += HeapObjectsMap::kObjectIdStep; - } - DCHECK(HeapObjectsMap::kFirstAvailableObjectId == id); -} - - HeapEntry* HeapSnapshot::AddRootEntry() { DCHECK(root_index_ == HeapEntry::kNoEntry); DCHECK(entries_.is_empty()); // Root entry must be the first one. @@ -254,11 +243,15 @@ HeapEntry* HeapSnapshot::AddGcRootsEntry() { } -HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag, SnapshotObjectId id) { +HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) { DCHECK(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry); DCHECK(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags); - HeapEntry* entry = AddEntry(HeapEntry::kSynthetic, - VisitorSynchronization::kTagNames[tag], id, 0, 0); + HeapEntry* entry = AddEntry( + HeapEntry::kSynthetic, + VisitorSynchronization::kTagNames[tag], + HeapObjectsMap::GetNthGcSubrootId(tag), + 0, + 0); gc_subroot_indexes_[tag] = entry->index(); return entry; } @@ -778,6 +771,20 @@ void HeapObjectsSet::SetTag(Object* obj, const char* tag) { } +HeapObject* const V8HeapExplorer::kInternalRootObject = + reinterpret_cast<HeapObject*>( + static_cast<intptr_t>(HeapObjectsMap::kInternalRootObjectId)); +HeapObject* const V8HeapExplorer::kGcRootsObject = + reinterpret_cast<HeapObject*>( + static_cast<intptr_t>(HeapObjectsMap::kGcRootsObjectId)); +HeapObject* const V8HeapExplorer::kFirstGcSubrootObject = + reinterpret_cast<HeapObject*>( + static_cast<intptr_t>(HeapObjectsMap::kGcRootsFirstSubrootId)); +HeapObject* const V8HeapExplorer::kLastGcSubrootObject = + reinterpret_cast<HeapObject*>( + static_cast<intptr_t>(HeapObjectsMap::kFirstAvailableObjectId)); + + V8HeapExplorer::V8HeapExplorer( HeapSnapshot* snapshot, SnapshottingProgressReportingInterface* progress, @@ -802,7 +809,16 @@ HeapEntry* V8HeapExplorer::AllocateEntry(HeapThing ptr) { HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) { - if (object->IsJSFunction()) { + if (object == kInternalRootObject) { + snapshot_->AddRootEntry(); + return snapshot_->root(); + } else if (object == kGcRootsObject) { + HeapEntry* entry = snapshot_->AddGcRootsEntry(); + return entry; + } else if (object >= kFirstGcSubrootObject && object < kLastGcSubrootObject) { + HeapEntry* entry = snapshot_->AddGcSubrootEntry(GetGcSubrootOrder(object)); + return entry; + } else if (object->IsJSFunction()) { JSFunction* func = JSFunction::cast(object); SharedFunctionInfo* shared = func->shared(); const char* name = shared->bound() ? "native_bind" : @@ -949,6 +965,41 @@ class SnapshotFiller { }; +class GcSubrootsEnumerator : public ObjectVisitor { + public: + GcSubrootsEnumerator( + SnapshotFiller* filler, V8HeapExplorer* explorer) + : filler_(filler), + explorer_(explorer), + previous_object_count_(0), + object_count_(0) { + } + void VisitPointers(Object** start, Object** end) { + object_count_ += end - start; + } + void Synchronize(VisitorSynchronization::SyncTag tag) { + // Skip empty subroots. 
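The kInternalRootObject/kGcRootsObject/kFirstGcSubrootObject definitions above restore a sentinel-pointer trick: tiny snapshot ids are cast to HeapObject* so the generic AddEntry path can recognize synthetic roots with plain pointer comparisons, as the restored AddEntry dispatch shows. A simplified model (the id values are assumed; real heap objects are aligned far above these addresses):

#include <cstdint>
struct HeapObject;  // opaque stand-in
HeapObject* const kFirstGcSubrootObject =
    reinterpret_cast<HeapObject*>(static_cast<intptr_t>(5));   // assumed id
HeapObject* const kLastGcSubrootObject =
    reinterpret_cast<HeapObject*>(static_cast<intptr_t>(23));  // assumed id
bool IsGcSubrootSentinel(HeapObject* object) {
  // A range check is safe: no real object lives at such a low address.
  return object >= kFirstGcSubrootObject && object < kLastGcSubrootObject;
}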
+ if (previous_object_count_ != object_count_) { + previous_object_count_ = object_count_; + filler_->AddEntry(V8HeapExplorer::GetNthGcSubrootObject(tag), explorer_); + } + } + private: + SnapshotFiller* filler_; + V8HeapExplorer* explorer_; + intptr_t previous_object_count_; + intptr_t object_count_; +}; + + +void V8HeapExplorer::AddRootEntries(SnapshotFiller* filler) { + filler->AddEntry(kInternalRootObject, this); + filler->AddEntry(kGcRootsObject, this); + GcSubrootsEnumerator enumerator(filler, this); + heap_->IterateRoots(&enumerator, VISIT_ALL); +} + + const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) { switch (object->map()->instance_type()) { case MAP_TYPE: @@ -1804,6 +1855,9 @@ class RootsReferencesExtractor : public ObjectVisitor { void FillReferences(V8HeapExplorer* explorer) { DCHECK(strong_references_.length() <= all_references_.length()); Builtins* builtins = heap_->isolate()->builtins(); + for (int i = 0; i < reference_tags_.length(); ++i) { + explorer->SetGcRootsReference(reference_tags_[i].tag); + } int strong_index = 0, all_index = 0, tags_index = 0, builtin_index = 0; while (all_index < all_references_.length()) { bool is_strong = strong_index < strong_references_.length() @@ -1846,15 +1900,10 @@ bool V8HeapExplorer::IterateAndExtractReferences( SnapshotFiller* filler) { filler_ = filler; - // Create references to the synthetic roots. - SetRootGcRootsReference(); - for (int tag = 0; tag < VisitorSynchronization::kNumberOfSyncTags; tag++) { - SetGcRootsReference(static_cast<VisitorSynchronization::SyncTag>(tag)); - } - // Make sure builtin code objects get their builtin tags // first. Otherwise a particular JSFunction object could set // its custom name to a generic builtin. + SetRootGcRootsReference(); RootsReferencesExtractor extractor(heap_); heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG); extractor.SetCollectingAllReferences(); @@ -2570,8 +2619,6 @@ bool HeapSnapshotGenerator::GenerateSnapshot() { debug_heap->Verify(); #endif - snapshot_->AddSyntheticRootEntries(); - if (!FillReferences()) return false; snapshot_->FillChildren(); @@ -2612,6 +2659,7 @@ void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) { bool HeapSnapshotGenerator::FillReferences() { SnapshotFiller filler(snapshot_, &entries_); + v8_heap_explorer_.AddRootEntries(&filler); return v8_heap_explorer_.IterateAndExtractReferences(&filler) && dom_explorer_.IterateAndExtractReferences(&filler); } diff --git a/src/heap-snapshot-generator.h b/src/heap-snapshot-generator.h index 3e4ce71b8..a0d73bfae 100644 --- a/src/heap-snapshot-generator.h +++ b/src/heap-snapshot-generator.h @@ -100,7 +100,7 @@ class HeapEntry BASE_EMBEDDED { Type type() { return static_cast<Type>(type_); } const char* name() { return name_; } void set_name(const char* name) { name_ = name; } - SnapshotObjectId id() { return id_; } + inline SnapshotObjectId id() { return id_; } size_t self_size() { return self_size_; } unsigned trace_node_id() const { return trace_node_id_; } INLINE(int index() const); @@ -154,6 +154,7 @@ class HeapSnapshot { size_t RawSnapshotSize() const; HeapEntry* root() { return &entries_[root_index_]; } HeapEntry* gc_roots() { return &entries_[gc_roots_index_]; } + HeapEntry* natives_root() { return &entries_[natives_root_index_]; } HeapEntry* gc_subroot(int index) { return &entries_[gc_subroot_indexes_[index]]; } @@ -170,7 +171,10 @@ class HeapSnapshot { SnapshotObjectId id, size_t size, unsigned trace_node_id); - void AddSyntheticRootEntries(); + HeapEntry* AddRootEntry(); + 
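GcSubrootsEnumerator above uses the visitor's Synchronize callbacks as tag boundaries: VisitPointers tallies the roots seen, and a subroot entry is emitted only when the tally advanced since the previous tag, which is how empty subroots get skipped. A minimal model of that counting pattern (stand-alone, names illustrative):

#include <cstdio>
class SubrootCounter {
 public:
  void VisitPointers(int n) { object_count_ += n; }
  void Synchronize(int tag) {
    if (previous_object_count_ != object_count_) {  // subroot is non-empty
      previous_object_count_ = object_count_;
      std::printf("emit subroot entry for tag %d\n", tag);
    }
  }
 private:
  long previous_object_count_ = 0;
  long object_count_ = 0;
};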
HeapEntry* AddGcRootsEntry(); + HeapEntry* AddGcSubrootEntry(int tag); + HeapEntry* AddNativesRootEntry(); HeapEntry* GetEntryById(SnapshotObjectId id); List<HeapEntry*>* GetSortedEntriesList(); void FillChildren(); @@ -179,15 +183,12 @@ class HeapSnapshot { void PrintEntriesSize(); private: - HeapEntry* AddRootEntry(); - HeapEntry* AddGcRootsEntry(); - HeapEntry* AddGcSubrootEntry(int tag, SnapshotObjectId id); - HeapProfiler* profiler_; const char* title_; unsigned uid_; int root_index_; int gc_roots_index_; + int natives_root_index_; int gc_subroot_indexes_[VisitorSynchronization::kNumberOfSyncTags]; List<HeapEntry> entries_; List<HeapGraphEdge> edges_; @@ -222,10 +223,12 @@ class HeapObjectsMap { size_t GetUsedMemorySize() const; SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info); + static inline SnapshotObjectId GetNthGcSubrootId(int delta); static const int kObjectIdStep = 2; static const SnapshotObjectId kInternalRootObjectId; static const SnapshotObjectId kGcRootsObjectId; + static const SnapshotObjectId kNativesRootObjectId; static const SnapshotObjectId kGcRootsFirstSubrootId; static const SnapshotObjectId kFirstAvailableObjectId; @@ -345,6 +348,8 @@ class V8HeapExplorer : public HeapEntriesAllocator { static String* GetConstructorName(JSObject* object); + static HeapObject* const kInternalRootObject; + private: typedef bool (V8HeapExplorer::*ExtractReferencesMethod)(int entry, HeapObject* object); @@ -445,6 +450,9 @@ class V8HeapExplorer : public HeapEntriesAllocator { HeapEntry* GetEntry(Object* obj); + static inline HeapObject* GetNthGcSubrootObject(int delta); + static inline int GetGcSubrootOrder(HeapObject* subroot); + Heap* heap_; HeapSnapshot* snapshot_; StringsStorage* names_; @@ -457,7 +465,12 @@ class V8HeapExplorer : public HeapEntriesAllocator { HeapObjectsSet weak_containers_; v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_; + static HeapObject* const kGcRootsObject; + static HeapObject* const kFirstGcSubrootObject; + static HeapObject* const kLastGcSubrootObject; + friend class IndexedReferencesExtractor; + friend class GcSubrootsEnumerator; friend class RootsReferencesExtractor; DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer); diff --git a/src/heap/gc-idle-time-handler.h b/src/heap/gc-idle-time-handler.h index daab616d6..473b4742d 100644 --- a/src/heap/gc-idle-time-handler.h +++ b/src/heap/gc-idle-time-handler.h @@ -108,7 +108,7 @@ class GCIdleTimeHandler { // Heap size threshold below which we prefer mark-compact over incremental // step. - static const size_t kSmallHeapSize = 4 * kPointerSize * MB; + static const size_t kSmallHeapSize = 2 * kPointerSize * MB; // That is the maximum idle time we will have during frame rendering. static const size_t kMaxFrameRenderingIdleTime = 16; diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h index 695c629a7..ab571470a 100644 --- a/src/hydrogen-instructions.h +++ b/src/hydrogen-instructions.h @@ -455,10 +455,10 @@ class HSourcePosition { // Offset from the start of the inlined function. typedef BitField<int, 9, 23> PositionField; + // On HPositionInfo can use this constructor. explicit HSourcePosition(int value) : value_(value) { } friend class HPositionInfo; - friend class LCodeGenBase; // If FLAG_hydrogen_track_positions is set contains bitfields InliningIdField // and PositionField. 
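The gc-idle-time-handler hunk halves the mark-compact preference threshold: kSmallHeapSize is now 2 * kPointerSize * MB, i.e. 16 MiB on 64-bit targets and 8 MiB on 32-bit ones (previously 32 and 16 MiB). The arithmetic, spelled out for a 64-bit build:

#include <cstddef>
constexpr size_t MB = 1024 * 1024;
constexpr size_t kPointerSize = 8;  // 64-bit build assumed
constexpr size_t kSmallHeapSize = 2 * kPointerSize * MB;
static_assert(kSmallHeapSize == 16 * MB, "threshold is 16 MiB on x64");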
diff --git a/src/hydrogen.cc b/src/hydrogen.cc index dfadbe56f..e5a93a7bd 100644 --- a/src/hydrogen.cc +++ b/src/hydrogen.cc @@ -4834,9 +4834,14 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { DCHECK(current_block() != NULL); DCHECK(current_block()->HasPredecessor()); + // We only optimize switch statements with a bounded number of clauses. + const int kCaseClauseLimit = 128; ZoneList<CaseClause*>* clauses = stmt->cases(); int clause_count = clauses->length(); ZoneList<HBasicBlock*> body_blocks(clause_count, zone()); + if (clause_count > kCaseClauseLimit) { + return Bailout(kSwitchStatementTooManyClauses); + } CHECK_ALIVE(VisitForValue(stmt->tag())); Add<HSimulate>(stmt->EntryId()); @@ -6431,7 +6436,7 @@ void HOptimizedGraphBuilder::BuildStore(Expression* expr, HValue* key = environment()->ExpressionStackAt(1); HValue* object = environment()->ExpressionStackAt(2); bool has_side_effects = false; - HandleKeyedElementAccess(object, key, value, expr, ast_id, return_id, STORE, + HandleKeyedElementAccess(object, key, value, expr, return_id, STORE, &has_side_effects); Drop(3); Push(value); @@ -7124,7 +7129,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess( HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess( - HValue* obj, HValue* key, HValue* val, Expression* expr, BailoutId ast_id, + HValue* obj, HValue* key, HValue* val, Expression* expr, BailoutId return_id, PropertyAccessType access_type, bool* has_side_effects) { if (key->ActualValue()->IsConstant()) { @@ -7138,7 +7143,7 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess( Handle<String>::cast(constant)); } HInstruction* instr = - BuildNamedAccess(access_type, ast_id, return_id, expr, obj, + BuildNamedAccess(access_type, expr->id(), return_id, expr, obj, Handle<String>::cast(constant), val, false); if (instr == NULL || instr->IsLinked()) { *has_side_effects = false; @@ -7360,7 +7365,7 @@ void HOptimizedGraphBuilder::BuildLoad(Property* expr, bool has_side_effects = false; HValue* load = HandleKeyedElementAccess( - obj, key, NULL, expr, ast_id, expr->LoadId(), LOAD, &has_side_effects); + obj, key, NULL, expr, expr->LoadId(), LOAD, &has_side_effects); if (has_side_effects) { if (ast_context()->IsEffect()) { Add<HSimulate>(ast_id, REMOVABLE_SIMULATE); diff --git a/src/hydrogen.h b/src/hydrogen.h index d5e208f37..6b031708c 100644 --- a/src/hydrogen.h +++ b/src/hydrogen.h @@ -2627,8 +2627,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor { bool* has_side_effects); HValue* HandleKeyedElementAccess(HValue* obj, HValue* key, HValue* val, - Expression* expr, BailoutId ast_id, - BailoutId return_id, + Expression* expr, BailoutId return_id, PropertyAccessType access_type, bool* has_side_effects); diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc index 9c1e3ccbc..e9633a14f 100644 --- a/src/ia32/code-stubs-ia32.cc +++ b/src/ia32/code-stubs-ia32.cc @@ -3502,8 +3502,8 @@ void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) { __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(tmp1, &miss, Label::kNear); - __ JumpIfNotUniqueNameInstanceType(tmp2, &miss, Label::kNear); + __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear); + __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear); // Unique names are compared by identity. 
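The VisitSwitchStatement hunk above bounds optimized switches: beyond kCaseClauseLimit clauses the graph builder bails out to unoptimized code rather than emit an oversized compare chain. The guard, reduced to a stand-alone sketch:

constexpr int kCaseClauseLimit = 128;  // matches the patch
// True when Crankshaft should refuse the switch and take the
// kSwitchStatementTooManyClauses bailout.
bool ShouldBailOutOnSwitch(int clause_count) {
  return clause_count > kCaseClauseLimit;
}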
Label done; @@ -3728,8 +3728,8 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, // Check if the entry name is not a unique name. __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset)); - __ JumpIfNotUniqueNameInstanceType( - FieldOperand(entity_name, Map::kInstanceTypeOffset), miss); + __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset), + miss); __ bind(&good); } @@ -3863,9 +3863,8 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { // Check if the entry name is not a unique name. __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset)); - __ JumpIfNotUniqueNameInstanceType( - FieldOperand(scratch, Map::kInstanceTypeOffset), - &maybe_in_dictionary); + __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset), + &maybe_in_dictionary); } } diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc index 8490ff975..259fbf0c3 100644 --- a/src/ia32/lithium-codegen-ia32.cc +++ b/src/ia32/lithium-codegen-ia32.cc @@ -386,7 +386,11 @@ bool LCodeGen::GenerateJumpTable() { Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i]; __ bind(&table_entry->label); Address entry = table_entry->address; - DeoptComment(table_entry->reason); + Deoptimizer::BailoutType type = table_entry->bailout_type; + int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); + DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id); + Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); + DeoptComment(table_entry->mnemonic, table_entry->reason); if (table_entry->needs_frame) { DCHECK(!info()->saves_caller_doubles()); __ push(Immediate(ExternalReference::ForDeoptEntry(entry))); @@ -821,7 +825,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization( void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, - const char* detail, + const char* reason, Deoptimizer::BailoutType bailout_type) { LEnvironment* environment = instr->environment(); RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); @@ -863,19 +867,19 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, __ bind(&done); } - Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), - instr->Mnemonic(), detail); DCHECK(info()->IsStub() || frame_is_built_); if (cc == no_condition && frame_is_built_) { - DeoptComment(reason); + DeoptComment(instr->Mnemonic(), reason); __ call(entry, RelocInfo::RUNTIME_ENTRY); } else { - Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, - !frame_is_built_); // We often have several deopts to the same entry, reuse the last // jump entry if this is the case. if (jump_table_.is_empty() || - !table_entry.IsEquivalentTo(jump_table_.last())) { + jump_table_.last().address != entry || + jump_table_.last().needs_frame != !frame_is_built_ || + jump_table_.last().bailout_type != bailout_type) { + Deoptimizer::JumpTableEntry table_entry(entry, instr->Mnemonic(), reason, + bailout_type, !frame_is_built_); jump_table_.Add(table_entry, zone()); } if (cc == no_condition) { @@ -888,11 +892,11 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, - const char* detail) { + const char* reason) { Deoptimizer::BailoutType bailout_type = info()->IsStub() ? 
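The DeoptimizeIf hunk above replaces JumpTableEntry::IsEquivalentTo with an inline comparison: a new jump-table entry is appended only if the last one differs in target address, frame requirement, or bailout type, so repeated deopts to the same entry share one stub. The reuse predicate as a stand-alone sketch (struct reduced to the compared fields):

struct EntrySketch {
  const void* address;
  bool needs_frame;
  int bailout_type;
};
// nullptr means the table is still empty.
bool NeedsNewEntry(const EntrySketch* last, const void* entry,
                   bool needs_frame, int bailout_type) {
  return last == nullptr || last->address != entry ||
         last->needs_frame != needs_frame ||
         last->bailout_type != bailout_type;
}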
Deoptimizer::LAZY : Deoptimizer::EAGER; - DeoptimizeIf(cc, instr, detail, bailout_type); + DeoptimizeIf(cc, instr, reason, bailout_type); } @@ -1616,6 +1620,10 @@ void LCodeGen::DoShiftI(LShiftI* instr) { switch (instr->op()) { case Token::ROR: __ ror_cl(ToRegister(left)); + if (instr->can_deopt()) { + __ test(ToRegister(left), ToRegister(left)); + DeoptimizeIf(sign, instr); + } break; case Token::SAR: __ sar_cl(ToRegister(left)); @@ -3698,7 +3706,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) { __ cvttsd2si(output_reg, Operand(xmm_scratch)); // Overflow is signalled with minint. __ cmp(output_reg, 0x1); - DeoptimizeIf(overflow, instr, "conversion overflow"); + __ RecordComment("D2I conversion overflow"); + DeoptimizeIf(overflow, instr); __ jmp(&done, dist); __ bind(&below_one_half); @@ -3713,7 +3722,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) { __ cvttsd2si(output_reg, Operand(input_temp)); // Catch minint due to overflow, and to prevent overflow when compensating. __ cmp(output_reg, 0x1); - DeoptimizeIf(overflow, instr, "conversion overflow"); + __ RecordComment("D2I conversion overflow"); + DeoptimizeIf(overflow, instr); __ Cvtsi2sd(xmm_scratch, output_reg); __ ucomisd(xmm_scratch, input_temp); @@ -3729,7 +3739,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) { // If the sign is positive, we return +0. __ movmskpd(output_reg, input_reg); __ test(output_reg, Immediate(1)); - DeoptimizeIf(not_zero, instr, "minus zero"); + __ RecordComment("Minus zero"); + DeoptimizeIf(not_zero, instr); } __ Move(output_reg, Immediate(0)); __ bind(&done); @@ -4749,26 +4760,31 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { __ bind(&check_false); __ cmp(input_reg, factory()->false_value()); - DeoptimizeIf(not_equal, instr, "cannot truncate"); + __ RecordComment("Deferred TaggedToI: cannot truncate"); + DeoptimizeIf(not_equal, instr); __ Move(input_reg, Immediate(0)); } else { XMMRegister scratch = ToDoubleRegister(instr->temp()); DCHECK(!scratch.is(xmm0)); __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), isolate()->factory()->heap_number_map()); - DeoptimizeIf(not_equal, instr, "not a heap number"); + __ RecordComment("Deferred TaggedToI: not a heap number"); + DeoptimizeIf(not_equal, instr); __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); __ cvttsd2si(input_reg, Operand(xmm0)); __ Cvtsi2sd(scratch, Operand(input_reg)); __ ucomisd(xmm0, scratch); - DeoptimizeIf(not_equal, instr, "lost precision"); - DeoptimizeIf(parity_even, instr, "NaN"); + __ RecordComment("Deferred TaggedToI: lost precision"); + DeoptimizeIf(not_equal, instr); + __ RecordComment("Deferred TaggedToI: NaN"); + DeoptimizeIf(parity_even, instr); if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) { __ test(input_reg, Operand(input_reg)); __ j(not_zero, done); __ movmskpd(input_reg, xmm0); __ and_(input_reg, 1); - DeoptimizeIf(not_zero, instr, "minus zero"); + __ RecordComment("Deferred TaggedToI: minus zero"); + DeoptimizeIf(not_zero, instr); } } } diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h index ee1eb0505..555cf8c5c 100644 --- a/src/ia32/lithium-codegen-ia32.h +++ b/src/ia32/lithium-codegen-ia32.h @@ -209,10 +209,10 @@ class LCodeGen: public LCodeGenBase { void RegisterEnvironmentForDeoptimization(LEnvironment* environment, Safepoint::DeoptMode mode); - void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail, + void DeoptimizeIf(Condition cc, LInstruction* instr, const char* reason, Deoptimizer::BailoutType 
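The DoMathRound and DoDeferredTaggedToI hunks rely on cvttsd2si's sentinel: when the double is NaN or out of int32 range the instruction produces 0x80000000, and the following cmp reg, 1 sets the overflow flag for exactly that value, which DeoptimizeIf(overflow, ...) then catches. A C++ model of the sentinel test:

#include <cstdint>
#include <limits>
bool D2IConversionOverflowed(int32_t converted) {
  // cvttsd2si's "integer indefinite" result; cmp(reg, 1) overflows only
  // for INT32_MIN, so j(overflow) fires exactly here.
  return converted == std::numeric_limits<int32_t>::min();
}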
bailout_type); void DeoptimizeIf(Condition cc, LInstruction* instr, - const char* detail = NULL); + const char* reason = NULL); bool DeoptEveryNTimes() { return FLAG_deopt_every_n_times != 0 && !info()->IsStub(); diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc index f0a6ae30d..f938d5001 100644 --- a/src/ia32/macro-assembler-ia32.cc +++ b/src/ia32/macro-assembler-ia32.cc @@ -2917,9 +2917,9 @@ void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1, } -void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand, - Label* not_unique_name, - Label::Distance distance) { +void MacroAssembler::JumpIfNotUniqueName(Operand operand, + Label* not_unique_name, + Label::Distance distance) { STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); Label succeed; test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask)); diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h index 9c417f265..405bea8e7 100644 --- a/src/ia32/macro-assembler-ia32.h +++ b/src/ia32/macro-assembler-ia32.h @@ -915,13 +915,13 @@ class MacroAssembler: public Assembler { Label* on_not_flat_one_byte_strings); // Checks if the given register or operand is a unique name - void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name, - Label::Distance distance = Label::kFar) { - JumpIfNotUniqueNameInstanceType(Operand(reg), not_unique_name, distance); + void JumpIfNotUniqueName(Register reg, Label* not_unique_name, + Label::Distance distance = Label::kFar) { + JumpIfNotUniqueName(Operand(reg), not_unique_name, distance); } - void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name, - Label::Distance distance = Label::kFar); + void JumpIfNotUniqueName(Operand operand, Label* not_unique_name, + Label::Distance distance = Label::kFar); void EmitSeqStringSetCharCheck(Register string, Register index, diff --git a/src/ic/arm/ic-arm.cc b/src/ic/arm/ic-arm.cc index ae1316150..1b6cf72df 100644 --- a/src/ic/arm/ic-arm.cc +++ b/src/ic/arm/ic-arm.cc @@ -369,6 +369,32 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, } +void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { + // The return address is in lr. + Register receiver = LoadDescriptor::ReceiverRegister(); + Register key = LoadDescriptor::NameRegister(); + DCHECK(receiver.is(r1)); + DCHECK(key.is(r2)); + + Label slow, notin; + MemOperand mapped_location = GenerateMappedArgumentsLookup( + masm, receiver, key, r0, r3, r4, ¬in, &slow); + __ ldr(r0, mapped_location); + __ Ret(); + __ bind(¬in); + // The unmapped lookup expects that the parameter map is in r0. + MemOperand unmapped_location = + GenerateUnmappedArgumentsLookup(masm, key, r0, r3, &slow); + __ ldr(r0, unmapped_location); + __ LoadRoot(r3, Heap::kTheHoleValueRootIndex); + __ cmp(r0, r3); + __ b(eq, &slow); + __ Ret(); + __ bind(&slow); + GenerateMiss(masm); +} + + void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { Register receiver = StoreDescriptor::ReceiverRegister(); Register key = StoreDescriptor::NameRegister(); diff --git a/src/ic/arm/ic-compiler-arm.cc b/src/ic/arm/ic-compiler-arm.cc index 7bef56e94..e918fdc9c 100644 --- a/src/ic/arm/ic-compiler-arm.cc +++ b/src/ic/arm/ic-compiler-arm.cc @@ -44,11 +44,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types, // In case we are compiling an IC for dictionary loads and stores, just // check whether the name is unique. 
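JumpIfNotUniqueName above leans on the tag layout asserted by the STATIC_ASSERT: internalized strings have both the not-string and not-internalized bits clear, and anything else must be a symbol to count as a unique name. A sketch of the test (all bit values below are assumed for illustration, not taken from the patch):

#include <cstdint>
constexpr uint32_t kIsNotStringMask = 0x80;        // assumed layout
constexpr uint32_t kIsNotInternalizedMask = 0x40;  // assumed layout
constexpr uint32_t kSymbolType = 0xC5;             // hypothetical value
bool IsUniqueName(uint32_t instance_type) {
  if ((instance_type & (kIsNotStringMask | kIsNotInternalizedMask)) == 0)
    return true;                        // internalized string
  return instance_type == kSymbolType;  // otherwise must be a symbol
}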
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { - Register tmp = scratch1(); - __ JumpIfSmi(this->name(), &miss); - __ ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset)); - __ ldrb(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(tmp, &miss); + __ JumpIfNotUniqueName(this->name(), &miss); } else { __ cmp(this->name(), Operand(name)); __ b(ne, &miss); diff --git a/src/ic/arm64/ic-arm64.cc b/src/ic/arm64/ic-arm64.cc index 76f9c24cf..9e5427ff9 100644 --- a/src/ic/arm64/ic-arm64.cc +++ b/src/ic/arm64/ic-arm64.cc @@ -370,6 +370,35 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { } +void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { + // The return address is in lr. + Register result = x0; + Register receiver = LoadDescriptor::ReceiverRegister(); + Register key = LoadDescriptor::NameRegister(); + DCHECK(receiver.is(x1)); + DCHECK(key.is(x2)); + + Label miss, unmapped; + + Register map_scratch = x0; + MemOperand mapped_location = GenerateMappedArgumentsLookup( + masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss); + __ Ldr(result, mapped_location); + __ Ret(); + + __ Bind(&unmapped); + // Parameter map is left in map_scratch when a jump on unmapped is done. + MemOperand unmapped_location = + GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss); + __ Ldr(result, unmapped_location); + __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &miss); + __ Ret(); + + __ Bind(&miss); + GenerateMiss(masm); +} + + void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments"); Label slow, notin; diff --git a/src/ic/arm64/ic-compiler-arm64.cc b/src/ic/arm64/ic-compiler-arm64.cc index ffc1069f2..fd9a4dbd7 100644 --- a/src/ic/arm64/ic-compiler-arm64.cc +++ b/src/ic/arm64/ic-compiler-arm64.cc @@ -45,11 +45,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types, // In case we are compiling an IC for dictionary loads and stores, just // check whether the name is unique. 
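The GenerateSloppyArguments stubs being reintroduced for each architecture all implement the same two-level lookup: a mapped slot aliases a context variable via the parameter map, an unmapped slot lives in the arguments backing store, and the-hole forces the slow path. A simplified C++17 model, with plain arrays standing in for the heap structures and nullopt where the stubs would jump to the slow path:

#include <optional>
#include <vector>
struct SloppyArguments {
  std::vector<std::optional<int>> parameter_map;  // index -> context slot
  std::vector<int> context;                       // aliased variables
  std::vector<int> backing_store;                 // unmapped values
};
std::optional<int> Load(const SloppyArguments& a, size_t key) {
  if (key < a.parameter_map.size() && a.parameter_map[key])
    return a.context[*a.parameter_map[key]];      // mapped lookup
  if (key < a.backing_store.size())
    return a.backing_store[key];                  // unmapped lookup
  return std::nullopt;                            // miss -> slow path
}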
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { - Register tmp = scratch1(); - __ JumpIfSmi(this->name(), &miss); - __ Ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset)); - __ Ldrb(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(tmp, &miss); + __ JumpIfNotUniqueName(this->name(), &miss); } else { __ CompareAndBranch(this->name(), Operand(name), ne, &miss); } diff --git a/src/ic/handler-compiler.cc b/src/ic/handler-compiler.cc index 4ed92ec6b..ecd3e3b54 100644 --- a/src/ic/handler-compiler.cc +++ b/src/ic/handler-compiler.cc @@ -390,13 +390,13 @@ void ElementHandlerCompiler::CompileElementHandlers( ElementsKind elements_kind = receiver_map->elements_kind(); if (receiver_map->has_indexed_interceptor()) { cached_stub = LoadIndexedInterceptorStub(isolate()).GetCode(); - } else if (IsSloppyArgumentsElements(elements_kind)) { - cached_stub = KeyedLoadSloppyArgumentsStub(isolate()).GetCode(); } else if (IsFastElementsKind(elements_kind) || IsExternalArrayElementsKind(elements_kind) || IsFixedTypedArrayElementsKind(elements_kind)) { cached_stub = LoadFastElementStub(isolate(), is_js_array, elements_kind) .GetCode(); + } else if (elements_kind == SLOPPY_ARGUMENTS_ELEMENTS) { + cached_stub = isolate()->builtins()->KeyedLoadIC_SloppyArguments(); } else { DCHECK(elements_kind == DICTIONARY_ELEMENTS); cached_stub = LoadDictionaryElementStub(isolate()).GetCode(); diff --git a/src/ic/ia32/ic-compiler-ia32.cc b/src/ic/ia32/ic-compiler-ia32.cc index ac42f30bf..685eac982 100644 --- a/src/ic/ia32/ic-compiler-ia32.cc +++ b/src/ic/ia32/ic-compiler-ia32.cc @@ -48,11 +48,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types, // In case we are compiling an IC for dictionary loads and stores, just // check whether the name is unique. if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { - Register tmp = scratch1(); - __ JumpIfSmi(this->name(), &miss); - __ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset)); - __ movzx_b(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(tmp, &miss); + __ JumpIfNotUniqueName(this->name(), &miss); } else { __ cmp(this->name(), Immediate(name)); __ j(not_equal, &miss); diff --git a/src/ic/ia32/ic-ia32.cc b/src/ic/ia32/ic-ia32.cc index 67247d29e..2b4c95429 100644 --- a/src/ic/ia32/ic-ia32.cc +++ b/src/ic/ia32/ic-ia32.cc @@ -503,6 +503,32 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { } +void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { + // The return address is on the stack. + Register receiver = LoadDescriptor::ReceiverRegister(); + Register key = LoadDescriptor::NameRegister(); + DCHECK(receiver.is(edx)); + DCHECK(key.is(ecx)); + + Label slow, notin; + Factory* factory = masm->isolate()->factory(); + Operand mapped_location = GenerateMappedArgumentsLookup( + masm, receiver, key, ebx, eax, ¬in, &slow); + __ mov(eax, mapped_location); + __ Ret(); + __ bind(¬in); + // The unmapped lookup expects that the parameter map is in ebx. + Operand unmapped_location = + GenerateUnmappedArgumentsLookup(masm, key, ebx, eax, &slow); + __ cmp(unmapped_location, factory->the_hole_value()); + __ j(equal, &slow); + __ mov(eax, unmapped_location); + __ Ret(); + __ bind(&slow); + GenerateMiss(masm); +} + + void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { // Return address is on the stack. 
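CompileElementHandlers above dispatches on the receiver's elements kind, and the hunk moves sloppy arguments from a code stub to the KeyedLoadIC_SloppyArguments builtin. The selection order, reduced to a sketch (enum values are illustrative):

enum KindSketch { FAST, EXTERNAL, SLOPPY_ARGS, DICTIONARY };
const char* PickElementHandler(bool has_indexed_interceptor, KindSketch k) {
  if (has_indexed_interceptor) return "LoadIndexedInterceptorStub";
  if (k == FAST || k == EXTERNAL) return "LoadFastElementStub";
  if (k == SLOPPY_ARGS) return "KeyedLoadIC_SloppyArguments builtin";
  return "LoadDictionaryElementStub";  // DICTIONARY is the only case left
}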
Label slow, notin; diff --git a/src/ic/ic-compiler.cc b/src/ic/ic-compiler.cc index aeae4ba90..3b9f51c87 100644 --- a/src/ic/ic-compiler.cc +++ b/src/ic/ic-compiler.cc @@ -96,8 +96,6 @@ Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphic( Handle<Code> stub; if (receiver_map->has_indexed_interceptor()) { stub = LoadIndexedInterceptorStub(isolate).GetCode(); - } else if (receiver_map->has_sloppy_arguments_elements()) { - stub = KeyedLoadSloppyArgumentsStub(isolate).GetCode(); } else if (receiver_map->has_fast_elements() || receiver_map->has_external_array_elements() || receiver_map->has_fixed_typed_array_elements()) { diff --git a/src/ic/ic.cc b/src/ic/ic.cc index 7f346a0f6..fc364e152 100644 --- a/src/ic/ic.cc +++ b/src/ic/ic.cc @@ -76,13 +76,7 @@ const char* GetTransitionMarkModifier(KeyedAccessStoreMode mode) { #else -#define TRACE_GENERIC_IC(isolate, type, reason) \ - do { \ - if (FLAG_trace_ic) { \ - PrintF("[%s patching generic stub in ", type); \ - PrintF("(see below) (%s)]\n", reason); \ - } \ - } while (false) +#define TRACE_GENERIC_IC(isolate, type, reason) #endif // DEBUG @@ -1146,14 +1140,14 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) { if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map)) { // If the miss wasn't due to an unseen map, a polymorphic stub // won't help, use the generic stub. - TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "same map added twice"); + TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice"); return generic_stub(); } // If the maximum number of receiver maps has been exceeded, use the generic // version of the IC. if (target_receiver_maps.length() > kMaxKeyedPolymorphism) { - TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "max polymorph exceeded"); + TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded"); return generic_stub(); } @@ -1187,7 +1181,11 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object, if (state() == UNINITIALIZED) stub = string_stub(); } else if (object->IsJSObject()) { Handle<JSObject> receiver = Handle<JSObject>::cast(object); - if (!Object::ToSmi(isolate(), key).is_null()) { + if (receiver->elements()->map() == + isolate()->heap()->sloppy_arguments_elements_map()) { + stub = sloppy_arguments_stub(); + } else if (!Object::ToSmi(isolate(), key).is_null() && + (!target().is_identical_to(sloppy_arguments_stub()))) { stub = LoadElementStub(receiver); } } @@ -1346,42 +1344,13 @@ Handle<Code> StoreIC::initialize_stub(Isolate* isolate, Handle<Code> StoreIC::megamorphic_stub() { - if (kind() == Code::STORE_IC) { - return PropertyICCompiler::ComputeStore(isolate(), MEGAMORPHIC, - extra_ic_state()); - } else { - DCHECK(kind() == Code::KEYED_STORE_IC); - if (strict_mode() == STRICT) { - return isolate()->builtins()->KeyedStoreIC_Generic_Strict(); - } else { - return isolate()->builtins()->KeyedStoreIC_Generic(); - } - } + return PropertyICCompiler::ComputeStore(isolate(), MEGAMORPHIC, + extra_ic_state()); } Handle<Code> StoreIC::generic_stub() const { - if (kind() == Code::STORE_IC) { - return PropertyICCompiler::ComputeStore(isolate(), GENERIC, - extra_ic_state()); - } else { - DCHECK(kind() == Code::KEYED_STORE_IC); - if (strict_mode() == STRICT) { - return isolate()->builtins()->KeyedStoreIC_Generic_Strict(); - } else { - return isolate()->builtins()->KeyedStoreIC_Generic(); - } - } -} - - -Handle<Code> StoreIC::slow_stub() const { - if (kind() == Code::STORE_IC) { - return isolate()->builtins()->StoreIC_Slow(); - } else { - DCHECK(kind() == Code::KEYED_STORE_IC); - 
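KeyedLoadIC::Load above picks its stub from the receiver's elements map: the sloppy-arguments map gets the specialized stub, and only smi-like keys on other receivers go through LoadElementStub, never downgrading an already-installed sloppy-arguments stub. A compact model of that choice:

enum LoadStub { kSloppyArgumentsStub, kElementStub, kKeepCurrent };
LoadStub ChooseKeyedLoadStub(bool has_sloppy_arguments_map,
                             bool key_is_smi_like, bool sloppy_installed) {
  if (has_sloppy_arguments_map) return kSloppyArgumentsStub;
  if (key_is_smi_like && !sloppy_installed) return kElementStub;
  return kKeepCurrent;
}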
return isolate()->builtins()->KeyedStoreIC_Slow(); - } + return PropertyICCompiler::ComputeStore(isolate(), GENERIC, extra_ic_state()); } @@ -1402,11 +1371,9 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value, return; } - bool use_ic = LookupForWrite(lookup, value, store_mode); - if (!use_ic) { - TRACE_GENERIC_IC(isolate(), "StoreIC", "LookupForWrite said 'false'"); - } - Handle<Code> code = use_ic ? ComputeHandler(lookup, value) : slow_stub(); + Handle<Code> code = LookupForWrite(lookup, value, store_mode) + ? ComputeHandler(lookup, value) + : slow_stub(); PatchCache(lookup->name(), code); TRACE_IC("StoreIC", lookup->name()); @@ -1427,10 +1394,7 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup, case LookupIterator::TRANSITION: { Handle<Map> transition = lookup->transition_map(); // Currently not handled by CompileStoreTransition. - if (!holder->HasFastProperties()) { - TRACE_GENERIC_IC(isolate(), "StoreIC", "transition from slow"); - break; - } + if (!holder->HasFastProperties()) break; DCHECK(lookup->IsCacheableTransition()); NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder); @@ -1444,21 +1408,14 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup, } case LookupIterator::ACCESSOR: { - if (!holder->HasFastProperties()) { - TRACE_GENERIC_IC(isolate(), "StoreIC", "accessor on slow map"); - break; - } + if (!holder->HasFastProperties()) break; Handle<Object> accessors = lookup->GetAccessors(); if (accessors->IsExecutableAccessorInfo()) { Handle<ExecutableAccessorInfo> info = Handle<ExecutableAccessorInfo>::cast(accessors); - if (v8::ToCData<Address>(info->setter()) == 0) { - TRACE_GENERIC_IC(isolate(), "StoreIC", "setter == 0"); - break; - } + if (v8::ToCData<Address>(info->setter()) == 0) break; if (!ExecutableAccessorInfo::IsCompatibleReceiverType( isolate(), info, receiver_type())) { - TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver type"); break; } NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder); @@ -1466,10 +1423,7 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup, } else if (accessors->IsAccessorPair()) { Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(), isolate()); - if (!setter->IsJSFunction()) { - TRACE_GENERIC_IC(isolate(), "StoreIC", "setter not a function"); - break; - } + if (!setter->IsJSFunction()) break; Handle<JSFunction> function = Handle<JSFunction>::cast(setter); CallOptimization call_optimization(function); NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder); @@ -1483,7 +1437,6 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup, } // TODO(dcarney): Handle correctly. DCHECK(accessors->IsDeclaredAccessorInfo()); - TRACE_GENERIC_IC(isolate(), "StoreIC", "declared accessor info"); break; } @@ -1524,7 +1477,6 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup, // -------------- Constant properties -------------- DCHECK(lookup->property_details().type() == CONSTANT); - TRACE_GENERIC_IC(isolate(), "StoreIC", "constant property"); break; } @@ -1543,7 +1495,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver, // via megamorphic stubs, since they don't have a map in their relocation info // and so the stubs can't be harvested for the object needed for a map check. 
if (target()->type() != Code::NORMAL) { - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-NORMAL target type"); + TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type"); return generic_stub(); } @@ -1609,14 +1561,14 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver, if (!map_added) { // If the miss wasn't due to an unseen map, a polymorphic stub // won't help, use the generic stub. - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "same map added twice"); + TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice"); return generic_stub(); } // If the maximum number of receiver maps has been exceeded, use the generic // version of the IC. if (target_receiver_maps.length() > kMaxKeyedPolymorphism) { - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "max polymorph exceeded"); + TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded"); return generic_stub(); } @@ -1627,7 +1579,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver, if (store_mode == STANDARD_STORE) { store_mode = old_store_mode; } else if (store_mode != old_store_mode) { - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "store mode mismatch"); + TRACE_GENERIC_IC(isolate(), "KeyedIC", "store mode mismatch"); return generic_stub(); } } @@ -1645,7 +1597,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver, } if (external_arrays != 0 && external_arrays != target_receiver_maps.length()) { - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", + TRACE_GENERIC_IC(isolate(), "KeyedIC", "unsupported combination of external and normal arrays"); return generic_stub(); } @@ -1800,12 +1752,8 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object, StoreIC::Store(object, Handle<String>::cast(key), value, JSReceiver::MAY_BE_STORE_FROM_KEYED), Object); - if (!is_target_set()) { - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", - "unhandled internalized string key"); - TRACE_IC("StoreIC", key); - set_target(*stub); - } + TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic"); + set_target(*stub); return store_handle; } @@ -1818,10 +1766,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object, // expect to be able to trap element sets to objects with those maps in // the runtime to enable optimization of element hole access. 
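The external-array check above enforces homogeneity for polymorphic keyed stores: a stub is built only when either none or all of the tracked receiver maps are external arrays. The predicate on its own:

// True when the map set mixes external and normal arrays, forcing the
// generic stub.
bool MixesExternalAndNormalArrays(int external_arrays, int total_maps) {
  return external_arrays != 0 && external_arrays != total_maps;
}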
Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object); - if (heap_object->map()->IsMapInArrayPrototypeChain()) { - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "map in array prototype"); - use_ic = false; - } + if (heap_object->map()->IsMapInArrayPrototypeChain()) use_ic = false; } if (use_ic) { @@ -1834,8 +1779,6 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object, isolate()->heap()->sloppy_arguments_elements_map()) { if (strict_mode() == SLOPPY) { stub = sloppy_arguments_stub(); - } else { - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "arguments receiver"); } } else if (key_is_smi_like && !(target().is_identical_to(sloppy_arguments_stub()))) { @@ -1846,14 +1789,8 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object, if (!(receiver->map()->DictionaryElementsInPrototypeChainOnly())) { KeyedAccessStoreMode store_mode = GetStoreMode(receiver, key, value); stub = StoreElementStub(receiver, store_mode); - } else { - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "dictionary prototype"); } - } else { - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-smi-like key"); } - } else { - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-JSObject receiver"); } } @@ -1870,9 +1807,6 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object, if (*stub == generic) { TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic"); } - if (*stub == *slow_stub()) { - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "slow stub"); - } DCHECK(!stub.is_null()); set_target(*stub); TRACE_IC("StoreIC", key); diff --git a/src/ic/ic.h b/src/ic/ic.h index d86d2b7b6..57d0d12d9 100644 --- a/src/ic/ic.h +++ b/src/ic/ic.h @@ -371,7 +371,7 @@ class LoadIC : public IC { } } - virtual Handle<Code> megamorphic_stub() OVERRIDE; + virtual Handle<Code> megamorphic_stub(); // Update the inline cache and the global stub cache based on the // lookup result. @@ -414,6 +414,7 @@ class KeyedLoadIC : public LoadIC { } static void GenerateGeneric(MacroAssembler* masm); static void GenerateString(MacroAssembler* masm); + static void GenerateSloppyArguments(MacroAssembler* masm); // Bit mask to be tested against bit field for the cases when // generic stub should go into slow case. @@ -433,6 +434,9 @@ class KeyedLoadIC : public LoadIC { private: Handle<Code> generic_stub() const { return generic_stub(isolate()); } + Handle<Code> sloppy_arguments_stub() { + return isolate()->builtins()->KeyedLoadIC_SloppyArguments(); + } Handle<Code> string_stub() { return isolate()->builtins()->KeyedLoadIC_String(); } @@ -489,12 +493,14 @@ class StoreIC : public IC { JSReceiver::StoreFromKeyed store_mode); protected: - virtual Handle<Code> megamorphic_stub() OVERRIDE; + virtual Handle<Code> megamorphic_stub(); // Stub accessors. 
- Handle<Code> generic_stub() const; + virtual Handle<Code> generic_stub() const; - Handle<Code> slow_stub() const; + virtual Handle<Code> slow_stub() const { + return isolate()->builtins()->StoreIC_Slow(); + } virtual Handle<Code> pre_monomorphic_stub() const { return pre_monomorphic_stub(isolate(), strict_mode()); @@ -575,6 +581,16 @@ class KeyedStoreIC : public StoreIC { return isolate->builtins()->KeyedStoreIC_PreMonomorphic(); } } + virtual Handle<Code> slow_stub() const { + return isolate()->builtins()->KeyedStoreIC_Slow(); + } + virtual Handle<Code> megamorphic_stub() { + if (strict_mode() == STRICT) { + return isolate()->builtins()->KeyedStoreIC_Generic_Strict(); + } else { + return isolate()->builtins()->KeyedStoreIC_Generic(); + } + } Handle<Code> StoreElementStub(Handle<JSObject> receiver, KeyedAccessStoreMode store_mode); @@ -583,6 +599,14 @@ class KeyedStoreIC : public StoreIC { inline void set_target(Code* code); // Stub accessors. + virtual Handle<Code> generic_stub() const { + if (strict_mode() == STRICT) { + return isolate()->builtins()->KeyedStoreIC_Generic_Strict(); + } else { + return isolate()->builtins()->KeyedStoreIC_Generic(); + } + } + Handle<Code> sloppy_arguments_stub() { return isolate()->builtins()->KeyedStoreIC_SloppyArguments(); } diff --git a/src/ic/mips/ic-compiler-mips.cc b/src/ic/mips/ic-compiler-mips.cc index c1e67f9ab..920b52a2f 100644 --- a/src/ic/mips/ic-compiler-mips.cc +++ b/src/ic/mips/ic-compiler-mips.cc @@ -27,11 +27,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types, // In case we are compiling an IC for dictionary loads and stores, just // check whether the name is unique. if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { - Register tmp = scratch1(); - __ JumpIfSmi(this->name(), &miss); - __ lw(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset)); - __ lbu(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(tmp, &miss); + __ JumpIfNotUniqueName(this->name(), &miss); } else { __ Branch(&miss, ne, this->name(), Operand(name)); } diff --git a/src/ic/mips/ic-mips.cc b/src/ic/mips/ic-mips.cc index d97a6ba06..72a85b6bc 100644 --- a/src/ic/mips/ic-mips.cc +++ b/src/ic/mips/ic-mips.cc @@ -374,6 +374,32 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, } +void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { + // The return address is in ra. + Register receiver = LoadDescriptor::ReceiverRegister(); + Register key = LoadDescriptor::NameRegister(); + DCHECK(receiver.is(a1)); + DCHECK(key.is(a2)); + + Label slow, notin; + MemOperand mapped_location = GenerateMappedArgumentsLookup( + masm, receiver, key, a0, a3, t0, ¬in, &slow); + __ Ret(USE_DELAY_SLOT); + __ lw(v0, mapped_location); + __ bind(¬in); + // The unmapped lookup expects that the parameter map is in a0. 
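KeyedStoreIC's restored stub accessors above both key the builtin on language mode; generic_stub() and megamorphic_stub() resolve to the same strict/sloppy pair. The dispatch as a sketch:

enum StrictMode { SLOPPY, STRICT };
const char* KeyedStoreGenericBuiltin(StrictMode mode) {
  return mode == STRICT ? "KeyedStoreIC_Generic_Strict"
                        : "KeyedStoreIC_Generic";
}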
+ MemOperand unmapped_location = + GenerateUnmappedArgumentsLookup(masm, key, a0, a3, &slow); + __ lw(a0, unmapped_location); + __ LoadRoot(a3, Heap::kTheHoleValueRootIndex); + __ Branch(&slow, eq, a0, Operand(a3)); + __ Ret(USE_DELAY_SLOT); + __ mov(v0, a0); + __ bind(&slow); + GenerateMiss(masm); +} + + void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { Register receiver = StoreDescriptor::ReceiverRegister(); Register key = StoreDescriptor::NameRegister(); diff --git a/src/ic/mips64/ic-compiler-mips64.cc b/src/ic/mips64/ic-compiler-mips64.cc index 796ed87f7..3c6eecc27 100644 --- a/src/ic/mips64/ic-compiler-mips64.cc +++ b/src/ic/mips64/ic-compiler-mips64.cc @@ -27,11 +27,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types, // In case we are compiling an IC for dictionary loads and stores, just // check whether the name is unique. if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { - Register tmp = scratch1(); - __ JumpIfSmi(this->name(), &miss); - __ ld(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset)); - __ lbu(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(tmp, &miss); + __ JumpIfNotUniqueName(this->name(), &miss); } else { __ Branch(&miss, ne, this->name(), Operand(name)); } diff --git a/src/ic/mips64/ic-mips64.cc b/src/ic/mips64/ic-mips64.cc index a5d9fe78f..0ac35ffbc 100644 --- a/src/ic/mips64/ic-mips64.cc +++ b/src/ic/mips64/ic-mips64.cc @@ -372,6 +372,32 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, } +void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { + // The return address is in ra. + Register receiver = LoadDescriptor::ReceiverRegister(); + Register key = LoadDescriptor::NameRegister(); + DCHECK(receiver.is(a1)); + DCHECK(key.is(a2)); + + Label slow, notin; + MemOperand mapped_location = GenerateMappedArgumentsLookup( + masm, receiver, key, a0, a3, a4, ¬in, &slow); + __ Ret(USE_DELAY_SLOT); + __ ld(v0, mapped_location); + __ bind(¬in); + // The unmapped lookup expects that the parameter map is in a2. + MemOperand unmapped_location = + GenerateUnmappedArgumentsLookup(masm, key, a0, a3, &slow); + __ ld(a0, unmapped_location); + __ LoadRoot(a3, Heap::kTheHoleValueRootIndex); + __ Branch(&slow, eq, a0, Operand(a3)); + __ Ret(USE_DELAY_SLOT); + __ mov(v0, a0); + __ bind(&slow); + GenerateMiss(masm); +} + + void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { Register receiver = StoreDescriptor::ReceiverRegister(); Register key = StoreDescriptor::NameRegister(); diff --git a/src/ic/x64/ic-compiler-x64.cc b/src/ic/x64/ic-compiler-x64.cc index a5848b6dd..3d7cc685d 100644 --- a/src/ic/x64/ic-compiler-x64.cc +++ b/src/ic/x64/ic-compiler-x64.cc @@ -82,11 +82,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types, // In case we are compiling an IC for dictionary loads and stores, just // check whether the name is unique. 
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { - Register tmp = scratch1(); - __ JumpIfSmi(this->name(), &miss); - __ movp(tmp, FieldOperand(this->name(), HeapObject::kMapOffset)); - __ movzxbp(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(tmp, &miss); + __ JumpIfNotUniqueName(this->name(), &miss); } else { __ Cmp(this->name(), name); __ j(not_equal, &miss); diff --git a/src/ic/x64/ic-x64.cc b/src/ic/x64/ic-x64.cc index ad79f3042..dc1b86b12 100644 --- a/src/ic/x64/ic-x64.cc +++ b/src/ic/x64/ic-x64.cc @@ -722,6 +722,31 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, } +void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { + // The return address is on the stack. + Register receiver = LoadDescriptor::ReceiverRegister(); + Register key = LoadDescriptor::NameRegister(); + DCHECK(receiver.is(rdx)); + DCHECK(key.is(rcx)); + + Label slow, notin; + Operand mapped_location = GenerateMappedArgumentsLookup( + masm, receiver, key, rbx, rax, rdi, ¬in, &slow); + __ movp(rax, mapped_location); + __ Ret(); + __ bind(¬in); + // The unmapped lookup expects that the parameter map is in rbx. + Operand unmapped_location = + GenerateUnmappedArgumentsLookup(masm, key, rbx, rax, &slow); + __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex); + __ j(equal, &slow); + __ movp(rax, unmapped_location); + __ Ret(); + __ bind(&slow); + GenerateMiss(masm); +} + + void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { // The return address is on the stack. Label slow, notin; diff --git a/src/ic/x87/handler-compiler-x87.cc b/src/ic/x87/handler-compiler-x87.cc index e706998c3..8370f68cb 100644 --- a/src/ic/x87/handler-compiler-x87.cc +++ b/src/ic/x87/handler-compiler-x87.cc @@ -411,7 +411,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( // Update the write barrier for the map field. __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2, - kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); if (details.type() == CONSTANT) { DCHECK(value_reg.is(eax)); @@ -445,7 +445,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( __ mov(storage_reg, value_reg); } __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1, - kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check); + EMIT_REMEMBERED_SET, smi_check); } } else { // Write to the properties array. @@ -464,7 +464,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( __ mov(storage_reg, value_reg); } __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg, - kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check); + EMIT_REMEMBERED_SET, smi_check); } } diff --git a/src/ic/x87/ic-compiler-x87.cc b/src/ic/x87/ic-compiler-x87.cc index 20b47e726..2467a6d7d 100644 --- a/src/ic/x87/ic-compiler-x87.cc +++ b/src/ic/x87/ic-compiler-x87.cc @@ -48,11 +48,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types, // In case we are compiling an IC for dictionary loads and stores, just // check whether the name is unique. 
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { - Register tmp = scratch1(); - __ JumpIfSmi(this->name(), &miss); - __ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset)); - __ movzx_b(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(tmp, &miss); + __ JumpIfNotUniqueName(this->name(), &miss); } else { __ cmp(this->name(), Immediate(name)); __ j(not_equal, &miss); diff --git a/src/ic/x87/ic-x87.cc b/src/ic/x87/ic-x87.cc index 9c090c56a..2cd6ea11f 100644 --- a/src/ic/x87/ic-x87.cc +++ b/src/ic/x87/ic-x87.cc @@ -133,7 +133,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label, // Update write barrier. Make sure not to clobber the value. __ mov(r1, value); - __ RecordWrite(elements, r0, r1, kDontSaveFPRegs); + __ RecordWrite(elements, r0, r1); } @@ -505,6 +505,32 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { } +void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { + // The return address is on the stack. + Register receiver = LoadDescriptor::ReceiverRegister(); + Register key = LoadDescriptor::NameRegister(); + DCHECK(receiver.is(edx)); + DCHECK(key.is(ecx)); + + Label slow, notin; + Factory* factory = masm->isolate()->factory(); + Operand mapped_location = GenerateMappedArgumentsLookup( + masm, receiver, key, ebx, eax, ¬in, &slow); + __ mov(eax, mapped_location); + __ Ret(); + __ bind(¬in); + // The unmapped lookup expects that the parameter map is in ebx. + Operand unmapped_location = + GenerateUnmappedArgumentsLookup(masm, key, ebx, eax, &slow); + __ cmp(unmapped_location, factory->the_hole_value()); + __ j(equal, &slow); + __ mov(eax, unmapped_location); + __ Ret(); + __ bind(&slow); + GenerateMiss(masm); +} + + void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { // Return address is on the stack. Label slow, notin; @@ -520,7 +546,7 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { __ mov(mapped_location, value); __ lea(ecx, mapped_location); __ mov(edx, value); - __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs); + __ RecordWrite(ebx, ecx, edx); __ Ret(); __ bind(¬in); // The unmapped lookup expects that the parameter map is in ebx. @@ -529,7 +555,7 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { __ mov(unmapped_location, value); __ lea(edi, unmapped_location); __ mov(edx, value); - __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs); + __ RecordWrite(ebx, edi, edx); __ Ret(); __ bind(&slow); GenerateMiss(masm); @@ -598,8 +624,7 @@ static void KeyedStoreGenerateGenericHelper( __ mov(FixedArrayElementOperand(ebx, key), value); // Update write barrier for the elements array address. __ mov(edx, value); // Preserve the value which is returned. 
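The RecordWrite call sites above follow a fixed discipline: store first, then hand the barrier a copy of the value along with the slot address, because the barrier may clobber its register arguments; the original value stays live as the IC's return value. A loose model with a stand-in barrier (not the real macro-assembler API):

#include <cstdio>
using Object = int;
void RecordWriteStandIn(Object** slot, Object* copy) {
  // Simulates the write barrier: may trash the registers it was given.
  std::printf("remember slot %p\n", static_cast<void*>(slot));
  copy = nullptr;  // clobbers the copy, not the caller's value
}
Object* StoreWithBarrier(Object** slot, Object* value) {
  *slot = value;                   // the actual store
  Object* copy = value;            // barrier consumes a copy...
  RecordWriteStandIn(slot, copy);  // ...so `value` survives
  return value;                    // preserved return value
}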
- __ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + __ RecordWriteArray(ebx, edx, key, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); __ ret(0); __ bind(fast_double); diff --git a/src/json-parser.h b/src/json-parser.h index d3148c9e2..caa2e14d2 100644 --- a/src/json-parser.h +++ b/src/json-parser.h @@ -426,7 +426,8 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() { if (value.is_null()) return ReportUnexpectedCharacter(); } - Runtime::DefineObjectProperty(json_object, key, value, NONE).Check(); + JSObject::SetOwnPropertyIgnoreAttributes( + json_object, key, value, NONE).Assert(); } while (MatchSkipWhiteSpace(',')); if (c0_ != '}') { return ReportUnexpectedCharacter(); diff --git a/src/lithium-codegen.cc b/src/lithium-codegen.cc index 0207188bd..86401167a 100644 --- a/src/lithium-codegen.cc +++ b/src/lithium-codegen.cc @@ -147,12 +147,9 @@ void LCodeGenBase::Comment(const char* format, ...) { } -void LCodeGenBase::DeoptComment(const Deoptimizer::Reason& reason) { - OStringStream os; - os << ";;; deoptimize at " << HSourcePosition(reason.raw_position) << " " - << reason.mnemonic; - if (reason.detail != NULL) os << ": " << reason.detail; - Comment("%s", os.c_str()); +void LCodeGenBase::DeoptComment(const char* mnemonic, const char* reason) { + Comment(";;; deoptimize %s: %s", mnemonic, + reason == NULL ? "unknown reason" : reason); } diff --git a/src/lithium-codegen.h b/src/lithium-codegen.h index 301debeb9..d57e614d1 100644 --- a/src/lithium-codegen.h +++ b/src/lithium-codegen.h @@ -8,7 +8,6 @@ #include "src/v8.h" #include "src/compiler.h" -#include "src/deoptimizer.h" namespace v8 { namespace internal { @@ -34,7 +33,7 @@ class LCodeGenBase BASE_EMBEDDED { HGraph* graph() const; void FPRINTF_CHECKING Comment(const char* format, ...); - void DeoptComment(const Deoptimizer::Reason& reason); + void DeoptComment(const char* mnemonic, const char* reason); bool GenerateBody(); virtual void GenerateBodyInstructionPre(LInstruction* instr) {} diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc index a9c10b8e5..0154508dc 100644 --- a/src/mips/code-stubs-mips.cc +++ b/src/mips/code-stubs-mips.cc @@ -3644,8 +3644,8 @@ void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) { __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(tmp1, &miss); - __ JumpIfNotUniqueNameInstanceType(tmp2, &miss); + __ JumpIfNotUniqueName(tmp1, &miss); + __ JumpIfNotUniqueName(tmp2, &miss); // Use a0 as result __ mov(v0, a0); @@ -3899,7 +3899,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); __ lbu(entity_name, FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(entity_name, miss); + __ JumpIfNotUniqueName(entity_name, miss); __ bind(&good); // Restore the properties. 
@@ -4076,7 +4076,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); __ lbu(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary); + __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary); } } diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc index 568b8bd3c..53915c567 100644 --- a/src/mips/full-codegen-mips.cc +++ b/src/mips/full-codegen-mips.cc @@ -1342,24 +1342,6 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) { } -void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) { - Comment cnmt(masm_, "[ SuperReference "); - - __ lw(LoadDescriptor::ReceiverRegister(), - MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - - Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol()); - __ li(LoadDescriptor::NameRegister(), home_object_symbol); - - CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId()); - - Label done; - __ Branch(&done, ne, v0, Operand(isolate()->factory()->undefined_value())); - __ CallRuntime(Runtime::kThrowNonMethodError, 0); - __ bind(&done); -} - - void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy, TypeofState typeof_state, Label* slow) { @@ -2299,7 +2281,6 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) { void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); Literal* key = prop->key()->AsLiteral(); - __ li(LoadDescriptor::NameRegister(), Operand(key->value())); if (FLAG_vector_ics) { __ li(VectorLoadICDescriptor::SlotRegister(), @@ -2311,21 +2292,6 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { } -void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) { - SetSourcePosition(prop->position()); - Literal* key = prop->key()->AsLiteral(); - DCHECK(!key->value()->IsSmi()); - DCHECK(prop->IsSuperAccess()); - - SuperReference* super_ref = prop->obj()->AsSuperReference(); - EmitLoadHomeObject(super_ref); - __ Push(v0); - VisitForStackValue(super_ref->this_var()); - __ Push(key->value()); - __ CallRuntime(Runtime::kLoadFromSuper, 3); -} - - void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code(); @@ -2619,13 +2585,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) { Expression* key = expr->key(); if (key->IsPropertyName()) { - if (!expr->IsSuperAccess()) { - VisitForAccumulatorValue(expr->obj()); - __ Move(LoadDescriptor::ReceiverRegister(), v0); - EmitNamedPropertyLoad(expr); - } else { - EmitNamedSuperPropertyLoad(expr); - } + VisitForAccumulatorValue(expr->obj()); + __ Move(LoadDescriptor::ReceiverRegister(), v0); + EmitNamedPropertyLoad(expr); PrepareForBailoutForId(expr->LoadId(), TOS_REG); context()->Plug(v0); } else { @@ -2665,7 +2627,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) { } else { // Load the function from the receiver. 
DCHECK(callee->IsProperty()); - DCHECK(!callee->AsProperty()->IsSuperAccess()); __ lw(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0)); EmitNamedPropertyLoad(callee->AsProperty()); PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); @@ -2679,44 +2640,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) { } -void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) { - Expression* callee = expr->expression(); - DCHECK(callee->IsProperty()); - Property* prop = callee->AsProperty(); - DCHECK(prop->IsSuperAccess()); - - SetSourcePosition(prop->position()); - Literal* key = prop->key()->AsLiteral(); - DCHECK(!key->value()->IsSmi()); - // Load the function from the receiver. - const Register scratch = a1; - SuperReference* super_ref = prop->obj()->AsSuperReference(); - EmitLoadHomeObject(super_ref); - __ Push(v0); - VisitForAccumulatorValue(super_ref->this_var()); - __ Push(v0); - __ lw(scratch, MemOperand(sp, kPointerSize)); - __ Push(scratch, v0); - __ Push(key->value()); - - // Stack here: - // - home_object - // - this (receiver) - // - home_object <-- LoadFromSuper will pop here and below. - // - this (receiver) - // - key - __ CallRuntime(Runtime::kLoadFromSuper, 3); - - // Replace home_object with target function. - __ sw(v0, MemOperand(sp, kPointerSize)); - - // Stack here: - // - target function - // - this (receiver) - EmitCall(expr, CallICState::METHOD); -} - - // Code common for calls using the IC. void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) { @@ -2884,20 +2807,13 @@ void FullCodeGenerator::VisitCall(Call* expr) { EmitCall(expr); } else if (call_type == Call::PROPERTY_CALL) { Property* property = callee->AsProperty(); - bool is_named_call = property->key()->IsPropertyName(); - // super.x() is handled in EmitCallWithLoadIC. 
- if (property->IsSuperAccess() && is_named_call) { - EmitSuperCallWithLoadIC(expr); + { PreservePositionScope scope(masm()->positions_recorder()); + VisitForStackValue(property->obj()); + } + if (property->key()->IsPropertyName()) { + EmitCallWithLoadIC(expr); } else { - { - PreservePositionScope scope(masm()->positions_recorder()); - VisitForStackValue(property->obj()); - } - if (is_named_call) { - EmitCallWithLoadIC(expr); - } else { - EmitKeyedCallWithLoadIC(expr, property->key()); - } + EmitKeyedCallWithLoadIC(expr, property->key()); } } else { DCHECK(call_type == Call::OTHER_CALL); diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc index 497d10f9e..089fc5cf3 100644 --- a/src/mips/lithium-codegen-mips.cc +++ b/src/mips/lithium-codegen-mips.cc @@ -324,29 +324,31 @@ bool LCodeGen::GenerateDeferredCode() { bool LCodeGen::GenerateJumpTable() { - if (jump_table_.length() > 0) { + if (deopt_jump_table_.length() > 0) { Label needs_frame, call_deopt_entry; Comment(";;; -------------------- Jump table --------------------"); - Address base = jump_table_[0].address; + Address base = deopt_jump_table_[0].address; Register entry_offset = t9; - int length = jump_table_.length(); + int length = deopt_jump_table_.length(); for (int i = 0; i < length; i++) { - Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i]; - __ bind(&table_entry->label); + __ bind(&deopt_jump_table_[i].label); - DCHECK(table_entry->bailout_type == jump_table_[0].bailout_type); - Address entry = table_entry->address; - DeoptComment(table_entry->reason); + Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type; + DCHECK(type == deopt_jump_table_[0].bailout_type); + Address entry = deopt_jump_table_[i].address; + int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); + DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id); + Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); // Second-level deopt table entries are contiguous and small, so instead // of loading the full, absolute address of each one, load an immediate // offset which will be added to the base address later. __ li(entry_offset, Operand(entry - base)); - if (table_entry->needs_frame) { + if (deopt_jump_table_[i].needs_frame) { DCHECK(!info()->saves_caller_doubles()); if (needs_frame.is_bound()) { __ Branch(&needs_frame); @@ -817,7 +819,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, Deoptimizer::BailoutType bailout_type, Register src1, const Operand& src2, - const char* detail) { + const char* reason) { LEnvironment* environment = instr->environment(); RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); DCHECK(environment->HasBeenRegistered()); @@ -858,36 +860,36 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, __ bind(&skip); } - Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), - instr->Mnemonic(), detail); DCHECK(info()->IsStub() || frame_is_built_); // Go through jump table if we need to handle condition, build frame, or // restore caller doubles. 
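// Sketch (not part of the patch): the branch below picks between two
// emission strategies. Only an unconditional deopt (condition == al, the
// "always" condition), in a fully built frame, with no caller doubles to
// restore, may call the deopt entry directly:
//
//   bool direct_call = (condition == al) && frame_is_built_ &&
//                      !info()->saves_caller_doubles();
//
// Every other case routes through the shared jump table, so the
// frame-building and double-restoring glue is emitted only once.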
if (condition == al && frame_is_built_ && !info()->saves_caller_doubles()) { - DeoptComment(reason); + DeoptComment(instr->Mnemonic(), reason); __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2); } else { - Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, - !frame_is_built_); // We often have several deopts to the same entry, reuse the last // jump entry if this is the case. - if (jump_table_.is_empty() || - !table_entry.IsEquivalentTo(jump_table_.last())) { - jump_table_.Add(table_entry, zone()); + if (deopt_jump_table_.is_empty() || + (deopt_jump_table_.last().address != entry) || + (deopt_jump_table_.last().bailout_type != bailout_type) || + (deopt_jump_table_.last().needs_frame != !frame_is_built_)) { + Deoptimizer::JumpTableEntry table_entry(entry, instr->Mnemonic(), reason, + bailout_type, !frame_is_built_); + deopt_jump_table_.Add(table_entry, zone()); } - __ Branch(&jump_table_.last().label, condition, src1, src2); + __ Branch(&deopt_jump_table_.last().label, condition, src1, src2); } } void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, Register src1, const Operand& src2, - const char* detail) { + const char* reason) { Deoptimizer::BailoutType bailout_type = info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; - DeoptimizeIf(condition, instr, bailout_type, src1, src2, detail); + DeoptimizeIf(condition, instr, bailout_type, src1, src2, reason); } @@ -4922,11 +4924,13 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ bind(&check_false); __ LoadRoot(at, Heap::kFalseValueRootIndex); - DeoptimizeIf(ne, instr, scratch2, Operand(at), "cannot truncate"); + __ RecordComment("Deferred TaggedToI: cannot truncate"); + DeoptimizeIf(ne, instr, scratch2, Operand(at)); __ Branch(USE_DELAY_SLOT, &done); __ mov(input_reg, zero_reg); // In delay slot. } else { - DeoptimizeIf(ne, instr, scratch1, Operand(at), "not a heap number"); + __ RecordComment("Deferred TaggedToI: not a heap number"); + DeoptimizeIf(ne, instr, scratch1, Operand(at)); // Load the double value. 
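// Sketch (not part of the patch): the deopt conditions emitted once the
// double value is loaded amount to this plain-C++ round trip:
//
//   int32_t t = static_cast<int32_t>(value);    // truncate, as the FPU does
//   bool ok = static_cast<double>(t) == value;  // re-widen and compare
//
// Inequality means the value was not exactly representable as an int32;
// a NaN input also fails the comparison, hence the combined
// "lost precision or NaN" bailout.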
__ ldc1(double_scratch, @@ -4941,15 +4945,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { except_flag, kCheckForInexactConversion); - DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg), - "lost precision or NaN"); + __ RecordComment("Deferred TaggedToI: lost precision or NaN"); + DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg)); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { __ Branch(&done, ne, input_reg, Operand(zero_reg)); __ Mfhc1(scratch1, double_scratch); __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg), "minus zero"); + __ RecordComment("Deferred TaggedToI: minus zero"); + DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg)); } } __ bind(&done); diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h index 5402c9a7b..f5087a9a6 100644 --- a/src/mips/lithium-codegen-mips.h +++ b/src/mips/lithium-codegen-mips.h @@ -25,7 +25,7 @@ class LCodeGen: public LCodeGenBase { LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) : LCodeGenBase(chunk, assembler, info), deoptimizations_(4, info->zone()), - jump_table_(4, info->zone()), + deopt_jump_table_(4, info->zone()), deoptimization_literals_(8, info->zone()), inlined_function_count_(0), scope_(info->scope()), @@ -232,11 +232,11 @@ class LCodeGen: public LCodeGenBase { Deoptimizer::BailoutType bailout_type, Register src1 = zero_reg, const Operand& src2 = Operand(zero_reg), - const char* detail = NULL); + const char* reason = NULL); void DeoptimizeIf(Condition condition, LInstruction* instr, Register src1 = zero_reg, const Operand& src2 = Operand(zero_reg), - const char* detail = NULL); + const char* reason = NULL); void AddToTranslation(LEnvironment* environment, Translation* translation, @@ -364,7 +364,7 @@ class LCodeGen: public LCodeGenBase { void EmitVectorLoadICRegisters(T* instr); ZoneList<LEnvironment*> deoptimizations_; - ZoneList<Deoptimizer::JumpTableEntry> jump_table_; + ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_; ZoneList<Handle<Object> > deoptimization_literals_; int inlined_function_count_; Scope* const scope_; diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc index 604293bb3..e7940f444 100644 --- a/src/mips/macro-assembler-mips.cc +++ b/src/mips/macro-assembler-mips.cc @@ -3582,8 +3582,8 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result, } -void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg, - Label* not_unique_name) { +void MacroAssembler::JumpIfNotUniqueName(Register reg, + Label* not_unique_name) { STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); Label succeed; And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask)); diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h index ce52986f9..5a7905c7e 100644 --- a/src/mips/macro-assembler-mips.h +++ b/src/mips/macro-assembler-mips.h @@ -1483,7 +1483,7 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch, Label* failure); - void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name); + void JumpIfNotUniqueName(Register reg, Label* not_unique_name); void EmitSeqStringSetCharCheck(Register string, Register index, diff --git a/src/mips64/code-stubs-mips64.cc b/src/mips64/code-stubs-mips64.cc index 60263b524..bd6a016bc 100644 --- a/src/mips64/code-stubs-mips64.cc +++ b/src/mips64/code-stubs-mips64.cc @@ -3681,8 
+3681,8 @@ void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) { __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(tmp1, &miss); - __ JumpIfNotUniqueNameInstanceType(tmp2, &miss); + __ JumpIfNotUniqueName(tmp1, &miss); + __ JumpIfNotUniqueName(tmp2, &miss); // Use a0 as result __ mov(v0, a0); @@ -3937,7 +3937,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, __ ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); __ lbu(entity_name, FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(entity_name, miss); + __ JumpIfNotUniqueName(entity_name, miss); __ bind(&good); // Restore the properties. @@ -4114,7 +4114,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { __ ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); __ lbu(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary); + __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary); } } diff --git a/src/mips64/full-codegen-mips64.cc b/src/mips64/full-codegen-mips64.cc index 5c26f1639..291e4ab87 100644 --- a/src/mips64/full-codegen-mips64.cc +++ b/src/mips64/full-codegen-mips64.cc @@ -1337,24 +1337,6 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) { } -void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) { - Comment cnmt(masm_, "[ SuperReference "); - - __ ld(LoadDescriptor::ReceiverRegister(), - MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - - Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol()); - __ li(LoadDescriptor::NameRegister(), home_object_symbol); - - CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId()); - - Label done; - __ Branch(&done, ne, v0, Operand(isolate()->factory()->undefined_value())); - __ CallRuntime(Runtime::kThrowNonMethodError, 0); - __ bind(&done); -} - - void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy, TypeofState typeof_state, Label* slow) { @@ -2296,8 +2278,6 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) { void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); Literal* key = prop->key()->AsLiteral(); - DCHECK(!prop->IsSuperAccess()); - __ li(LoadDescriptor::NameRegister(), Operand(key->value())); if (FLAG_vector_ics) { __ li(VectorLoadICDescriptor::SlotRegister(), @@ -2309,21 +2289,6 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { } -void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) { - SetSourcePosition(prop->position()); - Literal* key = prop->key()->AsLiteral(); - DCHECK(!key->value()->IsSmi()); - DCHECK(prop->IsSuperAccess()); - - SuperReference* super_ref = prop->obj()->AsSuperReference(); - EmitLoadHomeObject(super_ref); - __ Push(v0); - VisitForStackValue(super_ref->this_var()); - __ Push(key->value()); - __ CallRuntime(Runtime::kLoadFromSuper, 3); -} - - void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); // Call keyed load IC. It has register arguments receiver and key. 
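Both MIPS ports drop EmitLoadHomeObject, EmitNamedSuperPropertyLoad and EmitSuperCallWithLoadIC here, so named property loads and calls fall back to the generic LoadIC path. For reference, the deleted emitters bottomed out in a three-argument runtime call; the argument order, reconstructed from the removed stack-layout comments (sketch only):

//   home_object   <-- pushed first
//   this          (receiver)
//   key
__ CallRuntime(Runtime::kLoadFromSuper, 3);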
@@ -2620,13 +2585,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) { Expression* key = expr->key(); if (key->IsPropertyName()) { - if (!expr->IsSuperAccess()) { - VisitForAccumulatorValue(expr->obj()); - __ Move(LoadDescriptor::ReceiverRegister(), v0); - EmitNamedPropertyLoad(expr); - } else { - EmitNamedSuperPropertyLoad(expr); - } + VisitForAccumulatorValue(expr->obj()); + __ Move(LoadDescriptor::ReceiverRegister(), v0); + EmitNamedPropertyLoad(expr); PrepareForBailoutForId(expr->LoadId(), TOS_REG); context()->Plug(v0); } else { @@ -2666,7 +2627,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) { } else { // Load the function from the receiver. DCHECK(callee->IsProperty()); - DCHECK(!callee->AsProperty()->IsSuperAccess()); __ ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0)); EmitNamedPropertyLoad(callee->AsProperty()); PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); @@ -2680,44 +2640,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) { } -void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) { - Expression* callee = expr->expression(); - DCHECK(callee->IsProperty()); - Property* prop = callee->AsProperty(); - DCHECK(prop->IsSuperAccess()); - - SetSourcePosition(prop->position()); - Literal* key = prop->key()->AsLiteral(); - DCHECK(!key->value()->IsSmi()); - // Load the function from the receiver. - const Register scratch = a1; - SuperReference* super_ref = prop->obj()->AsSuperReference(); - EmitLoadHomeObject(super_ref); - __ Push(v0); - VisitForAccumulatorValue(super_ref->this_var()); - __ Push(v0); - __ ld(scratch, MemOperand(sp, kPointerSize)); - __ Push(scratch, v0); - __ Push(key->value()); - - // Stack here: - // - home_object - // - this (receiver) - // - home_object <-- LoadFromSuper will pop here and below. - // - this (receiver) - // - key - __ CallRuntime(Runtime::kLoadFromSuper, 3); - - // Replace home_object with target function. - __ sd(v0, MemOperand(sp, kPointerSize)); - - // Stack here: - // - target function - // - this (receiver) - EmitCall(expr, CallICState::METHOD); -} - - // Code common for calls using the IC. void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) { @@ -2884,20 +2806,13 @@ void FullCodeGenerator::VisitCall(Call* expr) { EmitCall(expr); } else if (call_type == Call::PROPERTY_CALL) { Property* property = callee->AsProperty(); - bool is_named_call = property->key()->IsPropertyName(); - // super.x() is handled in EmitCallWithLoadIC. 
- if (property->IsSuperAccess() && is_named_call) { - EmitSuperCallWithLoadIC(expr); + { PreservePositionScope scope(masm()->positions_recorder()); + VisitForStackValue(property->obj()); + } + if (property->key()->IsPropertyName()) { + EmitCallWithLoadIC(expr); } else { - { - PreservePositionScope scope(masm()->positions_recorder()); - VisitForStackValue(property->obj()); - } - if (is_named_call) { - EmitCallWithLoadIC(expr); - } else { - EmitKeyedCallWithLoadIC(expr, property->key()); - } + EmitKeyedCallWithLoadIC(expr, property->key()); } } else { DCHECK(call_type == Call::OTHER_CALL); diff --git a/src/mips64/lithium-codegen-mips64.cc b/src/mips64/lithium-codegen-mips64.cc index 8a0a44991..de619d6a8 100644 --- a/src/mips64/lithium-codegen-mips64.cc +++ b/src/mips64/lithium-codegen-mips64.cc @@ -300,20 +300,22 @@ bool LCodeGen::GenerateDeferredCode() { bool LCodeGen::GenerateJumpTable() { - if (jump_table_.length() > 0) { + if (deopt_jump_table_.length() > 0) { Comment(";;; -------------------- Jump table --------------------"); } Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); Label table_start; __ bind(&table_start); Label needs_frame; - for (int i = 0; i < jump_table_.length(); i++) { - Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i]; - __ bind(&table_entry->label); - Address entry = table_entry->address; - DeoptComment(table_entry->reason); + for (int i = 0; i < deopt_jump_table_.length(); i++) { + __ bind(&deopt_jump_table_[i].label); + Address entry = deopt_jump_table_[i].address; + Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type; + int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); + DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id); + Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry))); - if (table_entry->needs_frame) { + if (deopt_jump_table_[i].needs_frame) { DCHECK(!info()->saves_caller_doubles()); if (needs_frame.is_bound()) { __ Branch(&needs_frame); @@ -767,7 +769,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, Deoptimizer::BailoutType bailout_type, Register src1, const Operand& src2, - const char* detail) { + const char* reason) { LEnvironment* environment = instr->environment(); RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); DCHECK(environment->HasBeenRegistered()); @@ -808,36 +810,36 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, __ bind(&skip); } - Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), - instr->Mnemonic(), detail); DCHECK(info()->IsStub() || frame_is_built_); // Go through jump table if we need to handle condition, build frame, or // restore caller doubles. if (condition == al && frame_is_built_ && !info()->saves_caller_doubles()) { - DeoptComment(reason); + DeoptComment(instr->Mnemonic(), reason); __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2); } else { - Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, - !frame_is_built_); // We often have several deopts to the same entry, reuse the last // jump entry if this is the case. 
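// Sketch (not part of the patch): the explicit comparisons below inline
// what the removed JumpTableEntry::IsEquivalentTo call expressed — two
// deopts may share one table entry only when all three fields agree:
//
//   bool same = last.address == entry &&
//               last.bailout_type == bailout_type &&
//               last.needs_frame == !frame_is_built_;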
- if (jump_table_.is_empty() || - !table_entry.IsEquivalentTo(jump_table_.last())) { - jump_table_.Add(table_entry, zone()); + if (deopt_jump_table_.is_empty() || + (deopt_jump_table_.last().address != entry) || + (deopt_jump_table_.last().bailout_type != bailout_type) || + (deopt_jump_table_.last().needs_frame != !frame_is_built_)) { + Deoptimizer::JumpTableEntry table_entry(entry, instr->Mnemonic(), reason, + bailout_type, !frame_is_built_); + deopt_jump_table_.Add(table_entry, zone()); } - __ Branch(&jump_table_.last().label, condition, src1, src2); + __ Branch(&deopt_jump_table_.last().label, condition, src1, src2); } } void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, Register src1, const Operand& src2, - const char* detail) { + const char* reason) { Deoptimizer::BailoutType bailout_type = info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; - DeoptimizeIf(condition, instr, bailout_type, src1, src2, detail); + DeoptimizeIf(condition, instr, bailout_type, src1, src2, reason); } @@ -4954,11 +4956,13 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ bind(&check_false); __ LoadRoot(at, Heap::kFalseValueRootIndex); - DeoptimizeIf(ne, instr, scratch2, Operand(at), "cannot truncate"); + __ RecordComment("Deferred TaggedToI: cannot truncate"); + DeoptimizeIf(ne, instr, scratch2, Operand(at)); __ Branch(USE_DELAY_SLOT, &done); __ mov(input_reg, zero_reg); // In delay slot. } else { - DeoptimizeIf(ne, instr, scratch1, Operand(at), "not a heap number"); + __ RecordComment("Deferred TaggedToI: not a heap number"); + DeoptimizeIf(ne, instr, scratch1, Operand(at)); // Load the double value. __ ldc1(double_scratch, @@ -4973,15 +4977,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { except_flag, kCheckForInexactConversion); - DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg), - "lost precision or NaN"); + __ RecordComment("Deferred TaggedToI: lost precision or NaN"); + DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg)); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { __ Branch(&done, ne, input_reg, Operand(zero_reg)); __ mfhc1(scratch1, double_scratch); // Get exponent/sign bits. 
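// Sketch (not part of the patch): mfhc1 moved the high word of the
// double into scratch1, and the mask below isolates the IEEE-754 sign
// bit. For a value already known to compare equal to zero, a set sign
// bit means -0.0. Plain C++ equivalent:
//
//   uint64_t bits;
//   std::memcpy(&bits, &value, sizeof bits);
//   bool minus_zero = (value == 0.0) && (bits >> 63) != 0;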
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg), "minus zero"); + __ RecordComment("Deferred TaggedToI: minus zero"); + DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg)); } } __ bind(&done); diff --git a/src/mips64/lithium-codegen-mips64.h b/src/mips64/lithium-codegen-mips64.h index a4b7adbd5..3207f19fb 100644 --- a/src/mips64/lithium-codegen-mips64.h +++ b/src/mips64/lithium-codegen-mips64.h @@ -25,7 +25,7 @@ class LCodeGen: public LCodeGenBase { LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) : LCodeGenBase(chunk, assembler, info), deoptimizations_(4, info->zone()), - jump_table_(4, info->zone()), + deopt_jump_table_(4, info->zone()), deoptimization_literals_(8, info->zone()), inlined_function_count_(0), scope_(info->scope()), @@ -233,11 +233,11 @@ class LCodeGen: public LCodeGenBase { Deoptimizer::BailoutType bailout_type, Register src1 = zero_reg, const Operand& src2 = Operand(zero_reg), - const char* detail = NULL); + const char* reason = NULL); void DeoptimizeIf(Condition condition, LInstruction* instr, Register src1 = zero_reg, const Operand& src2 = Operand(zero_reg), - const char* detail = NULL); + const char* reason = NULL); void AddToTranslation(LEnvironment* environment, Translation* translation, @@ -365,7 +365,7 @@ class LCodeGen: public LCodeGenBase { void EmitVectorLoadICRegisters(T* instr); ZoneList<LEnvironment*> deoptimizations_; - ZoneList<Deoptimizer::JumpTableEntry> jump_table_; + ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_; ZoneList<Handle<Object> > deoptimization_literals_; int inlined_function_count_; Scope* const scope_; diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc index 12d81bca2..020ffe9f4 100644 --- a/src/mips64/macro-assembler-mips64.cc +++ b/src/mips64/macro-assembler-mips64.cc @@ -3492,8 +3492,8 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result, } -void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg, - Label* not_unique_name) { +void MacroAssembler::JumpIfNotUniqueName(Register reg, + Label* not_unique_name) { STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); Label succeed; And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask)); diff --git a/src/mips64/macro-assembler-mips64.h b/src/mips64/macro-assembler-mips64.h index 2da48fbd8..2bf8c3398 100644 --- a/src/mips64/macro-assembler-mips64.h +++ b/src/mips64/macro-assembler-mips64.h @@ -1554,7 +1554,7 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch, Label* failure); - void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name); + void JumpIfNotUniqueName(Register reg, Label* not_unique_name); void EmitSeqStringSetCharCheck(Register string, Register index, diff --git a/src/objects.h b/src/objects.h index 37227f9a0..d88240d37 100644 --- a/src/objects.h +++ b/src/objects.h @@ -1200,6 +1200,9 @@ template <class C> inline bool Is(Object* obj); V(kSmiSubtractionOverflow, "Smi subtraction overflow") \ V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \ V(kStackFrameTypesMustMatch, "Stack frame types must match") \ + V(kSwitchStatementMixedOrNonLiteralSwitchLabels, \ + "SwitchStatement: mixed or non-literal switch labels") \ + V(kSwitchStatementTooManyClauses, "SwitchStatement: too many clauses") \ V(kTheCurrentStackPointerIsBelowCsp, \ "The current stack pointer is below csp") \ 
V(kTheInstructionShouldBeALui, "The instruction should be a lui") \ diff --git a/src/runtime.cc b/src/runtime.cc index 3ecf04430..3acbb81d8 100644 --- a/src/runtime.cc +++ b/src/runtime.cc @@ -7971,7 +7971,7 @@ RUNTIME_FUNCTION(Runtime_MathFround) { DCHECK(args.length() == 1); CONVERT_DOUBLE_ARG_CHECKED(x, 0); - float xf = DoubleToFloat32(x); + float xf = static_cast<float>(x); return *isolate->factory()->NewNumber(xf); } @@ -13007,16 +13007,6 @@ static MaybeHandle<Object> DebugEvaluate(Isolate* isolate, } -static Handle<JSObject> NewJSObjectWithNullProto(Isolate* isolate) { - Handle<JSObject> result = - isolate->factory()->NewJSObject(isolate->object_function()); - Handle<Map> new_map = Map::Copy(Handle<Map>(result->map())); - new_map->set_prototype(*isolate->factory()->null_value()); - JSObject::MigrateToMap(result, new_map); - return result; -} - - // Evaluate a piece of JavaScript in the context of a stack frame for // debugging. Things that need special attention are: // - Parameters and stack-allocated locals need to be materialized. Altered @@ -13059,7 +13049,8 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluate) { DCHECK(!context.is_null()); // Materialize stack locals and the arguments object. - Handle<JSObject> materialized = NewJSObjectWithNullProto(isolate); + Handle<JSObject> materialized = + isolate->factory()->NewJSObject(isolate->object_function()); ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, materialized, diff --git a/src/utils.h b/src/utils.h index 2991815e5..c23cf05f6 100644 --- a/src/utils.h +++ b/src/utils.h @@ -26,13 +26,6 @@ namespace internal { // ---------------------------------------------------------------------------- // General helper functions - -// Same as strcmp, but can handle NULL arguments. -inline bool CStringEquals(const char* s1, const char* s2) { - return (s1 == s2) || (s1 != NULL && s2 != NULL && strcmp(s1, s2) == 0); -} - - // X must be a power of 2. Returns the number of trailing zeros. inline int WhichPowerOf2(uint32_t x) { DCHECK(base::bits::IsPowerOfTwo32(x)); diff --git a/src/version.cc b/src/version.cc index 37678c0c8..b9dd19228 100644 --- a/src/version.cc +++ b/src/version.cc @@ -34,7 +34,7 @@ // system so their names cannot be changed without changing the scripts. #define MAJOR_VERSION 3 #define MINOR_VERSION 29 -#define BUILD_NUMBER 83 +#define BUILD_NUMBER 84 #define PATCH_LEVEL 0 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc index e55a1d202..a7d38cd03 100644 --- a/src/x64/code-stubs-x64.cc +++ b/src/x64/code-stubs-x64.cc @@ -3454,8 +3454,8 @@ void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) { __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(tmp1, &miss, Label::kNear); - __ JumpIfNotUniqueNameInstanceType(tmp2, &miss, Label::kNear); + __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear); + __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear); // Unique names are compared by identity. Label done; @@ -3674,8 +3674,8 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, // Check if the entry name is not a unique name. 
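// Note on the Runtime_MathFround hunk above (sketch, not part of this
// file): DoubleToFloat32(x) is replaced by a plain static_cast, which on
// IEEE-754 hardware likewise rounds the double to the nearest float;
// Math.fround then widens the float back to a double:
//
//   float xf = static_cast<float>(0.1);   // nearest float to 0.1
//   double r = static_cast<double>(xf);   // 0.10000000149011612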
__ movp(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset)); - __ JumpIfNotUniqueNameInstanceType( - FieldOperand(entity_name, Map::kInstanceTypeOffset), miss); + __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset), + miss); __ bind(&good); } @@ -3804,9 +3804,8 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { // Check if the entry name is not a unique name. __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset)); - __ JumpIfNotUniqueNameInstanceType( - FieldOperand(scratch, Map::kInstanceTypeOffset), - &maybe_in_dictionary); + __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset), + &maybe_in_dictionary); } } diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc index 95c4e006d..15003a964 100644 --- a/src/x64/lithium-codegen-x64.cc +++ b/src/x64/lithium-codegen-x64.cc @@ -306,7 +306,11 @@ bool LCodeGen::GenerateJumpTable() { Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i]; __ bind(&table_entry->label); Address entry = table_entry->address; - DeoptComment(table_entry->reason); + Deoptimizer::BailoutType type = table_entry->bailout_type; + int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); + DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id); + Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); + DeoptComment(table_entry->mnemonic, table_entry->reason); if (table_entry->needs_frame) { DCHECK(!info()->saves_caller_doubles()); __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry)); @@ -725,7 +729,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, - const char* detail, + const char* reason, Deoptimizer::BailoutType bailout_type) { LEnvironment* environment = instr->environment(); RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); @@ -770,22 +774,22 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, __ bind(&done); } - Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), - instr->Mnemonic(), detail); DCHECK(info()->IsStub() || frame_is_built_); // Go through jump table if we need to handle condition, build frame, or // restore caller doubles. if (cc == no_condition && frame_is_built_ && !info()->saves_caller_doubles()) { - DeoptComment(reason); + DeoptComment(instr->Mnemonic(), reason); __ call(entry, RelocInfo::RUNTIME_ENTRY); } else { - Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, - !frame_is_built_); // We often have several deopts to the same entry, reuse the last // jump entry if this is the case. if (jump_table_.is_empty() || - !table_entry.IsEquivalentTo(jump_table_.last())) { + jump_table_.last().address != entry || + jump_table_.last().needs_frame != !frame_is_built_ || + jump_table_.last().bailout_type != bailout_type) { + Deoptimizer::JumpTableEntry table_entry(entry, instr->Mnemonic(), reason, + bailout_type, !frame_is_built_); jump_table_.Add(table_entry, zone()); } if (cc == no_condition) { @@ -798,11 +802,11 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, - const char* detail) { + const char* reason) { Deoptimizer::BailoutType bailout_type = info()->IsStub() ? 
Deoptimizer::LAZY : Deoptimizer::EAGER; - DeoptimizeIf(cc, instr, detail, bailout_type); + DeoptimizeIf(cc, instr, reason, bailout_type); } @@ -1033,7 +1037,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { __ andl(dividend, Immediate(mask)); __ negl(dividend); if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(zero, instr, "minus zero"); + DeoptimizeIf(zero, instr); } __ jmp(&done, Label::kNear); } @@ -1050,7 +1054,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) { DCHECK(ToRegister(instr->result()).is(rax)); if (divisor == 0) { - DeoptimizeIf(no_condition, instr, "division by zero"); + DeoptimizeIf(no_condition, instr); return; } @@ -1065,7 +1069,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) { Label remainder_not_zero; __ j(not_zero, &remainder_not_zero, Label::kNear); __ cmpl(dividend, Immediate(0)); - DeoptimizeIf(less, instr, "minus zero"); + DeoptimizeIf(less, instr); __ bind(&remainder_not_zero); } } @@ -1087,7 +1091,7 @@ void LCodeGen::DoModI(LModI* instr) { // deopt in this case because we can't return a NaN. if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { __ testl(right_reg, right_reg); - DeoptimizeIf(zero, instr, "division by zero"); + DeoptimizeIf(zero, instr); } // Check for kMinInt % -1, idiv would signal a divide error. We @@ -1098,7 +1102,7 @@ void LCodeGen::DoModI(LModI* instr) { __ j(not_zero, &no_overflow_possible, Label::kNear); __ cmpl(right_reg, Immediate(-1)); if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(equal, instr, "minus zero"); + DeoptimizeIf(equal, instr); } else { __ j(not_equal, &no_overflow_possible, Label::kNear); __ Set(result_reg, 0); @@ -1118,7 +1122,7 @@ void LCodeGen::DoModI(LModI* instr) { __ j(not_sign, &positive_left, Label::kNear); __ idivl(right_reg); __ testl(result_reg, result_reg); - DeoptimizeIf(zero, instr, "minus zero"); + DeoptimizeIf(zero, instr); __ jmp(&done, Label::kNear); __ bind(&positive_left); } @@ -1144,13 +1148,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { // If the divisor is negative, we have to negate and handle edge cases. __ negl(dividend); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(zero, instr, "minus zero"); + DeoptimizeIf(zero, instr); } // Dividing by -1 is basically negation, unless we overflow. if (divisor == -1) { if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { - DeoptimizeIf(overflow, instr, "overflow"); + DeoptimizeIf(overflow, instr); } return; } @@ -1177,7 +1181,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { DCHECK(ToRegister(instr->result()).is(rdx)); if (divisor == 0) { - DeoptimizeIf(no_condition, instr, "division by zero"); + DeoptimizeIf(no_condition, instr); return; } @@ -1185,7 +1189,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { HMathFloorOfDiv* hdiv = instr->hydrogen(); if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { __ testl(dividend, dividend); - DeoptimizeIf(zero, instr, "minus zero"); + DeoptimizeIf(zero, instr); } // Easy case: We need no dynamic check for the dividend and the flooring @@ -1232,7 +1236,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { // Check for x / 0. if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { __ testl(divisor, divisor); - DeoptimizeIf(zero, instr, "division by zero"); + DeoptimizeIf(zero, instr); } // Check for (0 / -x) that will produce negative zero. 
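The deopt-message removals above cluster around integer-division edge cases that cannot produce a valid int32 result. A hedged summary of why each guard exists, in plain C++:

#include <cstdint>
#include <limits>
bool DivisionNeedsDeopt(int32_t dividend, int32_t divisor,
                        bool result_must_be_number) {
  if (divisor == 0) return true;  // x / 0: idivl would raise #DE
  if (dividend == std::numeric_limits<int32_t>::min() && divisor == -1) {
    return true;  // kMinInt / -1: 2^31 is not an int32 (idivl also faults)
  }
  if (result_must_be_number && dividend == 0 && divisor < 0) {
    return true;  // 0 / -x is -0 in JS, which no int32 can encode
  }
  return false;
}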
@@ -1241,7 +1245,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { __ testl(dividend, dividend); __ j(not_zero, &dividend_not_zero, Label::kNear); __ testl(divisor, divisor); - DeoptimizeIf(sign, instr, "minus zero"); + DeoptimizeIf(sign, instr); __ bind(&dividend_not_zero); } @@ -1251,7 +1255,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { __ cmpl(dividend, Immediate(kMinInt)); __ j(not_zero, &dividend_not_min_int, Label::kNear); __ cmpl(divisor, Immediate(-1)); - DeoptimizeIf(zero, instr, "overflow"); + DeoptimizeIf(zero, instr); __ bind(&dividend_not_min_int); } @@ -1280,19 +1284,19 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { HDiv* hdiv = instr->hydrogen(); if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { __ testl(dividend, dividend); - DeoptimizeIf(zero, instr, "minus zero"); + DeoptimizeIf(zero, instr); } // Check for (kMinInt / -1). if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { __ cmpl(dividend, Immediate(kMinInt)); - DeoptimizeIf(zero, instr, "overflow"); + DeoptimizeIf(zero, instr); } // Deoptimize if remainder will not be 0. if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && divisor != 1 && divisor != -1) { int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); __ testl(dividend, Immediate(mask)); - DeoptimizeIf(not_zero, instr, "remainder not zero"); + DeoptimizeIf(not_zero, instr); } __ Move(result, dividend); int32_t shift = WhichPowerOf2Abs(divisor); @@ -1313,7 +1317,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) { DCHECK(ToRegister(instr->result()).is(rdx)); if (divisor == 0) { - DeoptimizeIf(no_condition, instr, "division by zero"); + DeoptimizeIf(no_condition, instr); return; } @@ -1321,7 +1325,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) { HDiv* hdiv = instr->hydrogen(); if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { __ testl(dividend, dividend); - DeoptimizeIf(zero, instr, "minus zero"); + DeoptimizeIf(zero, instr); } __ TruncatingDiv(dividend, Abs(divisor)); @@ -1331,7 +1335,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) { __ movl(rax, rdx); __ imull(rax, rax, Immediate(divisor)); __ subl(rax, dividend); - DeoptimizeIf(not_equal, instr, "remainder not zero"); + DeoptimizeIf(not_equal, instr); } } @@ -1351,7 +1355,7 @@ void LCodeGen::DoDivI(LDivI* instr) { // Check for x / 0. if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { __ testl(divisor, divisor); - DeoptimizeIf(zero, instr, "division by zero"); + DeoptimizeIf(zero, instr); } // Check for (0 / -x) that will produce negative zero. @@ -1360,7 +1364,7 @@ void LCodeGen::DoDivI(LDivI* instr) { __ testl(dividend, dividend); __ j(not_zero, &dividend_not_zero, Label::kNear); __ testl(divisor, divisor); - DeoptimizeIf(sign, instr, "minus zero"); + DeoptimizeIf(sign, instr); __ bind(&dividend_not_zero); } @@ -1370,7 +1374,7 @@ void LCodeGen::DoDivI(LDivI* instr) { __ cmpl(dividend, Immediate(kMinInt)); __ j(not_zero, &dividend_not_min_int, Label::kNear); __ cmpl(divisor, Immediate(-1)); - DeoptimizeIf(zero, instr, "overflow"); + DeoptimizeIf(zero, instr); __ bind(&dividend_not_min_int); } @@ -1381,7 +1385,7 @@ void LCodeGen::DoDivI(LDivI* instr) { if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { // Deoptimize if remainder is not 0.
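// Sketch (not part of the patch): the remainder checks in the
// constant-divisor paths above verify exactness by multiplying back,
// because TruncatingDiv produces only a quotient:
//
//   int32_t q = TruncatingDiv(x, d);  // multiply-high "magic number" div
//   if (q * d != x) Deopt();          // JS '/' is not integer division
//
// DoDivI, by contrast, simply tests the remainder idivl leaves in rdx.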
__ testl(remainder, remainder); - DeoptimizeIf(not_zero, instr, "remainder not zero"); + DeoptimizeIf(not_zero, instr); } } @@ -1458,7 +1462,7 @@ void LCodeGen::DoMulI(LMulI* instr) { } if (can_overflow) { - DeoptimizeIf(overflow, instr, "overflow"); + DeoptimizeIf(overflow, instr); } if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { @@ -1477,10 +1481,10 @@ void LCodeGen::DoMulI(LMulI* instr) { ? !instr->hydrogen_value()->representation().IsSmi() : SmiValuesAre31Bits()); if (ToInteger32(LConstantOperand::cast(right)) < 0) { - DeoptimizeIf(no_condition, instr, "minus zero"); + DeoptimizeIf(no_condition, instr); } else if (ToInteger32(LConstantOperand::cast(right)) == 0) { __ cmpl(kScratchRegister, Immediate(0)); - DeoptimizeIf(less, instr, "minus zero"); + DeoptimizeIf(less, instr); } } else if (right->IsStackSlot()) { if (instr->hydrogen_value()->representation().IsSmi()) { @@ -1488,7 +1492,7 @@ void LCodeGen::DoMulI(LMulI* instr) { } else { __ orl(kScratchRegister, ToOperand(right)); } - DeoptimizeIf(sign, instr, "minus zero"); + DeoptimizeIf(sign, instr); } else { // Test the non-zero operand for negative sign. if (instr->hydrogen_value()->representation().IsSmi()) { @@ -1496,7 +1500,7 @@ void LCodeGen::DoMulI(LMulI* instr) { } else { __ orl(kScratchRegister, ToRegister(right)); } - DeoptimizeIf(sign, instr, "minus zero"); + DeoptimizeIf(sign, instr); } __ bind(&done); } @@ -1609,7 +1613,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) { __ shrl_cl(ToRegister(left)); if (instr->can_deopt()) { __ testl(ToRegister(left), ToRegister(left)); - DeoptimizeIf(negative, instr, "value to shift was negative"); + DeoptimizeIf(negative, instr); } break; case Token::SHL: @@ -1638,7 +1642,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) { __ shrl(ToRegister(left), Immediate(shift_count)); } else if (instr->can_deopt()) { __ testl(ToRegister(left), ToRegister(left)); - DeoptimizeIf(negative, instr, "value to shift was negative"); + DeoptimizeIf(negative, instr); } break; case Token::SHL: @@ -1653,7 +1657,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) { __ shll(ToRegister(left), Immediate(shift_count - 1)); } __ Integer32ToSmi(ToRegister(left), ToRegister(left)); - DeoptimizeIf(overflow, instr, "overflow"); + DeoptimizeIf(overflow, instr); } else { __ shll(ToRegister(left), Immediate(shift_count)); } @@ -1696,7 +1700,7 @@ void LCodeGen::DoSubI(LSubI* instr) { } if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { - DeoptimizeIf(overflow, instr, "overflow"); + DeoptimizeIf(overflow, instr); } } @@ -1761,9 +1765,9 @@ void LCodeGen::DoDateField(LDateField* instr) { DCHECK(object.is(rax)); Condition cc = masm()->CheckSmi(object); - DeoptimizeIf(cc, instr, "not an object"); + DeoptimizeIf(cc, instr); __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister); - DeoptimizeIf(not_equal, instr, "not a date object"); + DeoptimizeIf(not_equal, instr); if (index->value() == 0) { __ movp(result, FieldOperand(object, JSDate::kValueOffset)); @@ -1927,7 +1931,7 @@ void LCodeGen::DoAddI(LAddI* instr) { } } if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { - DeoptimizeIf(overflow, instr, "overflow"); + DeoptimizeIf(overflow, instr); } } } @@ -2172,7 +2176,7 @@ void LCodeGen::DoBranch(LBranch* instr) { } else if (expected.NeedsMap()) { // If we need a map later and have a Smi -> deopt. 
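// Sketch (not part of the patch): the testb below relies on V8's
// pointer tagging. A Smi stores the integer shifted left, leaving a
// zero tag bit (kSmiTag == 0), so:
//
//   bool is_smi = (value & kSmiTagMask) == 0;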
__ testb(reg, Immediate(kSmiTagMask)); - DeoptimizeIf(zero, instr, "Smi"); + DeoptimizeIf(zero, instr); } const Register map = kScratchRegister; @@ -2226,7 +2230,7 @@ void LCodeGen::DoBranch(LBranch* instr) { if (!expected.IsGeneric()) { // We've seen something for the first time -> deopt. // This can only happen if we are not generic already. - DeoptimizeIf(no_condition, instr, "unexpected object"); + DeoptimizeIf(no_condition, instr); } } } @@ -2842,7 +2846,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { __ LoadGlobalCell(result, instr->hydrogen()->cell().handle()); if (instr->hydrogen()->RequiresHoleCheck()) { __ CompareRoot(result, Heap::kTheHoleValueRootIndex); - DeoptimizeIf(equal, instr, "hole"); + DeoptimizeIf(equal, instr); } } @@ -2890,7 +2894,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { DCHECK(!value.is(cell)); __ Move(cell, cell_handle, RelocInfo::CELL); __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex); - DeoptimizeIf(equal, instr, "hole"); + DeoptimizeIf(equal, instr); // Store the value. __ movp(Operand(cell, 0), value); } else { @@ -2909,7 +2913,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { if (instr->hydrogen()->RequiresHoleCheck()) { __ CompareRoot(result, Heap::kTheHoleValueRootIndex); if (instr->hydrogen()->DeoptimizesOnHole()) { - DeoptimizeIf(equal, instr, "hole"); + DeoptimizeIf(equal, instr); } else { Label is_not_hole; __ j(not_equal, &is_not_hole, Label::kNear); @@ -2930,7 +2934,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { if (instr->hydrogen()->RequiresHoleCheck()) { __ CompareRoot(target, Heap::kTheHoleValueRootIndex); if (instr->hydrogen()->DeoptimizesOnHole()) { - DeoptimizeIf(equal, instr, "hole"); + DeoptimizeIf(equal, instr); } else { __ j(not_equal, &skip_assignment); } @@ -3028,7 +3032,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { // Check that the function has a prototype or an initial map. __ CompareRoot(result, Heap::kTheHoleValueRootIndex); - DeoptimizeIf(equal, instr, "hole"); + DeoptimizeIf(equal, instr); // If the function does not have an initial map, we're done. Label done; @@ -3140,7 +3144,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { __ movl(result, operand); if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { __ testl(result, result); - DeoptimizeIf(negative, instr, "negative value"); + DeoptimizeIf(negative, instr); } break; case EXTERNAL_FLOAT32_ELEMENTS: @@ -3179,7 +3183,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { FAST_DOUBLE_ELEMENTS, instr->base_offset() + sizeof(kHoleNanLower32)); __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32)); - DeoptimizeIf(equal, instr, "hole"); + DeoptimizeIf(equal, instr); } Operand double_load_operand = BuildFastArrayOperand( @@ -3236,10 +3240,10 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { if (requires_hole_check) { if (IsFastSmiElementsKind(hinstr->elements_kind())) { Condition smi = __ CheckSmi(result); - DeoptimizeIf(NegateCondition(smi), instr, "not a Smi"); + DeoptimizeIf(NegateCondition(smi), instr); } else { __ CompareRoot(result, Heap::kTheHoleValueRootIndex); - DeoptimizeIf(equal, instr, "hole"); + DeoptimizeIf(equal, instr); } } } @@ -3388,9 +3392,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { // The receiver should be a JS object. 
Condition is_smi = __ CheckSmi(receiver); - DeoptimizeIf(is_smi, instr, "not an object"); + DeoptimizeIf(is_smi, instr); __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister); - DeoptimizeIf(below, instr, "not a spec object"); + DeoptimizeIf(below, instr); __ jmp(&receiver_ok, Label::kNear); __ bind(&global_object); @@ -3417,7 +3421,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { // adaptor frame below it. const uint32_t kArgumentsLimit = 1 * KB; __ cmpp(length, Immediate(kArgumentsLimit)); - DeoptimizeIf(above, instr, "too many arguments"); + DeoptimizeIf(above, instr); __ Push(receiver); __ movp(receiver, length); @@ -3612,7 +3616,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { Register input_reg = ToRegister(instr->value()); __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), Heap::kHeapNumberMapRootIndex); - DeoptimizeIf(not_equal, instr, "not a heap number"); + DeoptimizeIf(not_equal, instr); Label slow, allocated, done; Register tmp = input_reg.is(rax) ? rcx : rax; @@ -3658,7 +3662,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { Label is_positive; __ j(not_sign, &is_positive, Label::kNear); __ negl(input_reg); // Sets flags. - DeoptimizeIf(negative, instr, "overflow"); + DeoptimizeIf(negative, instr); __ bind(&is_positive); } @@ -3669,7 +3673,7 @@ void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) { Label is_positive; __ j(not_sign, &is_positive, Label::kNear); __ negp(input_reg); // Sets flags. - DeoptimizeIf(negative, instr, "overflow"); + DeoptimizeIf(negative, instr); __ bind(&is_positive); } @@ -3724,18 +3728,18 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) { // Deoptimize if minus zero. __ movq(output_reg, input_reg); __ subq(output_reg, Immediate(1)); - DeoptimizeIf(overflow, instr, "minus zero"); + DeoptimizeIf(overflow, instr); } __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown); __ cvttsd2si(output_reg, xmm_scratch); __ cmpl(output_reg, Immediate(0x1)); - DeoptimizeIf(overflow, instr, "overflow"); + DeoptimizeIf(overflow, instr); } else { Label negative_sign, done; // Deoptimize on unordered. __ xorps(xmm_scratch, xmm_scratch); // Zero the register. __ ucomisd(input_reg, xmm_scratch); - DeoptimizeIf(parity_even, instr, "unordered"); + DeoptimizeIf(parity_even, instr); __ j(below, &negative_sign, Label::kNear); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { @@ -3744,7 +3748,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) { __ j(above, &positive_sign, Label::kNear); __ movmskpd(output_reg, input_reg); __ testq(output_reg, Immediate(1)); - DeoptimizeIf(not_zero, instr, "minus zero"); + DeoptimizeIf(not_zero, instr); __ Set(output_reg, 0); __ jmp(&done); __ bind(&positive_sign); @@ -3754,7 +3758,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) { __ cvttsd2si(output_reg, input_reg); // Overflow is signalled with minint. __ cmpl(output_reg, Immediate(0x1)); - DeoptimizeIf(overflow, instr, "overflow"); + DeoptimizeIf(overflow, instr); __ jmp(&done, Label::kNear); // Non-zero negative reaches here. @@ -3765,7 +3769,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) { __ ucomisd(input_reg, xmm_scratch); __ j(equal, &done, Label::kNear); __ subl(output_reg, Immediate(1)); - DeoptimizeIf(overflow, instr, "overflow"); + DeoptimizeIf(overflow, instr); __ bind(&done); } @@ -3792,7 +3796,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) { __ cvttsd2si(output_reg, xmm_scratch); // Overflow is signalled with minint. 
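// Sketch (not part of the patch): cvttsd2si yields INT32_MIN
// (0x80000000) for NaN and out-of-range inputs. The cmpl against 1 that
// follows sets the CPU overflow flag exactly when output_reg - 1
// underflows, i.e. only for that sentinel, so DeoptimizeIf(overflow, ...)
// catches precisely the failed conversions:
//
//   bool failed = (output == std::numeric_limits<int32_t>::min());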
__ cmpl(output_reg, Immediate(0x1)); - DeoptimizeIf(overflow, instr, "conversion overflow"); + __ RecordComment("D2I conversion overflow"); + DeoptimizeIf(overflow, instr); __ jmp(&done, dist); __ bind(&below_one_half); @@ -3808,7 +3813,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) { __ cvttsd2si(output_reg, input_temp); // Catch minint due to overflow, and to prevent overflow when compensating. __ cmpl(output_reg, Immediate(0x1)); - DeoptimizeIf(overflow, instr, "conversion overflow"); + __ RecordComment("D2I conversion overflow"); + DeoptimizeIf(overflow, instr); __ Cvtlsi2sd(xmm_scratch, output_reg); __ ucomisd(xmm_scratch, input_temp); @@ -3823,7 +3829,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) { if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { __ movq(output_reg, input_reg); __ testq(output_reg, output_reg); - DeoptimizeIf(negative, instr, "minus zero"); + __ RecordComment("Minus zero"); + DeoptimizeIf(negative, instr); } __ Set(output_reg, 0); __ bind(&done); @@ -3902,7 +3909,7 @@ void LCodeGen::DoPower(LPower* instr) { Label no_deopt; __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear); __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx); - DeoptimizeIf(not_equal, instr, "not a heap number"); + DeoptimizeIf(not_equal, instr); __ bind(&no_deopt); MathPowStub stub(isolate(), MathPowStub::TAGGED); __ CallStub(&stub); @@ -4274,7 +4281,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { __ int3(); __ bind(&done); } else { - DeoptimizeIf(cc, instr, "out of bounds"); + DeoptimizeIf(cc, instr); } } @@ -4522,7 +4529,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { Register temp = ToRegister(instr->temp()); Label no_memento_found; __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); - DeoptimizeIf(equal, instr, "memento found"); + DeoptimizeIf(equal, instr); __ bind(&no_memento_found); } @@ -4841,12 +4848,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) { if (hchange->CheckFlag(HValue::kCanOverflow) && hchange->value()->CheckFlag(HValue::kUint32)) { Condition is_smi = __ CheckUInteger32ValidSmiValue(input); - DeoptimizeIf(NegateCondition(is_smi), instr, "not a smi"); + DeoptimizeIf(NegateCondition(is_smi), instr); } __ Integer32ToSmi(output, input); if (hchange->CheckFlag(HValue::kCanOverflow) && !hchange->value()->CheckFlag(HValue::kUint32)) { - DeoptimizeIf(overflow, instr, "overflow"); + DeoptimizeIf(overflow, instr); } } @@ -4856,7 +4863,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) { Register input = ToRegister(instr->value()); if (instr->needs_check()) { Condition is_smi = __ CheckSmi(input); - DeoptimizeIf(NegateCondition(is_smi), instr, "not a smi"); + DeoptimizeIf(NegateCondition(is_smi), instr); } else { __ AssertSmi(input); } @@ -4887,7 +4894,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, if (can_convert_undefined_to_nan) { __ j(not_equal, &convert, Label::kNear); } else { - DeoptimizeIf(not_equal, instr, "not a heap number"); + DeoptimizeIf(not_equal, instr); } if (deoptimize_on_minus_zero) { @@ -4897,7 +4904,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, __ j(not_equal, &done, Label::kNear); __ movmskpd(kScratchRegister, result_reg); __ testq(kScratchRegister, Immediate(1)); - DeoptimizeIf(not_zero, instr, "minus zero"); + DeoptimizeIf(not_zero, instr); } __ jmp(&done, Label::kNear); @@ -4906,7 +4913,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, // Convert undefined (and hole) to NaN. 
Compute NaN as 0/0. __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); - DeoptimizeIf(not_equal, instr, "neither a heap number nor undefined"); + DeoptimizeIf(not_equal, instr); __ xorps(result_reg, result_reg); __ divsd(result_reg, result_reg); @@ -4953,26 +4960,31 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { __ bind(&check_false); __ CompareRoot(input_reg, Heap::kFalseValueRootIndex); - DeoptimizeIf(not_equal, instr, "cannot truncate"); + __ RecordComment("Deferred TaggedToI: cannot truncate"); + DeoptimizeIf(not_equal, instr); __ Set(input_reg, 0); } else { XMMRegister scratch = ToDoubleRegister(instr->temp()); DCHECK(!scratch.is(xmm0)); __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), Heap::kHeapNumberMapRootIndex); - DeoptimizeIf(not_equal, instr, "not a heap number"); + __ RecordComment("Deferred TaggedToI: not a heap number"); + DeoptimizeIf(not_equal, instr); __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); __ cvttsd2si(input_reg, xmm0); __ Cvtlsi2sd(scratch, input_reg); __ ucomisd(xmm0, scratch); - DeoptimizeIf(not_equal, instr, "lost precision"); - DeoptimizeIf(parity_even, instr, "NaN"); + __ RecordComment("Deferred TaggedToI: lost precision"); + DeoptimizeIf(not_equal, instr); + __ RecordComment("Deferred TaggedToI: NaN"); + DeoptimizeIf(parity_even, instr); if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) { __ testl(input_reg, input_reg); __ j(not_zero, done); __ movmskpd(input_reg, xmm0); __ andl(input_reg, Immediate(1)); - DeoptimizeIf(not_zero, instr, "minus zero"); + __ RecordComment("Deferred TaggedToI: minus zero"); + DeoptimizeIf(not_zero, instr); } } } @@ -5043,7 +5055,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { __ jmp(&done, Label::kNear); __ bind(&bailout); - DeoptimizeIf(no_condition, instr, "conversion failed"); + DeoptimizeIf(no_condition, instr); __ bind(&done); } } @@ -5065,18 +5077,18 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { __ jmp(&done, Label::kNear); __ bind(&bailout); - DeoptimizeIf(no_condition, instr, "conversion failed"); + DeoptimizeIf(no_condition, instr); __ bind(&done); __ Integer32ToSmi(result_reg, result_reg); - DeoptimizeIf(overflow, instr, "overflow"); + DeoptimizeIf(overflow, instr); } void LCodeGen::DoCheckSmi(LCheckSmi* instr) { LOperand* input = instr->value(); Condition cc = masm()->CheckSmi(ToRegister(input)); - DeoptimizeIf(NegateCondition(cc), instr, "not a Smi"); + DeoptimizeIf(NegateCondition(cc), instr); } @@ -5084,7 +5096,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { if (!instr->hydrogen()->value()->type().IsHeapObject()) { LOperand* input = instr->value(); Condition cc = masm()->CheckSmi(ToRegister(input)); - DeoptimizeIf(cc, instr, "Smi"); + DeoptimizeIf(cc, instr); } } @@ -5104,14 +5116,14 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { // If there is only one type in the interval check for equality. if (first == last) { - DeoptimizeIf(not_equal, instr, "wrong instance type"); + DeoptimizeIf(not_equal, instr); } else { - DeoptimizeIf(below, instr, "wrong instance type"); + DeoptimizeIf(below, instr); // Omit check for the last type. 
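// Sketch (not part of the patch): DoCheckInstanceType tests a
// contiguous [first, last] interval of instance types; when last ==
// LAST_TYPE the upper bound is vacuous and only the lower-bound deopt
// above is needed. Equivalent predicate:
//
//   bool ok = first <= type && (last == LAST_TYPE || type <= last);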
if (last != LAST_TYPE) { __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset), Immediate(static_cast<int8_t>(last))); - DeoptimizeIf(above, instr, "wrong instance type"); + DeoptimizeIf(above, instr); } } } else { @@ -5123,13 +5135,13 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset), Immediate(mask)); - DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type"); + DeoptimizeIf(tag == 0 ? not_zero : zero, instr); } else { __ movzxbl(kScratchRegister, FieldOperand(kScratchRegister, Map::kInstanceTypeOffset)); __ andb(kScratchRegister, Immediate(mask)); __ cmpb(kScratchRegister, Immediate(tag)); - DeoptimizeIf(not_equal, instr, "wrong instance type"); + DeoptimizeIf(not_equal, instr); } } } @@ -5138,7 +5150,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { void LCodeGen::DoCheckValue(LCheckValue* instr) { Register reg = ToRegister(instr->value()); __ Cmp(reg, instr->hydrogen()->object().handle()); - DeoptimizeIf(not_equal, instr, "value mismatch"); + DeoptimizeIf(not_equal, instr); } @@ -5153,7 +5165,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { __ testp(rax, Immediate(kSmiTagMask)); } - DeoptimizeIf(zero, instr, "instance migration failed"); + DeoptimizeIf(zero, instr); } @@ -5206,7 +5218,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { if (instr->hydrogen()->HasMigrationTarget()) { __ j(not_equal, deferred->entry()); } else { - DeoptimizeIf(not_equal, instr, "wrong map"); + DeoptimizeIf(not_equal, instr); } __ bind(&success); @@ -5245,7 +5257,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { // Check for undefined. Undefined is converted to zero for clamping // conversions. 
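// Sketch (not part of the patch): DoClampTToUint8 implements the
// typed-array store conversion — numbers clamp into [0, 255] with
// rounding to the nearest integer, undefined becomes 0, and any other
// value deoptimizes ("neither a heap number nor undefined"). Roughly:
//
//   if (v is a Smi)              result = clamp(v, 0, 255);
//   else if (v is a HeapNumber)  result = clamp(round(v), 0, 255);
//   else if (v is undefined)     result = 0;
//   else                         Deopt();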
__ Cmp(input_reg, factory()->undefined_value()); - DeoptimizeIf(not_equal, instr, "neither a heap number nor undefined"); + DeoptimizeIf(not_equal, instr); __ xorl(input_reg, input_reg); __ jmp(&done, Label::kNear); @@ -5726,19 +5738,19 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) { void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { DCHECK(ToRegister(instr->context()).is(rsi)); __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); - DeoptimizeIf(equal, instr, "undefined"); + DeoptimizeIf(equal, instr); Register null_value = rdi; __ LoadRoot(null_value, Heap::kNullValueRootIndex); __ cmpp(rax, null_value); - DeoptimizeIf(equal, instr, "null"); + DeoptimizeIf(equal, instr); Condition cc = masm()->CheckSmi(rax); - DeoptimizeIf(cc, instr, "Smi"); + DeoptimizeIf(cc, instr); STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx); - DeoptimizeIf(below_equal, instr, "wrong instance type"); + DeoptimizeIf(below_equal, instr); Label use_cache, call_runtime; __ CheckEnumCache(null_value, &call_runtime); @@ -5753,7 +5765,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset), Heap::kMetaMapRootIndex); - DeoptimizeIf(not_equal, instr, "not a meta map"); + DeoptimizeIf(not_equal, instr); __ bind(&use_cache); } @@ -5775,7 +5787,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { FieldOperand(result, FixedArray::SizeFor(instr->idx()))); __ bind(&done); Condition cc = masm()->CheckSmi(result); - DeoptimizeIf(cc, instr, "Smi"); + DeoptimizeIf(cc, instr); } @@ -5783,7 +5795,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { Register object = ToRegister(instr->value()); __ cmpp(ToRegister(instr->map()), FieldOperand(object, HeapObject::kMapOffset)); - DeoptimizeIf(not_equal, instr, "wrong map"); + DeoptimizeIf(not_equal, instr); } diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h index ccd90b53c..bf10e34bb 100644 --- a/src/x64/lithium-codegen-x64.h +++ b/src/x64/lithium-codegen-x64.h @@ -206,9 +206,10 @@ class LCodeGen: public LCodeGenBase { int argc); void RegisterEnvironmentForDeoptimization(LEnvironment* environment, Safepoint::DeoptMode mode); - void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail, + void DeoptimizeIf(Condition cc, LInstruction* instr, const char* reason, Deoptimizer::BailoutType bailout_type); - void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail); + void DeoptimizeIf(Condition cc, LInstruction* instr, + const char* reason = NULL); bool DeoptEveryNTimes() { return FLAG_deopt_every_n_times != 0 && !info()->IsStub(); diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc index fceaad8fe..50ca8f2f5 100644 --- a/src/x64/macro-assembler-x64.cc +++ b/src/x64/macro-assembler-x64.cc @@ -2701,16 +2701,16 @@ static void JumpIfNotUniqueNameHelper(MacroAssembler* masm, } -void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand, - Label* not_unique_name, - Label::Distance distance) { +void MacroAssembler::JumpIfNotUniqueName(Operand operand, + Label* not_unique_name, + Label::Distance distance) { JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance); } -void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg, - Label* not_unique_name, - Label::Distance distance) { +void MacroAssembler::JumpIfNotUniqueName(Register reg, + Label* not_unique_name, + Label::Distance distance) { JumpIfNotUniqueNameHelper<Register>(this, reg, 
not_unique_name, distance); } diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h index 219b7ff0a..b4f7dd771 100644 --- a/src/x64/macro-assembler-x64.h +++ b/src/x64/macro-assembler-x64.h @@ -797,10 +797,10 @@ class MacroAssembler: public Assembler { uint32_t encoding_mask); // Checks if the given register or operand is a unique name - void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name, - Label::Distance distance = Label::kFar); - void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name, - Label::Distance distance = Label::kFar); + void JumpIfNotUniqueName(Register reg, Label* not_unique_name, + Label::Distance distance = Label::kFar); + void JumpIfNotUniqueName(Operand operand, Label* not_unique_name, + Label::Distance distance = Label::kFar); // --------------------------------------------------------------------------- // Macro instructions. diff --git a/src/x87/assembler-x87-inl.h b/src/x87/assembler-x87-inl.h index 6555ccdd8..25ecfcf13 100644 --- a/src/x87/assembler-x87-inl.h +++ b/src/x87/assembler-x87-inl.h @@ -45,7 +45,7 @@ namespace v8 { namespace internal { -bool CpuFeatures::SupportsCrankshaft() { return true; } +bool CpuFeatures::SupportsCrankshaft() { return false; } static const byte kCallOpcode = 0xE8; diff --git a/src/x87/assembler-x87.cc b/src/x87/assembler-x87.cc index 9e1c8836f..8f9224915 100644 --- a/src/x87/assembler-x87.cc +++ b/src/x87/assembler-x87.cc @@ -1519,20 +1519,6 @@ void Assembler::fst_s(const Operand& adr) { } -void Assembler::fldcw(const Operand& adr) { - EnsureSpace ensure_space(this); - EMIT(0xD9); - emit_operand(ebp, adr); -} - - -void Assembler::fnstcw(const Operand& adr) { - EnsureSpace ensure_space(this); - EMIT(0xD9); - emit_operand(edi, adr); -} - - void Assembler::fstp_d(const Operand& adr) { EnsureSpace ensure_space(this); EMIT(0xDD); @@ -1612,13 +1598,6 @@ void Assembler::fchs() { } -void Assembler::fsqrt() { - EnsureSpace ensure_space(this); - EMIT(0xD9); - EMIT(0xFA); -} - - void Assembler::fcos() { EnsureSpace ensure_space(this); EMIT(0xD9); @@ -1680,13 +1659,6 @@ void Assembler::fadd_i(int i) { } -void Assembler::fadd_d(const Operand& adr) { - EnsureSpace ensure_space(this); - EMIT(0xDC); - emit_operand(eax, adr); -} - - void Assembler::fsub(int i) { EnsureSpace ensure_space(this); emit_farith(0xDC, 0xE8, i); @@ -1800,13 +1772,6 @@ void Assembler::ftst() { } -void Assembler::fxam() { - EnsureSpace ensure_space(this); - EMIT(0xD9); - EMIT(0xE5); -} - - void Assembler::fucomp(int i) { EnsureSpace ensure_space(this); emit_farith(0xDD, 0xE8, i); @@ -1868,20 +1833,6 @@ void Assembler::fnclex() { } -void Assembler::fnsave(const Operand& adr) { - EnsureSpace ensure_space(this); - EMIT(0xDD); - emit_operand(esi, adr); -} - - -void Assembler::frstor(const Operand& adr) { - EnsureSpace ensure_space(this); - EMIT(0xDD); - emit_operand(esp, adr); -} - - void Assembler::sahf() { EnsureSpace ensure_space(this); EMIT(0x9E); diff --git a/src/x87/assembler-x87.h b/src/x87/assembler-x87.h index d37c9d77d..a2bedcc3c 100644 --- a/src/x87/assembler-x87.h +++ b/src/x87/assembler-x87.h @@ -142,7 +142,7 @@ inline Register Register::FromAllocationIndex(int index) { struct X87Register { - static const int kMaxNumAllocatableRegisters = 6; + static const int kMaxNumAllocatableRegisters = 8; static const int kMaxNumRegisters = 8; static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; @@ -852,7 +852,6 @@ class Assembler : public AssemblerBase { void fabs(); void fchs(); - void fsqrt(); 
void fcos(); void fsin(); void fptan(); @@ -863,7 +862,6 @@ class Assembler : public AssemblerBase { void fadd(int i); void fadd_i(int i); - void fadd_d(const Operand& adr); void fsub(int i); void fsub_i(int i); void fmul(int i); @@ -886,19 +884,14 @@ void ffree(int i = 0); void ftst(); - void fxam(); void fucomp(int i); void fucompp(); void fucomi(int i); void fucomip(); void fcompp(); void fnstsw_ax(); - void fldcw(const Operand& adr); - void fnstcw(const Operand& adr); void fwait(); void fnclex(); - void fnsave(const Operand& adr); - void frstor(const Operand& adr); void frndint(); diff --git a/src/x87/builtins-x87.cc b/src/x87/builtins-x87.cc index d6311752c..6857cdc26 100644 --- a/src/x87/builtins-x87.cc +++ b/src/x87/builtins-x87.cc @@ -660,8 +660,7 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) { } -static void Generate_NotifyStubFailureHelper(MacroAssembler* masm, - SaveFPRegsMode save_doubles) { +static void Generate_NotifyStubFailureHelper(MacroAssembler* masm) { // Enter an internal frame. { FrameScope scope(masm, StackFrame::INTERNAL); @@ -670,7 +669,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm, // stubs that tail call the runtime on deopts passing their parameters in // registers. __ pushad(); - __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles); + __ CallRuntime(Runtime::kNotifyStubFailure, 0); __ popad(); // Tear down internal frame. } @@ -681,12 +680,13 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm, void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) { - Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs); + Generate_NotifyStubFailureHelper(masm); } void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) { - Generate_NotifyStubFailureHelper(masm, kSaveFPRegs); + // SaveDoubles is meaningless for X87; it is only used by deoptimizer.cc. + Generate_NotifyStubFailureHelper(masm); } diff --git a/src/x87/code-stubs-x87.cc b/src/x87/code-stubs-x87.cc index d4c383be8..35514c356 100644 --- a/src/x87/code-stubs-x87.cc +++ b/src/x87/code-stubs-x87.cc @@ -127,11 +127,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { // store the registers in any particular way, but we do have to store and // restore them. __ pushad(); - if (save_doubles()) { - // Save FPU stat in m108byte. - __ sub(esp, Immediate(108)); - __ fnsave(Operand(esp, 0)); - } const int argument_count = 1; AllowExternalCallThatCantCauseGC scope(masm); @@ -141,11 +136,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { __ CallCFunction( ExternalReference::store_buffer_overflow_function(isolate()), argument_count); - if (save_doubles()) { - // Restore FPU stat in m108byte. - __ frstor(Operand(esp, 0)); - __ add(esp, Immediate(108)); - } __ popad(); __ ret(0); } @@ -1125,12 +1115,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ mov(eax, Operand(esp, kSubjectOffset)); __ mov(ecx, eax); __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax); - __ RecordWriteField(ebx, RegExpImpl::kLastSubjectOffset, eax, edi, - kDontSaveFPRegs); + __ RecordWriteField(ebx, + RegExpImpl::kLastSubjectOffset, + eax, + edi); __ mov(eax, ecx); __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax); - __ RecordWriteField(ebx, RegExpImpl::kLastInputOffset, eax, edi, - kDontSaveFPRegs); + __ RecordWriteField(ebx, + RegExpImpl::kLastInputOffset, + eax, + edi); // Get the static offsets vector filled by the native regexp code. 
ExternalReference address_of_static_offsets_vector = @@ -1624,8 +1618,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { __ push(edi); __ push(ebx); __ push(edx); - __ RecordWriteArray(ebx, edi, edx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + __ RecordWriteArray(ebx, edi, edx, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); __ pop(edx); __ pop(ebx); __ pop(edi); @@ -1996,19 +1989,12 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { void CodeStub::GenerateFPStubs(Isolate* isolate) { - CEntryStub save_doubles(isolate, 1, kSaveFPRegs); - // Stubs might already be in the snapshot, detect that and don't regenerate, - // which would lead to code stub initialization state being messed up. - Code* save_doubles_code; - if (!save_doubles.FindCodeInCache(&save_doubles_code)) { - save_doubles_code = *(save_doubles.GetCode()); - } - isolate->set_fp_stubs_generated(true); + // Do nothing. } void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { - CEntryStub stub(isolate, 1, kDontSaveFPRegs); + CEntryStub stub(isolate, 1); stub.GetCode(); } @@ -2024,7 +2010,7 @@ void CEntryStub::Generate(MacroAssembler* masm) { ProfileEntryHookStub::MaybeCallEntryHook(masm); // Enter the exit frame that transitions from JavaScript to C++. - __ EnterExitFrame(save_doubles()); + __ EnterExitFrame(); // ebx: pointer to C function (C callee-saved) // ebp: frame pointer (restored after C call) @@ -2080,7 +2066,7 @@ void CEntryStub::Generate(MacroAssembler* masm) { } // Exit the JavaScript to C++ exit frame. - __ LeaveExitFrame(save_doubles()); + __ LeaveExitFrame(); __ ret(0); // Handling of exception. @@ -3167,8 +3153,8 @@ void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) { __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(tmp1, &miss, Label::kNear); - __ JumpIfNotUniqueNameInstanceType(tmp2, &miss, Label::kNear); + __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear); + __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear); // Unique names are compared by identity. Label done; @@ -3393,8 +3379,8 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, // Check if the entry name is not a unique name. __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset)); - __ JumpIfNotUniqueNameInstanceType( - FieldOperand(entity_name, Map::kInstanceTypeOffset), miss); + __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset), + miss); __ bind(&good); } @@ -3528,9 +3514,8 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { // Check if the entry name is not a unique name. 
__ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset)); - __ JumpIfNotUniqueNameInstanceType( - FieldOperand(scratch, Map::kInstanceTypeOffset), - &maybe_in_dictionary); + __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset), + &maybe_in_dictionary); } } @@ -3560,8 +3545,6 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( Isolate* isolate) { StoreBufferOverflowStub stub(isolate, kDontSaveFPRegs); stub.GetCode(); - StoreBufferOverflowStub stub2(isolate, kSaveFPRegs); - stub2.GetCode(); } @@ -3581,7 +3564,7 @@ void RecordWriteStub::Generate(MacroAssembler* masm) { __ jmp(&skip_to_incremental_compacting, Label::kFar); if (remembered_set_action() == EMIT_REMEMBERED_SET) { - __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(), + __ RememberedSetHelper(object(), address(), value(), MacroAssembler::kReturnAtEnd); } else { __ ret(0); @@ -3625,7 +3608,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { mode); InformIncrementalMarker(masm); regs_.Restore(masm); - __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(), + __ RememberedSetHelper(object(), address(), value(), MacroAssembler::kReturnAtEnd); __ bind(&dont_need_remembered_set); @@ -3642,7 +3625,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) { - regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode()); + regs_.SaveCallerSaveRegisters(masm); int argument_count = 3; __ PrepareCallCFunction(argument_count, regs_.scratch0()); __ mov(Operand(esp, 0 * kPointerSize), regs_.object()); @@ -3655,7 +3638,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) { ExternalReference::incremental_marking_record_write_function(isolate()), argument_count); - regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode()); + regs_.RestoreCallerSaveRegisters(masm); } @@ -3686,7 +3669,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker( regs_.Restore(masm); if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { - __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(), + __ RememberedSetHelper(object(), address(), value(), MacroAssembler::kReturnAtEnd); } else { __ ret(0); @@ -3731,7 +3714,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker( regs_.Restore(masm); if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { - __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(), + __ RememberedSetHelper(object(), address(), value(), MacroAssembler::kReturnAtEnd); } else { __ ret(0); @@ -3801,7 +3784,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { FixedArrayBase::kHeaderSize)); __ mov(Operand(ecx, 0), eax); // Update the write barrier for the array store. 
- __ RecordWrite(ebx, ecx, eax, kDontSaveFPRegs, EMIT_REMEMBERED_SET, + __ RecordWrite(ebx, ecx, eax, + EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); __ ret(0); @@ -3830,7 +3814,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { - CEntryStub ces(isolate(), 1, kSaveFPRegs); + CEntryStub ces(isolate(), 1); __ call(ces.GetCode(), RelocInfo::CODE_TARGET); int parameter_count_offset = StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; diff --git a/src/x87/code-stubs-x87.h b/src/x87/code-stubs-x87.h index 03ff477f6..49462bc5c 100644 --- a/src/x87/code-stubs-x87.h +++ b/src/x87/code-stubs-x87.h @@ -116,9 +116,11 @@ class NameDictionaryLookupStub: public PlatformCodeStub { class RecordWriteStub: public PlatformCodeStub { public: - RecordWriteStub(Isolate* isolate, Register object, Register value, - Register address, RememberedSetAction remembered_set_action, - SaveFPRegsMode fp_mode) + RecordWriteStub(Isolate* isolate, + Register object, + Register value, + Register address, + RememberedSetAction remembered_set_action) : PlatformCodeStub(isolate), regs_(object, // An input reg. address, // An input reg. @@ -126,8 +128,7 @@ class RecordWriteStub: public PlatformCodeStub { minor_key_ = ObjectBits::encode(object.code()) | ValueBits::encode(value.code()) | AddressBits::encode(address.code()) | - RememberedSetActionBits::encode(remembered_set_action) | - SaveFPRegsModeBits::encode(fp_mode); + RememberedSetActionBits::encode(remembered_set_action); } RecordWriteStub(uint32_t key, Isolate* isolate) @@ -270,23 +271,12 @@ class RecordWriteStub: public PlatformCodeStub { // saved registers that were not already preserved. The caller saved // registers are eax, ecx and edx. The three scratch registers (incl. ecx) // will be restored by other means so we don't bother pushing them here. - void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { + void SaveCallerSaveRegisters(MacroAssembler* masm) { if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax); if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx); - if (mode == kSaveFPRegs) { - // Save FPU state in m108byte. - masm->sub(esp, Immediate(108)); - masm->fnsave(Operand(esp, 0)); - } } - inline void RestoreCallerSaveRegisters(MacroAssembler* masm, - SaveFPRegsMode mode) { - if (mode == kSaveFPRegs) { - // Restore FPU state in m108byte. - masm->frstor(Operand(esp, 0)); - masm->add(esp, Immediate(108)); - } + inline void RestoreCallerSaveRegisters(MacroAssembler*masm) { if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx); if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax); } @@ -358,15 +348,10 @@ class RecordWriteStub: public PlatformCodeStub { return RememberedSetActionBits::decode(minor_key_); } - SaveFPRegsMode save_fp_regs_mode() const { - return SaveFPRegsModeBits::decode(minor_key_); - } - class ObjectBits: public BitField<int, 0, 3> {}; class ValueBits: public BitField<int, 3, 3> {}; class AddressBits: public BitField<int, 6, 3> {}; class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {}; - class SaveFPRegsModeBits : public BitField<SaveFPRegsMode, 10, 1> {}; RegisterAllocation regs_; diff --git a/src/x87/codegen-x87.cc b/src/x87/codegen-x87.cc index e33959e65..56d273cf0 100644 --- a/src/x87/codegen-x87.cc +++ b/src/x87/codegen-x87.cc @@ -217,8 +217,12 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( // Set transitioned map. 
__ mov(FieldOperand(receiver, HeapObject::kMapOffset), target_map); - __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch, - kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ RecordWriteField(receiver, + HeapObject::kMapOffset, + target_map, + scratch, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); } @@ -271,8 +275,12 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( // Replace receiver's backing store with newly created FixedDoubleArray. __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax); __ mov(ebx, eax); - __ RecordWriteField(edx, JSObject::kElementsOffset, ebx, edi, kDontSaveFPRegs, - EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ RecordWriteField(edx, + JSObject::kElementsOffset, + ebx, + edi, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset)); @@ -331,8 +339,12 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( // ebx: target map // Set transitioned map. __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx); - __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs, - OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ RecordWriteField(edx, + HeapObject::kMapOffset, + ebx, + edi, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); } @@ -387,8 +399,12 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( // Set transitioned map. __ bind(&only_change_map); __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx); - __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs, - OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ RecordWriteField(edx, + HeapObject::kMapOffset, + ebx, + edi, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); __ jmp(&success); // Call into runtime if GC is required. @@ -417,7 +433,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi); __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx); __ mov(esi, ebx); - __ RecordWriteArray(eax, edx, esi, kDontSaveFPRegs, EMIT_REMEMBERED_SET, + __ RecordWriteArray(eax, + edx, + esi, + EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); __ jmp(&entry, Label::kNear); @@ -436,12 +455,20 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( // edx: receiver // Set transitioned map. __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx); - __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs, - OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ RecordWriteField(edx, + HeapObject::kMapOffset, + ebx, + edi, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); // Replace receiver's backing store with newly created and filled FixedArray. __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax); - __ RecordWriteField(edx, JSObject::kElementsOffset, eax, edi, kDontSaveFPRegs, - EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ RecordWriteField(edx, + JSObject::kElementsOffset, + eax, + edi, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); // Restore registers. __ pop(eax); diff --git a/src/x87/deoptimizer-x87.cc b/src/x87/deoptimizer-x87.cc index a76c7a709..e873ac573 100644 --- a/src/x87/deoptimizer-x87.cc +++ b/src/x87/deoptimizer-x87.cc @@ -204,10 +204,8 @@ void Deoptimizer::SetPlatformCompiledStubRegisters( void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { - for (int i = 0; i < X87Register::kMaxNumAllocatableRegisters; ++i) { - double double_value = input_->GetDoubleRegister(i); - output_frame->SetDoubleRegister(i, double_value); - } + // Do nothing for X87. 
+ return; } @@ -232,42 +230,9 @@ void Deoptimizer::EntryGenerator::Generate() { // Save all general purpose registers before messing with them. const int kNumberOfRegisters = Register::kNumRegisters; - - const int kDoubleRegsSize = - kDoubleSize * X87Register::kMaxNumAllocatableRegisters; - - // Reserve space for x87 fp registers. - __ sub(esp, Immediate(kDoubleRegsSize)); - __ pushad(); - // GP registers are safe to use now. - // Save used x87 fp registers in correct position of previous reserve space. - Label loop, done; - // Get the layout of x87 stack. - __ sub(esp, Immediate(kPointerSize)); - __ fistp_s(MemOperand(esp, 0)); - __ pop(eax); - // Preserve stack layout in edi - __ mov(edi, eax); - // Get the x87 stack depth, the first 3 bits. - __ mov(ecx, eax); - __ and_(ecx, 0x7); - __ j(zero, &done, Label::kNear); - - __ bind(&loop); - __ shr(eax, 0x3); - __ mov(ebx, eax); - __ and_(ebx, 0x7); // Extract the st_x index into ebx. - // Pop TOS to the correct position. The disp(0x20) is due to pushad. - // The st_i should be saved to (esp + ebx * kDoubleSize + 0x20). - __ fstp_d(Operand(esp, ebx, times_8, 0x20)); - __ dec(ecx); // Decrease stack depth. - __ j(not_zero, &loop, Label::kNear); - __ bind(&done); - - const int kSavedRegistersAreaSize = - kNumberOfRegisters * kPointerSize + kDoubleRegsSize; + const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize; // Get the bailout id from the stack. __ mov(ebx, Operand(esp, kSavedRegistersAreaSize)); @@ -280,7 +245,6 @@ void Deoptimizer::EntryGenerator::Generate() { __ sub(edx, ebp); __ neg(edx); - __ push(edi); // Allocate a new deoptimizer object. __ PrepareCallCFunction(6, eax); __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); @@ -296,8 +260,6 @@ void Deoptimizer::EntryGenerator::Generate() { __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6); } - __ pop(edi); - // Preserve deoptimizer object in register eax and get the input // frame descriptor pointer. __ mov(ebx, Operand(eax, Deoptimizer::input_offset())); @@ -308,22 +270,13 @@ void Deoptimizer::EntryGenerator::Generate() { __ pop(Operand(ebx, offset)); } - int double_regs_offset = FrameDescription::double_registers_offset(); - // Fill in the double input registers. - for (int i = 0; i < X87Register::kMaxNumAllocatableRegisters; ++i) { - int dst_offset = i * kDoubleSize + double_regs_offset; - int src_offset = i * kDoubleSize; - __ fld_d(Operand(esp, src_offset)); - __ fstp_d(Operand(ebx, dst_offset)); - } - // Clear FPU all exceptions. // TODO(ulan): Find out why the TOP register is not zero here in some cases, // and check that the generated code never deoptimizes with unbalanced stack. __ fnclex(); // Remove the bailout id, return address and the double registers. - __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize)); + __ add(esp, Immediate(2 * kPointerSize)); // Compute a pointer to the unwinding limit in register ecx; that is // the first stack slot not part of the input frame. @@ -345,7 +298,6 @@ void Deoptimizer::EntryGenerator::Generate() { __ j(not_equal, &pop_loop); // Compute the output frame in the deoptimizer. - __ push(edi); __ push(eax); __ PrepareCallCFunction(1, ebx); __ mov(Operand(esp, 0 * kPointerSize), eax); @@ -355,7 +307,6 @@ void Deoptimizer::EntryGenerator::Generate() { ExternalReference::compute_output_frames_function(isolate()), 1); } __ pop(eax); - __ pop(edi); // If frame was dynamically aligned, pop padding. 
Label no_padding; @@ -394,25 +345,6 @@ void Deoptimizer::EntryGenerator::Generate() { __ cmp(eax, edx); __ j(below, &outer_push_loop); - - // In case of a failed STUB, we have to restore the x87 stack. - // x87 stack layout is in edi. - Label loop2, done2; - // Get the x87 stack depth, the first 3 bits. - __ mov(ecx, edi); - __ and_(ecx, 0x7); - __ j(zero, &done2, Label::kNear); - - __ lea(ecx, Operand(ecx, ecx, times_2, 0)); - __ bind(&loop2); - __ mov(eax, edi); - __ shr_cl(eax); - __ and_(eax, 0x7); - __ fld_d(Operand(ebx, eax, times_8, double_regs_offset)); - __ sub(ecx, Immediate(0x3)); - __ j(not_zero, &loop2, Label::kNear); - __ bind(&done2); - // Push state, pc, and continuation from the last output frame. __ push(Operand(ebx, FrameDescription::state_offset())); __ push(Operand(ebx, FrameDescription::pc_offset())); diff --git a/src/x87/disasm-x87.cc b/src/x87/disasm-x87.cc index 908e8b043..53a8c2906 100644 --- a/src/x87/disasm-x87.cc +++ b/src/x87/disasm-x87.cc @@ -702,12 +702,7 @@ int DisassemblerX87::MemoryFPUInstruction(int escape_opcode, case 0: mnem = "fld_s"; break; case 2: mnem = "fst_s"; break; case 3: mnem = "fstp_s"; break; - case 5: - mnem = "fldcw"; - break; - case 7: - mnem = "fnstcw"; - break; + case 7: mnem = "fstcw"; break; default: UnimplementedInstruction(); } break; @@ -721,27 +716,11 @@ int DisassemblerX87::MemoryFPUInstruction(int escape_opcode, } break; - case 0xDC: - switch (regop) { - case 0: - mnem = "fadd_d"; - break; - default: - UnimplementedInstruction(); - } - break; - case 0xDD: switch (regop) { case 0: mnem = "fld_d"; break; case 1: mnem = "fisttp_d"; break; case 2: mnem = "fst_d"; break; case 3: mnem = "fstp_d"; break; - case 4: - mnem = "frstor"; - break; - case 6: - mnem = "fnsave"; - break; default: UnimplementedInstruction(); } break; diff --git a/src/x87/full-codegen-x87.cc b/src/x87/full-codegen-x87.cc index 58328e06f..94ccbcfb5 100644 --- a/src/x87/full-codegen-x87.cc +++ b/src/x87/full-codegen-x87.cc @@ -221,8 +221,10 @@ void FullCodeGenerator::Generate() { __ mov(Operand(esi, context_offset), eax); // Update the write barrier. This clobbers eax and ebx. if (need_write_barrier) { - __ RecordWriteContextSlot(esi, context_offset, eax, ebx, - kDontSaveFPRegs); + __ RecordWriteContextSlot(esi, + context_offset, + eax, + ebx); } else if (FLAG_debug_code) { Label done; __ JumpIfInNewSpace(esi, eax, &done, Label::kNear); @@ -706,7 +708,7 @@ void FullCodeGenerator::SetVar(Variable* var, if (var->IsContextSlot()) { int offset = Context::SlotOffset(var->index()); DCHECK(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi)); - __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs); + __ RecordWriteContextSlot(scratch0, offset, src, scratch1); } } @@ -836,9 +838,12 @@ void FullCodeGenerator::VisitFunctionDeclaration( VisitForAccumulatorValue(declaration->fun()); __ mov(ContextOperand(esi, variable->index()), result_register()); // We know that we have written a function, which is not a smi. - __ RecordWriteContextSlot(esi, Context::SlotOffset(variable->index()), - result_register(), ecx, kDontSaveFPRegs, - EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ RecordWriteContextSlot(esi, + Context::SlotOffset(variable->index()), + result_register(), + ecx, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); PrepareForBailoutForId(proxy->id(), NO_REGISTERS); break; } @@ -872,8 +877,11 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) { // Assign it. 
__ mov(ContextOperand(esi, variable->index()), eax); // We know that we have written a module, which is not a smi. - __ RecordWriteContextSlot(esi, Context::SlotOffset(variable->index()), eax, - ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, + __ RecordWriteContextSlot(esi, + Context::SlotOffset(variable->index()), + eax, + ecx, + EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS); @@ -1775,8 +1783,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { // Store the subexpression value in the array's elements. __ mov(FieldOperand(ebx, offset), result_register()); // Update the write barrier for the array store. - __ RecordWriteField(ebx, offset, result_register(), ecx, kDontSaveFPRegs, - EMIT_REMEMBERED_SET, INLINE_SMI_CHECK); + __ RecordWriteField(ebx, offset, result_register(), ecx, + EMIT_REMEMBERED_SET, + INLINE_SMI_CHECK); } else { // Store the subexpression value in the array's elements. __ mov(ecx, Immediate(Smi::FromInt(i))); @@ -1933,8 +1942,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) { Immediate(Smi::FromInt(continuation.pos()))); __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi); __ mov(ecx, esi); - __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx, - kDontSaveFPRegs); + __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx); __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset)); __ cmp(esp, ebx); __ j(equal, &post_runtime); @@ -2008,8 +2016,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) { Immediate(Smi::FromInt(l_continuation.pos()))); __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi); __ mov(ecx, esi); - __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx, - kDontSaveFPRegs); + __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx); __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); __ mov(context_register(), Operand(ebp, StandardFrameConstants::kContextOffset)); @@ -2217,8 +2224,8 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) { // Only the value field needs a write barrier, as the other values are in the // root set. - __ RecordWriteField(eax, JSGeneratorObject::kResultValuePropertyOffset, ecx, - edx, kDontSaveFPRegs); + __ RecordWriteField(eax, JSGeneratorObject::kResultValuePropertyOffset, + ecx, edx); } @@ -2426,7 +2433,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot( if (var->IsContextSlot()) { __ mov(edx, eax); int offset = Context::SlotOffset(var->index()); - __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs); + __ RecordWriteContextSlot(ecx, offset, edx, ebx); } } @@ -3525,7 +3532,7 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) { // Update the write barrier. Save the value as it will be // overwritten by the write barrier code and is needed afterward. __ mov(edx, eax); - __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs); + __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx); __ bind(&done); context()->Plug(eax); diff --git a/src/x87/lithium-codegen-x87.cc b/src/x87/lithium-codegen-x87.cc index ff68fd0a6..ded2cd94a 100644 --- a/src/x87/lithium-codegen-x87.cc +++ b/src/x87/lithium-codegen-x87.cc @@ -254,8 +254,10 @@ bool LCodeGen::GeneratePrologue() { __ mov(Operand(esi, context_offset), eax); // Update the write barrier. This clobbers eax and ebx. 
if (need_write_barrier) { - __ RecordWriteContextSlot(esi, context_offset, eax, ebx, - kDontSaveFPRegs); + __ RecordWriteContextSlot(esi, + context_offset, + eax, + ebx); } else if (FLAG_debug_code) { Label done; __ JumpIfInNewSpace(esi, eax, &done, Label::kNear); @@ -267,8 +269,6 @@ bool LCodeGen::GeneratePrologue() { Comment(";;; End allocate local context"); } - // Initailize FPU state. - __ fninit(); // Trace the call. if (FLAG_trace && info()->IsOptimizing()) { // We have not executed any compiled code yet, so esi still holds the @@ -327,9 +327,6 @@ void LCodeGen::GenerateOsrPrologue() { int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); DCHECK(slots >= 1); __ sub(esp, Immediate((slots - 1) * kPointerSize)); - - // Initailize FPU state. - __ fninit(); } @@ -345,21 +342,8 @@ void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { - // When return from function call, FPU should be initialized again. - if (instr->IsCall() && instr->ClobbersDoubleRegisters(isolate())) { - bool double_result = instr->HasDoubleRegisterResult(); - if (double_result) { - __ lea(esp, Operand(esp, -kDoubleSize)); - __ fstp_d(Operand(esp, 0)); - } - __ fninit(); - if (double_result) { - __ fld_d(Operand(esp, 0)); - __ lea(esp, Operand(esp, kDoubleSize)); - } - } if (instr->IsGoto()) { - x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr), this); + x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr)); } else if (FLAG_debug_code && FLAG_enable_slow_asserts && !instr->IsGap() && !instr->IsReturn()) { if (instr->ClobbersDoubleRegisters(isolate())) { @@ -383,7 +367,11 @@ bool LCodeGen::GenerateJumpTable() { Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i]; __ bind(&table_entry->label); Address entry = table_entry->address; - DeoptComment(table_entry->reason); + Deoptimizer::BailoutType type = table_entry->bailout_type; + int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); + DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id); + Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); + DeoptComment(table_entry->mnemonic, table_entry->reason); if (table_entry->needs_frame) { DCHECK(!info()->saves_caller_doubles()); __ push(Immediate(ExternalReference::ForDeoptEntry(entry))); @@ -506,27 +494,10 @@ void LCodeGen::X87LoadForUsage(X87Register reg) { void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) { DCHECK(x87_stack_.Contains(reg1)); DCHECK(x87_stack_.Contains(reg2)); - if (reg1.is(reg2) && x87_stack_.depth() == 1) { - __ fld(x87_stack_.st(reg1)); - x87_stack_.push(reg1); - x87_stack_.pop(); - x87_stack_.pop(); - } else { - x87_stack_.Fxch(reg1, 1); - x87_stack_.Fxch(reg2); - x87_stack_.pop(); - x87_stack_.pop(); - } -} - - -int LCodeGen::X87Stack::GetLayout() { - int layout = stack_depth_; - for (int i = 0; i < stack_depth_; i++) { - layout |= (stack_[stack_depth_ - 1 - i].code() << ((i + 1) * 3)); - } - - return layout; + x87_stack_.Fxch(reg1, 1); + x87_stack_.Fxch(reg2); + x87_stack_.pop(); + x87_stack_.pop(); } @@ -601,22 +572,6 @@ void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) { } -void LCodeGen::X87Mov(X87Register dst, X87Register src, X87OperandType opts) { - if (x87_stack_.Contains(dst)) { - x87_stack_.Fxch(dst); - __ fstp(0); - x87_stack_.pop(); - // Push ST(i) onto the FPU register stack - __ fld(x87_stack_.st(src)); - x87_stack_.push(dst); - } else { - // Push ST(i) onto the FPU register stack - __ 
fld(x87_stack_.st(src)); - x87_stack_.push(dst); - } -} - - void LCodeGen::X87Fld(Operand src, X87OperandType opts) { DCHECK(!src.is_reg_only()); switch (opts) { @@ -642,9 +597,6 @@ void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) { case kX87DoubleOperand: __ fst_d(dst); break; - case kX87FloatOperand: - __ fst_s(dst); - break; case kX87IntOperand: __ fist_s(dst); break; @@ -708,39 +660,15 @@ void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) { } -void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr, - LCodeGen* cgen) { - // For going to a joined block, an explicit LClobberDoubles is inserted before - // LGoto. Because all used x87 registers are spilled to stack slots. The - // ResolvePhis phase of register allocator could guarantee the two input's x87 - // stacks have the same layout. So don't check stack_depth_ <= 1 here. - int goto_block_id = goto_instr->block_id(); - if (current_block_id + 1 != goto_block_id) { +void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) { + DCHECK(stack_depth_ <= 1); + // If ever used for new stubs producing two pairs of doubles joined into two + // phis this assert hits. That situation is not handled, since the two stacks + // might have st0 and st1 swapped. + if (current_block_id + 1 != goto_instr->block_id()) { // If we have a value on the x87 stack on leaving a block, it must be a // phi input. If the next block we compile is not the join block, we have // to discard the stack state. - // Before discarding the stack state, we need to save it if the "goto block" - // has unreachable last predecessor when FLAG_unreachable_code_elimination. - if (FLAG_unreachable_code_elimination) { - int length = goto_instr->block()->predecessors()->length(); - bool has_unreachable_last_predecessor = false; - for (int i = 0; i < length; i++) { - HBasicBlock* block = goto_instr->block()->predecessors()->at(i); - if (block->IsUnreachable() && - (block->block_id() + 1) == goto_block_id) { - has_unreachable_last_predecessor = true; - } - } - if (has_unreachable_last_predecessor) { - if (cgen->x87_stack_map_.find(goto_block_id) == - cgen->x87_stack_map_.end()) { - X87Stack* stack = new (cgen->zone()) X87Stack(*this); - cgen->x87_stack_map_.insert(std::make_pair(goto_block_id, stack)); - } - } - } - - // Discard the stack state. stack_depth_ = 0; } } @@ -750,14 +678,13 @@ void LCodeGen::EmitFlushX87ForDeopt() { // The deoptimizer does not support X87 Registers. But as long as we // deopt from a stub its not a problem, since we will re-materialize the // original stub inputs, which can't be double registers. - // DCHECK(info()->IsStub()); + DCHECK(info()->IsStub()); if (FLAG_debug_code && FLAG_enable_slow_asserts) { __ pushfd(); __ VerifyX87StackDepth(x87_stack_.depth()); __ popfd(); } - - // Flush X87 stack in the deoptimizer entry. 
+ for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0); } @@ -964,9 +891,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment, } else { translation->StoreInt32Register(reg); } - } else if (op->IsDoubleRegister()) { - X87Register reg = ToX87Register(op); - translation->StoreDoubleRegister(reg); } else if (op->IsConstantOperand()) { HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); @@ -1001,12 +925,13 @@ void LCodeGen::CallCode(Handle<Code> code, } -void LCodeGen::CallRuntime(const Runtime::Function* fun, int argc, - LInstruction* instr, SaveFPRegsMode save_doubles) { +void LCodeGen::CallRuntime(const Runtime::Function* fun, + int argc, + LInstruction* instr) { DCHECK(instr != NULL); DCHECK(instr->HasPointerMap()); - __ CallRuntime(fun, argc, save_doubles); + __ CallRuntime(fun, argc); RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); @@ -1036,7 +961,7 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, LOperand* context) { LoadContextFromDeferred(context); - __ CallRuntimeSaveDoubles(id); + __ CallRuntime(id); RecordSafepointWithRegisters( instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); @@ -1082,7 +1007,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization( void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, - const char* detail, + const char* reason, Deoptimizer::BailoutType bailout_type) { LEnvironment* environment = instr->environment(); RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); @@ -1110,12 +1035,6 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, __ pop(eax); __ popfd(); DCHECK(frame_is_built_); - // Put the x87 stack layout in TOS. - if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt(); - __ push(Immediate(x87_stack_.GetLayout())); - __ fild_s(MemOperand(esp, 0)); - // Don't touch eflags. - __ lea(esp, Operand(esp, kPointerSize)); __ call(entry, RelocInfo::RUNTIME_ENTRY); __ bind(&no_deopt); __ mov(Operand::StaticVariable(count), eax); @@ -1123,18 +1042,14 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, __ popfd(); } - // Put the x87 stack layout in TOS, so that we can save x87 fp registers in - // the correct location. - { + // Before Instructions which can deopt, we normally flush the x87 stack. But + // we can have inputs or outputs of the current instruction on the stack, + // thus we need to flush them here from the physical stack to leave it in a + // consistent state. + if (x87_stack_.depth() > 0) { Label done; if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); - if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt(); - - int x87_stack_layout = x87_stack_.GetLayout(); - __ push(Immediate(x87_stack_layout)); - __ fild_s(MemOperand(esp, 0)); - // Don't touch eflags. 
- __ lea(esp, Operand(esp, kPointerSize)); + EmitFlushX87ForDeopt(); __ bind(&done); } @@ -1145,19 +1060,19 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, __ bind(&done); } - Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), - instr->Mnemonic(), detail); DCHECK(info()->IsStub() || frame_is_built_); if (cc == no_condition && frame_is_built_) { - DeoptComment(reason); + DeoptComment(instr->Mnemonic(), reason); __ call(entry, RelocInfo::RUNTIME_ENTRY); } else { - Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, - !frame_is_built_); // We often have several deopts to the same entry, reuse the last // jump entry if this is the case. if (jump_table_.is_empty() || - !table_entry.IsEquivalentTo(jump_table_.last())) { + jump_table_.last().address != entry || + jump_table_.last().needs_frame != !frame_is_built_ || + jump_table_.last().bailout_type != bailout_type) { + Deoptimizer::JumpTableEntry table_entry(entry, instr->Mnemonic(), reason, + bailout_type, !frame_is_built_); jump_table_.Add(table_entry, zone()); } if (cc == no_condition) { @@ -1170,11 +1085,11 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, - const char* detail) { + const char* reason) { Deoptimizer::BailoutType bailout_type = info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; - DeoptimizeIf(cc, instr, detail, bailout_type); + DeoptimizeIf(cc, instr, reason, bailout_type); } @@ -1321,16 +1236,6 @@ void LCodeGen::DoLabel(LLabel* label) { LabelType(label)); __ bind(label->label()); current_block_ = label->block_id(); - if (label->block()->predecessors()->length() > 1) { - // A join block's x87 stack is that of its last visited predecessor. - // If the last visited predecessor block is unreachable, the stack state - // will be wrong. In such case, use the x87 stack of reachable predecessor. - X87StackMap::const_iterator it = x87_stack_map_.find(current_block_); - // Restore x87 stack. - if (it != x87_stack_map_.end()) { - x87_stack_ = *(it->second); - } - } DoGap(label); } @@ -1832,7 +1737,7 @@ void LCodeGen::DoMulI(LMulI* instr) { // Bail out if the result is supposed to be negative zero. Label done; __ test(left, Operand(left)); - __ j(not_zero, &done); + __ j(not_zero, &done, Label::kNear); if (right->IsConstantOperand()) { if (ToInteger32(LConstantOperand::cast(right)) < 0) { DeoptimizeIf(no_condition, instr); @@ -2213,58 +2118,8 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { } __ bind(&return_left); } else { - DCHECK(instr->hydrogen()->representation().IsDouble()); - Label check_nan_left, check_zero, return_left, return_right; - Condition condition = (operation == HMathMinMax::kMathMin) ? below : above; - X87Register left_reg = ToX87Register(left); - X87Register right_reg = ToX87Register(right); - - X87PrepareBinaryOp(left_reg, right_reg, ToX87Register(instr->result())); - __ fld(1); - __ fld(1); - __ FCmp(); - __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN. - __ j(equal, &check_zero, Label::kNear); // left == right. - __ j(condition, &return_left, Label::kNear); - __ jmp(&return_right, Label::kNear); - - __ bind(&check_zero); - __ fld(0); - __ fldz(); - __ FCmp(); - __ j(not_equal, &return_left, Label::kNear); // left == right != 0. - // At this point, both left and right are either 0 or -0. - if (operation == HMathMinMax::kMathMin) { - // Push st0 and st1 to stack, then pop them to temp registers and OR them, - // load it to left. 
- Register scratch_reg = ToRegister(instr->temp()); - __ fld(1); - __ fld(1); - __ sub(esp, Immediate(2 * kPointerSize)); - __ fstp_s(MemOperand(esp, 0)); - __ fstp_s(MemOperand(esp, kPointerSize)); - __ pop(scratch_reg); - __ xor_(MemOperand(esp, 0), scratch_reg); - X87Mov(left_reg, MemOperand(esp, 0), kX87FloatOperand); - __ pop(scratch_reg); // restore esp - } else { - // Since we operate on +0 and/or -0, addsd and andsd have the same effect. - X87Fxch(left_reg); - __ fadd(1); - } - __ jmp(&return_left, Label::kNear); - - __ bind(&check_nan_left); - __ fld(0); - __ fld(0); - __ FCmp(); // NaN check. - __ j(parity_even, &return_left, Label::kNear); // left == NaN. - - __ bind(&return_right); - X87Fxch(left_reg); - X87Mov(left_reg, right_reg); - - __ bind(&return_left); + // TODO(weiliang) use X87 for double representation. + UNIMPLEMENTED(); } } @@ -2309,13 +2164,6 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) { UNREACHABLE(); break; } - - // Only always explicitly storing to memory to force the round-down for double - // arithmetic. - __ lea(esp, Operand(esp, -kDoubleSize)); - __ fstp_d(Operand(esp, 0)); - __ fld_d(Operand(esp, 0)); - __ lea(esp, Operand(esp, kDoubleSize)); } @@ -2369,11 +2217,7 @@ void LCodeGen::DoBranch(LBranch* instr) { __ test(reg, Operand(reg)); EmitBranch(instr, not_zero); } else if (r.IsDouble()) { - X87Register reg = ToX87Register(instr->value()); - X87LoadForUsage(reg); - __ fldz(); - __ FCmp(); - EmitBranch(instr, not_zero); + UNREACHABLE(); } else { DCHECK(r.IsTagged()); Register reg = ToRegister(instr->value()); @@ -2629,10 +2473,7 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { DCHECK(!rep.IsInteger32()); if (rep.IsDouble()) { - X87Register input = ToX87Register(instr->value()); - X87LoadForUsage(input); - __ FXamMinusZero(); - EmitBranch(instr, equal); + UNREACHABLE(); } else { Register value = ToRegister(instr->value()); Handle<Map> map = masm()->isolate()->factory()->heap_number_map(); @@ -3217,8 +3058,12 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; Register temp = ToRegister(instr->temp()); int offset = Context::SlotOffset(instr->slot_index()); - __ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs, - EMIT_REMEMBERED_SET, check_needed); + __ RecordWriteContextSlot(context, + offset, + value, + temp, + EMIT_REMEMBERED_SET, + check_needed); } __ bind(&skip_assignment); @@ -3887,9 +3732,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) { Representation r = instr->hydrogen()->value()->representation(); if (r.IsDouble()) { - X87Register value = ToX87Register(instr->value()); - X87Fxch(value); - __ fabs(); + UNIMPLEMENTED(); } else if (r.IsSmiOrInteger32()) { EmitIntegerMathAbs(instr); } else { // Tagged case. @@ -3905,347 +3748,47 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) { void LCodeGen::DoMathFloor(LMathFloor* instr) { - Register output_reg = ToRegister(instr->result()); - X87Register input_reg = ToX87Register(instr->value()); - X87Fxch(input_reg); - - Label not_minus_zero, done; - // Deoptimize on unordered. - __ fldz(); - __ fld(1); - __ FCmp(); - DeoptimizeIf(parity_even, instr); - __ j(below, ¬_minus_zero, Label::kNear); - - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // Check for negative zero. - __ j(not_equal, ¬_minus_zero, Label::kNear); - // +- 0.0. - __ fld(0); - __ FXamSign(); - DeoptimizeIf(not_zero, instr); - __ Move(output_reg, Immediate(0)); - __ jmp(&done, Label::kFar); - } - - // Positive input. 
- // rc=01B, round down. - __ bind(¬_minus_zero); - __ fnclex(); - __ X87SetRC(0x0400); - __ sub(esp, Immediate(kPointerSize)); - __ fist_s(Operand(esp, 0)); - __ pop(output_reg); - __ X87CheckIA(); - DeoptimizeIf(equal, instr); - __ fnclex(); - __ X87SetRC(0x0000); - __ bind(&done); + UNIMPLEMENTED(); } void LCodeGen::DoMathRound(LMathRound* instr) { - X87Register input_reg = ToX87Register(instr->value()); - Register result = ToRegister(instr->result()); - X87Fxch(input_reg); - Label below_one_half, below_minus_one_half, done; - - ExternalReference one_half = ExternalReference::address_of_one_half(); - ExternalReference minus_one_half = - ExternalReference::address_of_minus_one_half(); - - __ fld_d(Operand::StaticVariable(one_half)); - __ fld(1); - __ FCmp(); - __ j(carry, &below_one_half); - - // Use rounds towards zero, since 0.5 <= x, we use floor(0.5 + x) - __ fld(0); - __ fadd_d(Operand::StaticVariable(one_half)); - // rc=11B, round toward zero. - __ X87SetRC(0x0c00); - __ sub(esp, Immediate(kPointerSize)); - // Clear exception bits. - __ fnclex(); - __ fistp_s(MemOperand(esp, 0)); - // Check overflow. - __ X87CheckIA(); - __ pop(result); - DeoptimizeIf(equal, instr, "conversion overflow"); - __ fnclex(); - // Restore round mode. - __ X87SetRC(0x0000); - __ jmp(&done); - - __ bind(&below_one_half); - __ fld_d(Operand::StaticVariable(minus_one_half)); - __ fld(1); - __ FCmp(); - __ j(carry, &below_minus_one_half); - // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if - // we can ignore the difference between a result of -0 and +0. - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // If the sign is positive, we return +0. - __ fld(0); - __ FXamSign(); - DeoptimizeIf(not_zero, instr, "minus zero"); - } - __ Move(result, Immediate(0)); - __ jmp(&done); - - __ bind(&below_minus_one_half); - __ fld(0); - __ fadd_d(Operand::StaticVariable(one_half)); - // rc=01B, round down. - __ X87SetRC(0x0400); - __ sub(esp, Immediate(kPointerSize)); - // Clear exception bits. - __ fnclex(); - __ fistp_s(MemOperand(esp, 0)); - // Check overflow. - __ X87CheckIA(); - __ pop(result); - DeoptimizeIf(equal, instr, "conversion overflow"); - __ fnclex(); - // Restore round mode. - __ X87SetRC(0x0000); - - __ bind(&done); + UNIMPLEMENTED(); } void LCodeGen::DoMathFround(LMathFround* instr) { - X87Register input_reg = ToX87Register(instr->value()); - X87Fxch(input_reg); - __ sub(esp, Immediate(kPointerSize)); - __ fstp_s(MemOperand(esp, 0)); - X87Fld(MemOperand(esp, 0), kX87FloatOperand); - __ add(esp, Immediate(kPointerSize)); + UNIMPLEMENTED(); } void LCodeGen::DoMathSqrt(LMathSqrt* instr) { - X87Register input_reg = ToX87Register(instr->value()); - X87Register output_reg = ToX87Register(instr->result()); - DCHECK(output_reg.is(input_reg)); - USE(output_reg); - X87Fxch(input_reg); - __ fsqrt(); + UNIMPLEMENTED(); } void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { - X87Register input_reg = ToX87Register(instr->value()); - DCHECK(ToX87Register(instr->result()).is(input_reg)); - X87Fxch(input_reg); - // Note that according to ECMA-262 15.8.2.13: - // Math.pow(-Infinity, 0.5) == Infinity - // Math.sqrt(-Infinity) == NaN - Label done, sqrt; - // Check base for -Infinity. C3 == 0, C2 == 1, C1 == 1 and C0 == 1 - __ fxam(); - __ push(eax); - __ fnstsw_ax(); - __ and_(eax, Immediate(0x4700)); - __ cmp(eax, Immediate(0x0700)); - __ j(not_equal, &sqrt, Label::kNear); - // If input is -Infinity, return Infinity. - __ fchs(); - __ jmp(&done, Label::kNear); - - // Square root. 
- __ bind(&sqrt); - __ fldz(); - __ faddp(); // Convert -0 to +0. - __ fsqrt(); - __ bind(&done); - __ pop(eax); + UNIMPLEMENTED(); } void LCodeGen::DoPower(LPower* instr) { - Representation exponent_type = instr->hydrogen()->right()->representation(); - X87Register result = ToX87Register(instr->result()); - // Having marked this as a call, we can use any registers. - X87Register base = ToX87Register(instr->left()); - ExternalReference one_half = ExternalReference::address_of_one_half(); - - if (exponent_type.IsSmi()) { - Register exponent = ToRegister(instr->right()); - X87LoadForUsage(base); - __ SmiUntag(exponent); - __ push(exponent); - __ fild_s(MemOperand(esp, 0)); - __ pop(exponent); - } else if (exponent_type.IsTagged()) { - Register exponent = ToRegister(instr->right()); - Register temp = exponent.is(ecx) ? eax : ecx; - Label no_deopt, done; - X87LoadForUsage(base); - __ JumpIfSmi(exponent, &no_deopt); - __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, temp); - DeoptimizeIf(not_equal, instr); - // Heap number(double) - __ fld_d(FieldOperand(exponent, HeapNumber::kValueOffset)); - __ jmp(&done); - // SMI - __ bind(&no_deopt); - __ SmiUntag(exponent); - __ push(exponent); - __ fild_s(MemOperand(esp, 0)); - __ pop(exponent); - __ bind(&done); - } else if (exponent_type.IsInteger32()) { - Register exponent = ToRegister(instr->right()); - X87LoadForUsage(base); - __ push(exponent); - __ fild_s(MemOperand(esp, 0)); - __ pop(exponent); - } else { - DCHECK(exponent_type.IsDouble()); - X87Register exponent_double = ToX87Register(instr->right()); - X87LoadForUsage(base, exponent_double); - } - - // FP data stack {base, exponent(TOS)}. - // Handle (exponent==+-0.5 && base == -0). - Label not_plus_0; - __ fld(0); - __ fabs(); - X87Fld(Operand::StaticVariable(one_half), kX87DoubleOperand); - __ FCmp(); - __ j(parity_even, ¬_plus_0, Label::kNear); // NaN. - __ j(not_equal, ¬_plus_0, Label::kNear); - __ fldz(); - // FP data stack {base, exponent(TOS), zero}. - __ faddp(2); - __ bind(¬_plus_0); - - { - __ PrepareCallCFunction(4, eax); - __ fstp_d(MemOperand(esp, kDoubleSize)); // Exponent value. - __ fstp_d(MemOperand(esp, 0)); // Base value. - X87PrepareToWrite(result); - __ CallCFunction(ExternalReference::power_double_double_function(isolate()), - 4); - // Return value is in st(0) on ia32. - X87CommitWrite(result); - } + UNIMPLEMENTED(); } void LCodeGen::DoMathLog(LMathLog* instr) { - DCHECK(instr->value()->Equals(instr->result())); - X87Register input_reg = ToX87Register(instr->value()); - X87Fxch(input_reg); - - Label positive, done, zero, nan_result; - __ fldz(); - __ fld(1); - __ FCmp(); - __ j(below, &nan_result, Label::kNear); - __ j(equal, &zero, Label::kNear); - // Positive input. - // {input, ln2}. - __ fldln2(); - // {ln2, input}. - __ fxch(); - // {result}. 
- __ fyl2x(); - __ jmp(&done, Label::kNear); - - __ bind(&nan_result); - ExternalReference nan = - ExternalReference::address_of_canonical_non_hole_nan(); - X87PrepareToWrite(input_reg); - __ fld_d(Operand::StaticVariable(nan)); - X87CommitWrite(input_reg); - __ jmp(&done, Label::kNear); - - __ bind(&zero); - ExternalReference ninf = ExternalReference::address_of_negative_infinity(); - X87PrepareToWrite(input_reg); - __ fld_d(Operand::StaticVariable(ninf)); - X87CommitWrite(input_reg); - - __ bind(&done); + UNIMPLEMENTED(); } void LCodeGen::DoMathClz32(LMathClz32* instr) { - Register input = ToRegister(instr->value()); - Register result = ToRegister(instr->result()); - Label not_zero_input; - __ bsr(result, input); - - __ j(not_zero, ¬_zero_input); - __ Move(result, Immediate(63)); // 63^31 == 32 - - __ bind(¬_zero_input); - __ xor_(result, Immediate(31)); // for x in [0..31], 31^x == 31-x. + UNIMPLEMENTED(); } void LCodeGen::DoMathExp(LMathExp* instr) { - X87Register input = ToX87Register(instr->value()); - X87Register result_reg = ToX87Register(instr->result()); - Register temp_result = ToRegister(instr->temp1()); - Register temp = ToRegister(instr->temp2()); - Label slow, done, smi, finish; - DCHECK(result_reg.is(input)); - - // Store input into Heap number and call runtime function kMathExpRT. - if (FLAG_inline_new) { - __ AllocateHeapNumber(temp_result, temp, no_reg, &slow); - __ jmp(&done, Label::kNear); - } - - // Slow case: Call the runtime system to do the number allocation. - __ bind(&slow); - { - // TODO(3095996): Put a valid pointer value in the stack slot where the - // result register is stored, as this register is in the pointer map, but - // contains an integer value. - __ Move(temp_result, Immediate(0)); - - // Preserve the value of all registers. - PushSafepointRegistersScope scope(this); - - __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); - RecordSafepointWithRegisters(instr->pointer_map(), 0, - Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(temp_result, eax); - } - __ bind(&done); - X87LoadForUsage(input); - __ fstp_d(FieldOperand(temp_result, HeapNumber::kValueOffset)); - - { - // Preserve the value of all registers. - PushSafepointRegistersScope scope(this); - - __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); - __ push(temp_result); - __ CallRuntimeSaveDoubles(Runtime::kMathExpRT); - RecordSafepointWithRegisters(instr->pointer_map(), 0, - Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(temp_result, eax); - } - X87PrepareToWrite(result_reg); - // return value of MathExpRT is Smi or Heap Number. 
- __ JumpIfSmi(temp_result, &smi); - // Heap number(double) - __ fld_d(FieldOperand(temp_result, HeapNumber::kValueOffset)); - __ jmp(&finish); - // SMI - __ bind(&smi); - __ SmiUntag(temp_result); - __ push(temp_result); - __ fild_s(MemOperand(esp, 0)); - __ pop(temp_result); - __ bind(&finish); - X87CommitWrite(result_reg); + UNIMPLEMENTED(); } @@ -4342,7 +3885,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) { void LCodeGen::DoCallRuntime(LCallRuntime* instr) { DCHECK(ToRegister(instr->context()).is(esi)); - CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles()); + CallRuntime(instr->function(), instr->arity(), instr); } @@ -4413,7 +3956,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { __ mov(temp_map, transition); __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map); // Update the write barrier for the map field. - __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs); + __ RecordWriteForMap(object, transition, temp_map, temp); } } @@ -4448,7 +3991,10 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { Register value = ToRegister(instr->value()); Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object; // Update the write barrier for the object for in-object properties. - __ RecordWriteField(write_register, offset, value, temp, kSaveFPRegs, + __ RecordWriteField(write_register, + offset, + value, + temp, EMIT_REMEMBERED_SET, instr->hydrogen()->SmiCheckForWriteBarrier(), instr->hydrogen()->PointersToHereCheckForValue()); @@ -4508,7 +4054,8 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { instr->base_offset())); if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || elements_kind == FLOAT32_ELEMENTS) { - X87Mov(operand, ToX87Register(instr->value()), kX87FloatOperand); + __ fld(0); + __ fstp_s(operand); } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { X87Mov(operand, ToX87Register(instr->value())); @@ -4644,7 +4191,10 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; // Compute address of modified element and store it into key register. __ lea(key, operand); - __ RecordWrite(elements, key, value, kSaveFPRegs, EMIT_REMEMBERED_SET, + __ RecordWrite(elements, + key, + value, + EMIT_REMEMBERED_SET, check_needed, instr->hydrogen()->PointersToHereCheckForValue()); } @@ -4707,7 +4257,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { // Write barrier. DCHECK_NE(instr->temp(), NULL); __ RecordWriteForMap(object_reg, to_map, new_map_reg, - ToRegister(instr->temp()), kDontSaveFPRegs); + ToRegister(instr->temp())); } else { DCHECK(ToRegister(instr->context()).is(esi)); DCHECK(object_reg.is(eax)); @@ -4977,7 +4527,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, // The corresponding HChange instructions are added in a phase that does // not have easy access to the local context. __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); + __ CallRuntime(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); __ StoreToSafepointRegisterSlot(reg, eax); @@ -5007,9 +4557,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { // Put the value to the top of stack X87Register src = ToX87Register(instr->value()); - // Don't use X87LoadForUsage here, which is only used by Instruction which - // clobbers fp registers. 
- x87_stack_.Fxch(src); + X87LoadForUsage(src); DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr, x87_stack_); @@ -5020,7 +4568,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { __ jmp(deferred->entry()); } __ bind(deferred->exit()); - __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset)); + __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); } @@ -5038,7 +4586,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { // The corresponding HChange instructions are added in a phase that does // not have easy access to the local context. __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); - __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); + __ CallRuntime(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); __ StoreToSafepointRegisterSlot(reg, eax); @@ -5087,7 +4635,7 @@ void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg, X87PrepareToWrite(res_reg); if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { // Smi check. - __ JumpIfSmi(input_reg, &load_smi); + __ JumpIfSmi(input_reg, &load_smi, Label::kNear); // Heap number map check. __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), @@ -5096,7 +4644,7 @@ void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg, DeoptimizeIf(not_equal, instr); } else { Label heap_number, convert; - __ j(equal, &heap_number); + __ j(equal, &heap_number, Label::kNear); // Convert undefined (or hole) to NaN. __ cmp(input_reg, factory()->undefined_value()); @@ -5178,14 +4726,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { __ bind(&check_false); __ cmp(input_reg, factory()->false_value()); - DeoptimizeIf(not_equal, instr, "cannot truncate"); + __ RecordComment("Deferred TaggedToI: cannot truncate"); + DeoptimizeIf(not_equal, instr); __ Move(input_reg, Immediate(0)); } else { // TODO(olivf) Converting a number on the fpu is actually quite slow. We // should first try a fast conversion and then bailout to this slow case. 
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), isolate()->factory()->heap_number_map()); - DeoptimizeIf(not_equal, instr, "not a heap number"); + __ RecordComment("Deferred TaggedToI: not a heap number"); + DeoptimizeIf(not_equal, instr); __ sub(esp, Immediate(kPointerSize)); __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); @@ -5201,12 +4751,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { __ j(equal, &no_precision_lost, Label::kNear); __ fstp(0); - DeoptimizeIf(no_condition, instr, "lost precision"); + __ RecordComment("Deferred TaggedToI: lost precision"); + DeoptimizeIf(no_condition, instr); __ bind(&no_precision_lost); __ j(parity_odd, &not_nan); __ fstp(0); - DeoptimizeIf(no_condition, instr, "NaN"); + __ RecordComment("Deferred TaggedToI: NaN"); + DeoptimizeIf(no_condition, instr); __ bind(&not_nan); __ test(input_reg, Operand(input_reg)); @@ -5221,14 +4773,17 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { __ fstp_s(Operand(esp, 0)); __ pop(input_reg); __ test(input_reg, Operand(input_reg)); - DeoptimizeIf(not_zero, instr, "minus zero"); + __ RecordComment("Deferred TaggedToI: minus zero"); + DeoptimizeIf(not_zero, instr); } else { __ fist_s(MemOperand(esp, 0)); __ fild_s(MemOperand(esp, 0)); __ FCmp(); __ pop(input_reg); - DeoptimizeIf(not_equal, instr, "lost precision"); - DeoptimizeIf(parity_even, instr, "NaN"); + __ RecordComment("Deferred TaggedToI: lost precision"); + DeoptimizeIf(not_equal, instr); + __ RecordComment("Deferred TaggedToI: NaN"); + DeoptimizeIf(parity_even, instr); } } } @@ -5418,7 +4973,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { PushSafepointRegistersScope scope(this); __ push(object); __ xor_(esi, esi); - __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); + __ CallRuntime(Runtime::kTryMigrateInstance); RecordSafepointWithRegisters( instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); @@ -5488,10 +5043,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { - X87Register value_reg = ToX87Register(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - X87Fxch(value_reg); - __ ClampTOSToUint8(result_reg); + UNREACHABLE(); } @@ -5625,32 +5177,12 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) { void LCodeGen::DoDoubleBits(LDoubleBits* instr) { - X87Register value_reg = ToX87Register(instr->value()); - Register result_reg = ToRegister(instr->result()); - X87Fxch(value_reg); - __ sub(esp, Immediate(kDoubleSize)); - __ fst_d(Operand(esp, 0)); - if (instr->hydrogen()->bits() == HDoubleBits::HIGH) { - __ mov(result_reg, Operand(esp, kPointerSize)); - } else { - __ mov(result_reg, Operand(esp, 0)); - } - __ add(esp, Immediate(kDoubleSize)); + UNREACHABLE(); } void LCodeGen::DoConstructDouble(LConstructDouble* instr) { - Register hi_reg = ToRegister(instr->hi()); - Register lo_reg = ToRegister(instr->lo()); - X87Register result_reg = ToX87Register(instr->result()); - // Follow below pattern to write a x87 fp register.
- X87PrepareToWrite(result_reg); - __ sub(esp, Immediate(kDoubleSize)); - __ mov(Operand(esp, 0), lo_reg); - __ mov(Operand(esp, kPointerSize), hi_reg); - __ fld_d(Operand(esp, 0)); - __ add(esp, Immediate(kDoubleSize)); - X87CommitWrite(result_reg); + UNREACHABLE(); } @@ -6014,7 +5546,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) { void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { PushSafepointRegistersScope scope(this); __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); - __ CallRuntimeSaveDoubles(Runtime::kStackGuard); + __ CallRuntime(Runtime::kStackGuard); RecordSafepointWithLazyDeopt( instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); DCHECK(instr->HasEnvironment()); @@ -6161,7 +5693,7 @@ void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, __ push(object); __ push(index); __ xor_(esi, esi); - __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); + __ CallRuntime(Runtime::kLoadMutableDouble); RecordSafepointWithRegisters( instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); __ StoreToSafepointRegisterSlot(object, eax); diff --git a/src/x87/lithium-codegen-x87.h b/src/x87/lithium-codegen-x87.h index 1a4ca29a0..080a46829 100644 --- a/src/x87/lithium-codegen-x87.h +++ b/src/x87/lithium-codegen-x87.h @@ -5,7 +5,6 @@ #ifndef V8_X87_LITHIUM_CODEGEN_X87_H_ #define V8_X87_LITHIUM_CODEGEN_X87_H_ -#include <map> #include "src/x87/lithium-x87.h" #include "src/base/logging.h" @@ -85,8 +84,6 @@ class LCodeGen: public LCodeGenBase { X87OperandType operand = kX87DoubleOperand); void X87Mov(Operand src, X87Register reg, X87OperandType operand = kX87DoubleOperand); - void X87Mov(X87Register reg, X87Register src, - X87OperandType operand = kX87DoubleOperand); void X87PrepareBinaryOp( X87Register left, X87Register right, X87Register result); @@ -201,8 +198,9 @@ class LCodeGen: public LCodeGenBase { LInstruction* instr, SafepointMode safepoint_mode); - void CallRuntime(const Runtime::Function* fun, int argc, LInstruction* instr, - SaveFPRegsMode save_doubles = kDontSaveFPRegs); + void CallRuntime(const Runtime::Function* fun, + int argc, + LInstruction* instr); void CallRuntime(Runtime::FunctionId id, int argc, @@ -236,10 +234,10 @@ class LCodeGen: public LCodeGenBase { void RegisterEnvironmentForDeoptimization(LEnvironment* environment, Safepoint::DeoptMode mode); - void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail, + void DeoptimizeIf(Condition cc, LInstruction* instr, const char* reason, Deoptimizer::BailoutType bailout_type); void DeoptimizeIf(Condition cc, LInstruction* instr, - const char* detail = NULL); + const char* reason = NULL); bool DeoptEveryNTimes() { return FLAG_deopt_every_n_times != 0 && !info()->IsStub(); @@ -378,7 +376,7 @@ class LCodeGen: public LCodeGenBase { int osr_pc_offset_; bool frame_is_built_; - class X87Stack : public ZoneObject { + class X87Stack { public: explicit X87Stack(MacroAssembler* masm) : stack_depth_(0), is_mutable_(true), masm_(masm) { } @@ -395,23 +393,14 @@ class LCodeGen: public LCodeGenBase { } return true; } - X87Stack& operator=(const X87Stack& other) { - stack_depth_ = other.stack_depth_; - for (int i = 0; i < stack_depth_; i++) { - stack_[i] = other.stack_[i]; - } - return *this; - } bool Contains(X87Register reg); void Fxch(X87Register reg, int other_slot = 0); void Free(X87Register reg); void PrepareToWrite(X87Register reg); void CommitWrite(X87Register reg); void FlushIfNecessary(LInstruction* instr, LCodeGen* cgen); - void LeavingBlock(int current_block_id, LGoto* goto_instr, 
LCodeGen* cgen); + void LeavingBlock(int current_block_id, LGoto* goto_instr); int depth() const { return stack_depth_; } - int GetLayout(); - int st(X87Register reg) { return st2idx(ArrayIndex(reg)); } void pop() { DCHECK(is_mutable_); stack_depth_--; @@ -436,9 +425,6 @@ class LCodeGen: public LCodeGenBase { MacroAssembler* masm_; }; X87Stack x87_stack_; - // block_id -> X87Stack*; - typedef std::map<int, X87Stack*> X87StackMap; - X87StackMap x87_stack_map_; // Builder that keeps track of safepoints in the code. The table // itself is emitted at the end of the generated code. @@ -472,7 +458,6 @@ class LCodeGen: public LCodeGenBase { friend class LDeferredCode; friend class LEnvironment; friend class SafepointGenerator; - friend class X87Stack; DISALLOW_COPY_AND_ASSIGN(LCodeGen); }; diff --git a/src/x87/lithium-gap-resolver-x87.cc b/src/x87/lithium-gap-resolver-x87.cc index 6a6427550..b94e34f2c 100644 --- a/src/x87/lithium-gap-resolver-x87.cc +++ b/src/x87/lithium-gap-resolver-x87.cc @@ -317,15 +317,10 @@ void LGapResolver::EmitMove(int index) { } else if (source->IsDoubleRegister()) { // load from the register onto the stack, store in destination, which must // be a double stack slot in the non-SSE2 case. - if (destination->IsDoubleStackSlot()) { - Operand dst = cgen_->ToOperand(destination); - X87Register src = cgen_->ToX87Register(source); - cgen_->X87Mov(dst, src); - } else { - X87Register dst = cgen_->ToX87Register(destination); - X87Register src = cgen_->ToX87Register(source); - cgen_->X87Mov(dst, src); - } + DCHECK(destination->IsDoubleStackSlot()); + Operand dst = cgen_->ToOperand(destination); + X87Register src = cgen_->ToX87Register(source); + cgen_->X87Mov(dst, src); } else if (source->IsDoubleStackSlot()) { // load from the stack slot on top of the floating point stack, and then // store in destination. If destination is a double register, then it diff --git a/src/x87/lithium-x87.cc b/src/x87/lithium-x87.cc index 993f5adf2..02037c3b0 100644 --- a/src/x87/lithium-x87.cc +++ b/src/x87/lithium-x87.cc @@ -484,12 +484,6 @@ LUnallocated* LChunkBuilder::ToUnallocated(Register reg) { } -LUnallocated* LChunkBuilder::ToUnallocated(X87Register reg) { - return new (zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, - X87Register::ToAllocationIndex(reg)); -} - - LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) { return Use(value, ToUnallocated(fixed_register)); } @@ -622,12 +616,6 @@ LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr, } -LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr, - X87Register reg) { - return Define(instr, ToUnallocated(reg)); -} - - LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { HEnvironment* hydrogen_env = current_block_->last_environment(); int argument_index_accumulator = 0; @@ -884,14 +872,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { if (current->IsControlInstruction() && HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) && successor != NULL) { - // Always insert a fpu register barrier here when branch is optimized to - // be a direct goto. - // TODO(weiliang): require a better solution. 
- if (!current->IsGoto()) { - LClobberDoubles* clobber = new (zone()) LClobberDoubles(isolate()); - clobber->set_hydrogen_value(current); - chunk_->AddInstruction(clobber, current_block_); - } instr = new(zone()) LGoto(successor); } else { instr = current->CompileToLithium(this); @@ -951,8 +931,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr, if (FLAG_stress_environments && !instr->HasEnvironment()) { instr = AssignEnvironment(instr); } - if (instr->IsGoto() && - (LGoto::cast(instr)->jumps_to_join() || next_block_->is_osr_entry())) { + if (instr->IsGoto() && LGoto::cast(instr)->jumps_to_join()) { // TODO(olivf) Since phis of spilled values are joined as registers // (not in the stack slot), we need to allow the goto gaps to keep one // x87 register alive. To ensure all other values are still spilled, we @@ -1000,9 +979,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() || type.IsJSArray() || type.IsHeapNumber() || type.IsString(); LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL; - LInstruction* branch = - temp != NULL ? new (zone()) LBranch(UseRegister(value), temp) - : new (zone()) LBranch(UseRegisterAtStart(value), temp); + LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp); if (!easy_case && ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) || !expected.IsGeneric())) { @@ -1205,16 +1182,16 @@ LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) { LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LInstruction* result = DefineAsRegister(new (zone()) LMathRound(input)); - return AssignEnvironment(result); + // Crankshaft is turned off for nosse2. 
+ UNREACHABLE(); + return NULL; } LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) { - LOperand* input = UseRegister(instr->value()); + LOperand* input = UseRegisterAtStart(instr->value()); LMathFround* result = new (zone()) LMathFround(input); - return DefineSameAsFirst(result); + return AssignEnvironment(DefineAsRegister(result)); } @@ -1248,11 +1225,11 @@ LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) { LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) { DCHECK(instr->representation().IsDouble()); DCHECK(instr->value()->representation().IsDouble()); - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* temp1 = FixedTemp(ecx); - LOperand* temp2 = FixedTemp(edx); + LOperand* value = UseTempRegister(instr->value()); + LOperand* temp1 = TempRegister(); + LOperand* temp2 = TempRegister(); LMathExp* result = new(zone()) LMathExp(value, temp1, temp2); - return MarkAsCall(DefineSameAsFirst(result), instr); + return DefineAsRegister(result); } @@ -1265,7 +1242,8 @@ LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) { LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) { LOperand* input = UseRegisterAtStart(instr->value()); - LMathPowHalf* result = new (zone()) LMathPowHalf(input); + LOperand* temp = TempRegister(); + LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp); return DefineSameAsFirst(result); } @@ -1637,8 +1615,6 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) { LOperand* left = NULL; LOperand* right = NULL; - LOperand* scratch = TempRegister(); - if (instr->representation().IsSmiOrInteger32()) { DCHECK(instr->left()->representation().Equals(instr->representation())); DCHECK(instr->right()->representation().Equals(instr->representation())); @@ -1651,19 +1627,15 @@ LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) { left = UseRegisterAtStart(instr->left()); right = UseRegisterAtStart(instr->right()); } - LMathMinMax* minmax = new (zone()) LMathMinMax(left, right, scratch); + LMathMinMax* minmax = new(zone()) LMathMinMax(left, right); return DefineSameAsFirst(minmax); } LInstruction* LChunkBuilder::DoPower(HPower* instr) { - // Unlike ia32, we don't have a MathPowStub and directly call c function. - DCHECK(instr->representation().IsDouble()); - DCHECK(instr->left()->representation().IsDouble()); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - LPower* result = new (zone()) LPower(left, right); - return MarkAsCall(DefineSameAsFirst(result), instr); + // Crankshaft is turned off for nosse2. 
+ UNREACHABLE(); + return NULL; } @@ -1725,8 +1697,9 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch( LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch( HCompareMinusZeroAndBranch* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return new (zone()) LCompareMinusZeroAndBranch(value); + LOperand* value = UseRegister(instr->value()); + LOperand* scratch = TempRegister(); + return new(zone()) LCompareMinusZeroAndBranch(value, scratch); } @@ -2049,8 +2022,8 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) { HValue* value = instr->value(); Representation input_rep = value->representation(); if (input_rep.IsDouble()) { - LOperand* reg = UseRegister(value); - return DefineFixed(new (zone()) LClampDToUint8(reg), eax); + UNREACHABLE(); + return NULL; } else if (input_rep.IsInteger32()) { LOperand* reg = UseFixed(value, eax); return DefineFixed(new(zone()) LClampIToUint8(reg), eax); diff --git a/src/x87/lithium-x87.h b/src/x87/lithium-x87.h index e5cced2ba..233eaf23f 100644 --- a/src/x87/lithium-x87.h +++ b/src/x87/lithium-x87.h @@ -413,7 +413,6 @@ class LGoto FINAL : public LTemplateInstruction<0, 0, 0> { } bool jumps_to_join() const { return block_->predecessors()->length() > 1; } - HBasicBlock* block() const { return block_; } private: HBasicBlock* block_; @@ -985,11 +984,15 @@ class LMathSqrt FINAL : public LTemplateInstruction<1, 1, 0> { }; -class LMathPowHalf FINAL : public LTemplateInstruction<1, 1, 0> { +class LMathPowHalf FINAL : public LTemplateInstruction<1, 1, 1> { public: - explicit LMathPowHalf(LOperand* value) { inputs_[0] = value; } + LMathPowHalf(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half") }; @@ -1022,11 +1025,15 @@ class LCmpHoleAndBranch FINAL : public LControlInstruction<1, 0> { }; -class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 0> { +class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 1> { public: - explicit LCompareMinusZeroAndBranch(LOperand* value) { inputs_[0] = value; } + LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch, "cmp-minus-zero-and-branch") @@ -1501,17 +1508,15 @@ class LAddI FINAL : public LTemplateInstruction<1, 2, 0> { }; -class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 1> { +class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 0> { public: - LMathMinMax(LOperand* left, LOperand* right, LOperand* temp) { + LMathMinMax(LOperand* left, LOperand* right) { inputs_[0] = left; inputs_[1] = right; - temps_[0] = temp; } LOperand* left() { return inputs_[0]; } LOperand* right() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max") DECLARE_HYDROGEN_ACCESSOR(MathMinMax) @@ -2032,12 +2037,11 @@ class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> { DECLARE_HYDROGEN_ACCESSOR(CallRuntime) virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE { - return save_doubles() == kDontSaveFPRegs; + return true; } const Runtime::Function* function() const { return hydrogen()->function(); } int arity() const { return hydrogen()->argument_count(); } - SaveFPRegsMode save_doubles() const { return 
hydrogen()->save_doubles(); } }; @@ -2877,8 +2881,6 @@ class LChunkBuilder FINAL : public LChunkBuilderBase { LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr); LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, Register reg); - LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, - X87Register reg); LInstruction* DefineX87TOS(LTemplateResultInstruction<1>* instr); // Assigns an environment to an instruction. An instruction which can // deoptimize must have an environment. diff --git a/src/x87/macro-assembler-x87.cc b/src/x87/macro-assembler-x87.cc index 80ce32c56..66f570334 100644 --- a/src/x87/macro-assembler-x87.cc +++ b/src/x87/macro-assembler-x87.cc @@ -148,7 +148,8 @@ void MacroAssembler::InNewSpace( void MacroAssembler::RememberedSetHelper( Register object, // Only used for debug checks. - Register addr, Register scratch, SaveFPRegsMode save_fp, + Register addr, + Register scratch, MacroAssembler::RememberedSetFinalAction and_then) { Label done; if (emit_debug_code()) { @@ -179,7 +180,7 @@ void MacroAssembler::RememberedSetHelper( DCHECK(and_then == kFallThroughAtEnd); j(equal, &done, Label::kNear); } - StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp); + StoreBufferOverflowStub store_buffer_overflow(isolate(), kDontSaveFPRegs); CallStub(&store_buffer_overflow); if (and_then == kReturnAtEnd) { ret(0); @@ -190,31 +191,6 @@ void MacroAssembler::RememberedSetHelper( } -void MacroAssembler::ClampTOSToUint8(Register result_reg) { - Label done, conv_failure; - sub(esp, Immediate(kPointerSize)); - fnclex(); - fist_s(Operand(esp, 0)); - pop(result_reg); - X87CheckIA(); - j(equal, &conv_failure, Label::kNear); - test(result_reg, Immediate(0xFFFFFF00)); - j(zero, &done, Label::kNear); - setcc(sign, result_reg); - sub(result_reg, Immediate(1)); - and_(result_reg, Immediate(255)); - jmp(&done, Label::kNear); - bind(&conv_failure); - fnclex(); - fldz(); - fld(1); - FCmp(); - setcc(below, result_reg); // 1 if negative, 0 if positive. - dec_b(result_reg); // 0 if negative, 255 if positive. - bind(&done); -} - - void MacroAssembler::ClampUint8(Register reg) { Label done; test(reg, Immediate(0xFFFFFF00)); @@ -294,8 +270,11 @@ void MacroAssembler::LoadUint32NoSSE2(Register src) { void MacroAssembler::RecordWriteArray( - Register object, Register value, Register index, SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action, SmiCheck smi_check, + Register object, + Register value, + Register index, + RememberedSetAction remembered_set_action, + SmiCheck smi_check, PointersToHereCheck pointers_to_here_check_for_value) { // First, check if a write barrier is even needed. The tests below // catch stores of Smis. 
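Context for the RecordWrite* hunks above and below: each helper opens with an inline smi check (the "First, check if a write barrier is even needed. The tests below catch stores of Smis." comment), because storing a smi can never introduce a heap pointer the collector must track. A minimal, self-contained C++ sketch of that filter follows, assuming V8's 32-bit tagging scheme (smi tag 0, heap-object tag 1); all names here are illustrative stand-ins, not V8's actual declarations.

#include <cstdint>

// Sketch only: models the INLINE_SMI_CHECK fast path of the write-barrier
// helpers. Assumption (stated above): a smi has a low tag bit of 0 and a
// heap object pointer a low tag bit of 1 on 32-bit V8.
static const uintptr_t kSketchSmiTagMask = 1;

inline bool MayNeedWriteBarrier(uintptr_t tagged_value) {
  // A smi is not a heap object, so storing it cannot create a pointer the
  // GC has to remember; only tagged heap pointers fall through to the
  // remembered-set and incremental-marking checks.
  return (tagged_value & kSketchSmiTagMask) != 0;
}

With OMIT_SMI_CHECK (as passed to RecordWrite in the hunk below) the caller has already established that the value is a heap object, so this test is skipped entirely.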
@@ -315,8 +294,8 @@ void MacroAssembler::RecordWriteArray( lea(dst, Operand(object, index, times_half_pointer_size, FixedArray::kHeaderSize - kHeapObjectTag)); - RecordWrite(object, dst, value, save_fp, remembered_set_action, - OMIT_SMI_CHECK, pointers_to_here_check_for_value); + RecordWrite(object, dst, value, remembered_set_action, OMIT_SMI_CHECK, + pointers_to_here_check_for_value); bind(&done); @@ -330,9 +309,13 @@ void MacroAssembler::RecordWriteArray( void MacroAssembler::RecordWriteField( - Register object, int offset, Register value, Register dst, - SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action, - SmiCheck smi_check, PointersToHereCheck pointers_to_here_check_for_value) { + Register object, + int offset, + Register value, + Register dst, + RememberedSetAction remembered_set_action, + SmiCheck smi_check, + PointersToHereCheck pointers_to_here_check_for_value) { // First, check if a write barrier is even needed. The tests below // catch stores of Smis. Label done; @@ -355,8 +338,8 @@ void MacroAssembler::RecordWriteField( bind(&ok); } - RecordWrite(object, dst, value, save_fp, remembered_set_action, - OMIT_SMI_CHECK, pointers_to_here_check_for_value); + RecordWrite(object, dst, value, remembered_set_action, OMIT_SMI_CHECK, + pointers_to_here_check_for_value); bind(&done); @@ -369,9 +352,11 @@ void MacroAssembler::RecordWriteField( } -void MacroAssembler::RecordWriteForMap(Register object, Handle<Map> map, - Register scratch1, Register scratch2, - SaveFPRegsMode save_fp) { +void MacroAssembler::RecordWriteForMap( + Register object, + Handle<Map> map, + Register scratch1, + Register scratch2) { Label done; Register address = scratch1; @@ -408,8 +393,7 @@ void MacroAssembler::RecordWriteForMap(Register object, Handle<Map> map, &done, Label::kNear); - RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET, - save_fp); + RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET); CallStub(&stub); bind(&done); @@ -429,8 +413,11 @@ void MacroAssembler::RecordWriteForMap(Register object, Handle<Map> map, void MacroAssembler::RecordWrite( - Register object, Register address, Register value, SaveFPRegsMode fp_mode, - RememberedSetAction remembered_set_action, SmiCheck smi_check, + Register object, + Register address, + Register value, + RememberedSetAction remembered_set_action, + SmiCheck smi_check, PointersToHereCheck pointers_to_here_check_for_value) { DCHECK(!object.is(value)); DCHECK(!object.is(address)); @@ -474,8 +461,8 @@ void MacroAssembler::RecordWrite( &done, Label::kNear); - RecordWriteStub stub(isolate(), object, value, address, remembered_set_action, - fp_mode); + RecordWriteStub stub(isolate(), object, value, address, + remembered_set_action); CallStub(&stub); bind(&done); @@ -720,53 +707,6 @@ void MacroAssembler::FCmp() { } -void MacroAssembler::FXamMinusZero() { - fxam(); - push(eax); - fnstsw_ax(); - and_(eax, Immediate(0x4700)); - // For minus zero, C3 == 1 && C1 == 1. - cmp(eax, Immediate(0x4200)); - pop(eax); - fstp(0); -} - - -void MacroAssembler::FXamSign() { - fxam(); - push(eax); - fnstsw_ax(); - // For negative value (including -0.0), C1 == 1. - and_(eax, Immediate(0x0200)); - pop(eax); - fstp(0); -} - - -void MacroAssembler::X87CheckIA() { - push(eax); - fnstsw_ax(); - // For #IA, IE == 1 && SF == 0. - and_(eax, Immediate(0x0041)); - cmp(eax, Immediate(0x0001)); - pop(eax); -} - - -// rc=00B, round to nearest. -// rc=01B, round down. -// rc=10B, round up. -// rc=11B, round toward zero. 
-void MacroAssembler::X87SetRC(int rc) { - sub(esp, Immediate(kPointerSize)); - fnstcw(MemOperand(esp, 0)); - and_(MemOperand(esp, 0), Immediate(0xF3FF)); - or_(MemOperand(esp, 0), Immediate(rc)); - fldcw(MemOperand(esp, 0)); - add(esp, Immediate(kPointerSize)); -} - - void MacroAssembler::AssertNumber(Register object) { if (emit_debug_code()) { Label ok; @@ -904,17 +844,8 @@ void MacroAssembler::EnterExitFramePrologue() { } -void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) { - // Optionally save FPU state. - if (save_doubles) { - // Store FPU state to m108byte. - int space = 108 + argc * kPointerSize; - sub(esp, Immediate(space)); - const int offset = -2 * kPointerSize; // entry fp + code object. - fnsave(MemOperand(ebp, offset - 108)); - } else { - sub(esp, Immediate(argc * kPointerSize)); - } +void MacroAssembler::EnterExitFrameEpilogue(int argc) { + sub(esp, Immediate(argc * kPointerSize)); // Get the required frame alignment for the OS. const int kFrameAlignment = base::OS::ActivationFrameAlignment(); @@ -928,7 +859,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) { } -void MacroAssembler::EnterExitFrame(bool save_doubles) { +void MacroAssembler::EnterExitFrame() { EnterExitFramePrologue(); // Set up argc and argv in callee-saved registers. @@ -937,23 +868,17 @@ void MacroAssembler::EnterExitFrame(bool save_doubles) { lea(esi, Operand(ebp, eax, times_4, offset)); // Reserve space for argc, argv and isolate. - EnterExitFrameEpilogue(3, save_doubles); + EnterExitFrameEpilogue(3); } void MacroAssembler::EnterApiExitFrame(int argc) { EnterExitFramePrologue(); - EnterExitFrameEpilogue(argc, false); + EnterExitFrameEpilogue(argc); } -void MacroAssembler::LeaveExitFrame(bool save_doubles) { - // Optionally restore FPU state. - if (save_doubles) { - const int offset = -2 * kPointerSize; - frstor(MemOperand(ebp, offset - 108)); - } - +void MacroAssembler::LeaveExitFrame() { // Get the return address from the stack and restore the frame pointer. mov(ecx, Operand(ebp, 1 * kPointerSize)); mov(ebp, Operand(ebp, 0 * kPointerSize)); @@ -1983,8 +1908,8 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) { } -void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles) { +void MacroAssembler::CallRuntime(const Runtime::Function* f, + int num_arguments) { // If the expected number of arguments of the runtime function is // constant, we check that the actual number of arguments match the // expectation. @@ -1996,7 +1921,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, // smarter. 
Move(eax, Immediate(num_arguments)); mov(ebx, Immediate(ExternalReference(f, isolate()))); - CEntryStub ces(isolate(), 1, save_doubles); + CEntryStub ces(isolate(), 1); CallStub(&ces); } @@ -2869,9 +2794,9 @@ void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1, } -void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand, - Label* not_unique_name, - Label::Distance distance) { +void MacroAssembler::JumpIfNotUniqueName(Operand operand, + Label* not_unique_name, + Label::Distance distance) { STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); Label succeed; test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask)); diff --git a/src/x87/macro-assembler-x87.h b/src/x87/macro-assembler-x87.h index 322c24a41..1fdca3c9f 100644 --- a/src/x87/macro-assembler-x87.h +++ b/src/x87/macro-assembler-x87.h @@ -74,8 +74,8 @@ class MacroAssembler: public Assembler { // at the address pointed to by the addr register. Only works if addr is not // in new space. void RememberedSetHelper(Register object, // Used for debug code. - Register addr, Register scratch, - SaveFPRegsMode save_fp, + Register addr, + Register scratch, RememberedSetFinalAction and_then); void CheckPageFlag(Register object, @@ -146,8 +146,10 @@ class MacroAssembler: public Assembler { // The offset is the offset from the start of the object, not the offset from // the tagged HeapObject pointer. For use with FieldOperand(reg, off). void RecordWriteField( - Register object, int offset, Register value, Register scratch, - SaveFPRegsMode save_fp, + Register object, + int offset, + Register value, + Register scratch, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, SmiCheck smi_check = INLINE_SMI_CHECK, PointersToHereCheck pointers_to_here_check_for_value = @@ -156,14 +158,20 @@ class MacroAssembler: public Assembler { // As above, but the offset has the tag presubtracted. For use with // Operand(reg, off). void RecordWriteContextSlot( - Register context, int offset, Register value, Register scratch, - SaveFPRegsMode save_fp, + Register context, + int offset, + Register value, + Register scratch, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, SmiCheck smi_check = INLINE_SMI_CHECK, PointersToHereCheck pointers_to_here_check_for_value = kPointersToHereMaybeInteresting) { - RecordWriteField(context, offset + kHeapObjectTag, value, scratch, save_fp, - remembered_set_action, smi_check, + RecordWriteField(context, + offset + kHeapObjectTag, + value, + scratch, + remembered_set_action, + smi_check, pointers_to_here_check_for_value); } @@ -174,7 +182,9 @@ class MacroAssembler: public Assembler { // filters out smis so it does not update the write barrier if the // value is a smi. void RecordWriteArray( - Register array, Register value, Register index, SaveFPRegsMode save_fp, + Register array, + Register value, + Register index, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, SmiCheck smi_check = INLINE_SMI_CHECK, PointersToHereCheck pointers_to_here_check_for_value = @@ -186,7 +196,9 @@ class MacroAssembler: public Assembler { // operation. RecordWrite filters out smis so it does not update the // write barrier if the value is a smi. 
void RecordWrite( - Register object, Register address, Register value, SaveFPRegsMode save_fp, + Register object, + Register address, + Register value, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, SmiCheck smi_check = INLINE_SMI_CHECK, PointersToHereCheck pointers_to_here_check_for_value = @@ -195,8 +207,11 @@ class MacroAssembler: public Assembler { // For page containing |object| mark the region covering the object's map // dirty. |object| is the object being stored into, |map| is the Map object // that was stored. - void RecordWriteForMap(Register object, Handle<Map> map, Register scratch1, - Register scratch2, SaveFPRegsMode save_fp); + void RecordWriteForMap( + Register object, + Handle<Map> map, + Register scratch1, + Register scratch2); // --------------------------------------------------------------------------- // Debugger Support @@ -211,14 +226,14 @@ class MacroAssembler: public Assembler { // arguments in register eax and sets up the number of arguments in // register edi and the pointer to the first argument in register // esi. - void EnterExitFrame(bool save_doubles); + void EnterExitFrame(); void EnterApiExitFrame(int argc); // Leave the current exit frame. Expects the return value in // register eax:edx (untouched) and the pointer to the first // argument in register esi. - void LeaveExitFrame(bool save_doubles); + void LeaveExitFrame(); // Leave the current exit frame. Expects the return value in // register eax (untouched). @@ -420,13 +435,8 @@ class MacroAssembler: public Assembler { // FCmp is similar to integer cmp, but requires unsigned // jcc instructions (je, ja, jae, jb, jbe, je, and jz). void FCmp(); - void FXamMinusZero(); - void FXamSign(); - void X87CheckIA(); - void X87SetRC(int rc); void ClampUint8(Register reg); - void ClampTOSToUint8(Register result_reg); void SlowTruncateToI(Register result_reg, Register input_reg, int offset = HeapNumber::kValueOffset - kHeapObjectTag); @@ -707,17 +717,14 @@ class MacroAssembler: public Assembler { void StubReturn(int argc); // Call a runtime routine. - void CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles = kDontSaveFPRegs); - void CallRuntimeSaveDoubles(Runtime::FunctionId id) { + void CallRuntime(const Runtime::Function* f, int num_arguments); + // Convenience function: Same as above, but takes the fid instead. + void CallRuntime(Runtime::FunctionId id) { const Runtime::Function* function = Runtime::FunctionForId(id); - CallRuntime(function, function->nargs, kSaveFPRegs); + CallRuntime(function, function->nargs); } - - // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId id, int num_arguments, - SaveFPRegsMode save_doubles = kDontSaveFPRegs) { - CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles); + void CallRuntime(Runtime::FunctionId id, int num_arguments) { + CallRuntime(Runtime::FunctionForId(id), num_arguments); } // Convenience function: call an external reference. 
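A note on the CallRuntime hunk above: with FP-register saving removed from the x87 port, the FunctionId overload becomes a thin forwarder that looks up the Runtime::Function metadata and passes along its declared arity, which the primary overload validates (see the "If the expected number of arguments of the runtime function is constant" comment in the .cc hunk earlier). A condensed, self-contained sketch of that delegation, with V8's types stubbed out; every name here is a stand-in for illustration only.

#include <cassert>

// Stand-in for Runtime::Function metadata; nargs < 0 means "variadic".
struct RuntimeFunctionSketch { int nargs; };

const RuntimeFunctionSketch* FunctionForIdSketch(int /*id*/) {
  static const RuntimeFunctionSketch kTwoArgs = {2};
  return &kTwoArgs;  // dummy lookup, just for the sketch
}

struct MacroAssemblerSketch {
  // Primary entry point: checks the caller-supplied arity against the
  // function's declared arity when the latter is fixed.
  void CallRuntime(const RuntimeFunctionSketch* f, int num_arguments) {
    assert(f->nargs < 0 || f->nargs == num_arguments);
    // ... move num_arguments into eax, the entry into ebx, call the stub ...
  }
  // Convenience overload, mirroring the one in the hunk above: it forwards
  // the function's own declared arity.
  void CallRuntime(int id) {
    const RuntimeFunctionSketch* f = FunctionForIdSketch(id);
    CallRuntime(f, f->nargs);
  }
};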
@@ -882,13 +889,13 @@ class MacroAssembler: public Assembler { Label* on_not_flat_one_byte_strings); // Checks if the given register or operand is a unique name - void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name, - Label::Distance distance = Label::kFar) { - JumpIfNotUniqueNameInstanceType(Operand(reg), not_unique_name, distance); + void JumpIfNotUniqueName(Register reg, Label* not_unique_name, + Label::Distance distance = Label::kFar) { + JumpIfNotUniqueName(Operand(reg), not_unique_name, distance); } - void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name, - Label::Distance distance = Label::kFar); + void JumpIfNotUniqueName(Operand operand, Label* not_unique_name, + Label::Distance distance = Label::kFar); void EmitSeqStringSetCharCheck(Register string, Register index, @@ -949,7 +956,7 @@ class MacroAssembler: public Assembler { const CallWrapper& call_wrapper = NullCallWrapper()); void EnterExitFramePrologue(); - void EnterExitFrameEpilogue(int argc, bool save_doubles); + void EnterExitFrameEpilogue(int argc); void LeaveExitFrameEpilogue(bool restore_context); diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status index 5198af6ff..44ad3aeb7 100644 --- a/test/cctest/cctest.status +++ b/test/cctest/cctest.status @@ -80,6 +80,11 @@ ############################################################################## # TurboFan compiler failures. + # TODO(dcarney): C calls are broken all over the place. + 'test-run-machops/RunCall*': [SKIP], + 'test-run-machops/RunLoadImmIndex': [SKIP], + 'test-run-machops/RunSpillLotsOfThingsWithCall': [SKIP], + # TODO(sigurds): The schedule is borked with multiple inlinees, # and cannot handle free-floating loops yet 'test-run-inlining/InlineTwiceDependentDiamond': [SKIP], diff --git a/test/cctest/compiler/test-js-typed-lowering.cc b/test/cctest/compiler/test-js-typed-lowering.cc index 5dc32fb52..d2c018f74 100644 --- a/test/cctest/compiler/test-js-typed-lowering.cc +++ b/test/cctest/compiler/test-js-typed-lowering.cc @@ -1385,48 +1385,6 @@ TEST(Int32Comparisons) { } -TEST(BuiltinMathMax) { - JSTypedLoweringTester R; - - Node* fun = R.HeapConstant(handle(R.isolate->context()->math_max_fun())); - Node* call = R.graph.NewNode(R.javascript.Call(2, NO_CALL_FUNCTION_FLAGS), - fun, R.UndefinedConstant()); - Node* r = R.reduce(call); - R.CheckNumberConstant(-V8_INFINITY, r); - - for (size_t i = 0; i < arraysize(kNumberTypes); i++) { - Type* t0 = kNumberTypes[i]; - Node* p0 = R.Parameter(t0, 0); - Node* call = R.graph.NewNode(R.javascript.Call(3, NO_CALL_FUNCTION_FLAGS), - fun, R.UndefinedConstant(), p0); - Node* r = R.reduce(call); - CHECK_EQ(IrOpcode::kParameter, r->opcode()); - CHECK_EQ(p0, r); - } - - for (size_t i = 0; i < arraysize(kNumberTypes); i++) { - for (size_t j = 0; j < arraysize(kNumberTypes); j++) { - Type* t0 = kNumberTypes[i]; - Node* p0 = R.Parameter(t0, 0); - Type* t1 = kNumberTypes[j]; - Node* p1 = R.Parameter(t1, 1); - Node* call = R.graph.NewNode(R.javascript.Call(4, NO_CALL_FUNCTION_FLAGS), - fun, R.UndefinedConstant(), p0, p1); - Node* r = R.reduce(call); - - if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) { - CHECK_EQ(IrOpcode::kPhi, r->opcode()); - CHECK(p0 == r->InputAt(0) || p1 == r->InputAt(0)); - CHECK(p1 == r->InputAt(1) || p0 == r->InputAt(1)); - } else { - CHECK_EQ(IrOpcode::kJSCallFunction, r->opcode()); - CHECK_EQ(call, r); - } - } - } -} - - TEST(BuiltinMathImul) { JSTypedLoweringTester R; diff --git a/test/cctest/compiler/test-run-machops.cc 
b/test/cctest/compiler/test-run-machops.cc index 985e0f8ff..1b5aa616a 100644 --- a/test/cctest/compiler/test-run-machops.cc +++ b/test/cctest/compiler/test-run-machops.cc @@ -3567,6 +3567,82 @@ TEST(RunAddTree) { } +#if MACHINE_ASSEMBLER_SUPPORTS_CALL_C + +static int Seven() { return 7; } +static int UnaryMinus(int a) { return -a; } +static int APlusTwoB(int a, int b) { return a + 2 * b; } + + +TEST(RunCallSeven) { + for (int i = 0; i < 2; i++) { + bool call_direct = i == 0; + void* function_address = + reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&Seven)); + + RawMachineAssemblerTester<int32_t> m; + Node** args = NULL; + MachineType* arg_types = NULL; + Node* function = call_direct + ? m.PointerConstant(function_address) + : m.LoadFromPointer(&function_address, kMachPtr); + m.Return(m.CallC(function, kMachInt32, arg_types, args, 0)); + + CHECK_EQ(7, m.Call()); + } +} + + +TEST(RunCallUnaryMinus) { + for (int i = 0; i < 2; i++) { + bool call_direct = i == 0; + void* function_address = + reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&UnaryMinus)); + + RawMachineAssemblerTester<int32_t> m(kMachInt32); + Node* args[] = {m.Parameter(0)}; + MachineType arg_types[] = {kMachInt32}; + Node* function = call_direct + ? m.PointerConstant(function_address) + : m.LoadFromPointer(&function_address, kMachPtr); + m.Return(m.CallC(function, kMachInt32, arg_types, args, 1)); + + FOR_INT32_INPUTS(i) { + int a = *i; + CHECK_EQ(-a, m.Call(a)); + } + } +} + + +TEST(RunCallAPlusTwoB) { + for (int i = 0; i < 2; i++) { + bool call_direct = i == 0; + void* function_address = + reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&APlusTwoB)); + + RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32); + Node* args[] = {m.Parameter(0), m.Parameter(1)}; + MachineType arg_types[] = {kMachInt32, kMachInt32}; + Node* function = call_direct + ? 
m.PointerConstant(function_address) + : m.LoadFromPointer(&function_address, kMachPtr); + m.Return(m.CallC(function, kMachInt32, arg_types, args, 2)); + + FOR_INT32_INPUTS(i) { + FOR_INT32_INPUTS(j) { + int a = *i; + int b = *j; + int result = m.Call(a, b); + CHECK_EQ(a + 2 * b, result); + } + } + } +} + +#endif // MACHINE_ASSEMBLER_SUPPORTS_CALL_C + + static const int kFloat64CompareHelperTestCases = 15; static const int kFloat64CompareHelperNodeType = 4; @@ -3954,6 +4030,39 @@ TEST(RunNewSpaceConstantsInPhi) { } +#if MACHINE_ASSEMBLER_SUPPORTS_CALL_C + +TEST(RunSpillLotsOfThingsWithCall) { + static const int kInputSize = 1000; + RawMachineAssemblerTester<void> m; + Node* accs[kInputSize]; + int32_t outputs[kInputSize]; + Node* one = m.Int32Constant(1); + Node* acc = one; + for (int i = 0; i < kInputSize; i++) { + acc = m.Int32Add(acc, one); + accs[i] = acc; + } + // If the spill slot computation is wrong, it might load from the c frame + { + void* func = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&Seven)); + Node** args = NULL; + MachineType* arg_types = NULL; + m.CallC(m.PointerConstant(func), kMachInt32, arg_types, args, 0); + } + for (int i = 0; i < kInputSize; i++) { + m.StoreToPointer(&outputs[i], kMachInt32, accs[i]); + } + m.Return(one); + m.Call(); + for (int i = 0; i < kInputSize; i++) { + CHECK_EQ(outputs[i], i + 2); + } +} + +#endif // MACHINE_ASSEMBLER_SUPPORTS_CALL_C + + TEST(RunInt32AddWithOverflowP) { int32_t actual_val = -1; RawMachineAssemblerTester<int32_t> m; diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc index 2f0674a34..69c10c29d 100644 --- a/test/cctest/test-debug.cc +++ b/test/cctest/test-debug.cc @@ -765,7 +765,6 @@ static void DebugEventEvaluate( CHECK_NE(debug->break_id(), 0); if (event == v8::Break) { - break_point_hit_count++; for (int i = 0; checks[i].expr != NULL; i++) { const int argc = 3; v8::Handle<v8::Value> argv[argc] = { @@ -2407,7 +2406,7 @@ TEST(DebugEvaluate) { }; // Simple test function. The "y=0" is in the function foo to provide a break - // location. For "y=0" the "y" is at position 15 in the foo function + // location. For "y=0" the "y" is at position 15 in the barbar function // therefore setting breakpoint at position 15 will break at "y=0" and // setting it higher will break after. v8::Local<v8::Function> foo = CompileFunction(&env, @@ -2440,34 +2439,6 @@ TEST(DebugEvaluate) { checks = checks_hh; foo->Call(env->Global(), 1, argv_foo); - // Test that overriding Object.prototype will not interfere into evaluation - // on call frame. - v8::Local<v8::Function> zoo = - CompileFunction(&env, - "x = undefined;" - "function zoo(t) {" - " var a=x;" - " Object.prototype.x = 42;" - " x=t;" - " y=0;" // To ensure break location. - " delete Object.prototype.x;" - " x=a;" - "}", - "zoo"); - const int zoo_break_position = 50; - - // Arguments with one parameter "Hello, world!" - v8::Handle<v8::Value> argv_zoo[1] = { - v8::String::NewFromUtf8(env->GetIsolate(), "Hello, world!")}; - - // Call zoo with breakpoint set at y=0. - DebugEventCounterClear(); - bp = SetBreakPoint(zoo, zoo_break_position); - checks = checks_hu; - zoo->Call(env->Global(), 1, argv_zoo); - CHECK_EQ(1, break_point_hit_count); - ClearBreakPoint(bp); - // Test function with an inner function. The "y=0" is in function barbar // to provide a break location. 
For "y=0" the "y" is at position 8 in the // barbar function therefore setting breakpoint at position 8 will break at diff --git a/test/cctest/test-disasm-x87.cc b/test/cctest/test-disasm-x87.cc index 6cd33e557..17c49af6c 100644 --- a/test/cctest/test-disasm-x87.cc +++ b/test/cctest/test-disasm-x87.cc @@ -349,7 +349,6 @@ TEST(DisasmIa320) { __ fprem1(); __ fincstp(); __ ftst(); - __ fxam(); __ fxch(3); __ fld_s(Operand(ebx, ecx, times_4, 10000)); __ fstp_s(Operand(ebx, ecx, times_4, 10000)); @@ -379,12 +378,6 @@ TEST(DisasmIa320) { __ fninit(); __ nop(); - __ fldcw(Operand(ebx, ecx, times_4, 10000)); - __ fnstcw(Operand(ebx, ecx, times_4, 10000)); - __ fadd_d(Operand(ebx, ecx, times_4, 10000)); - __ fnsave(Operand(ebx, ecx, times_4, 10000)); - __ frstor(Operand(ebx, ecx, times_4, 10000)); - // xchg. { __ xchg(eax, eax); diff --git a/test/mjsunit/keyed-named-access.js b/test/mjsunit/keyed-named-access.js index 11f8fb50d..f9541e8e0 100644 --- a/test/mjsunit/keyed-named-access.js +++ b/test/mjsunit/keyed-named-access.js @@ -34,39 +34,3 @@ f(o3); f(o3); %OptimizeFunctionOnNextCall(f); assertEquals(1200, f(o3)); - -(function CountOperationDeoptimizationGetter() { - var global = {}; - global.__defineGetter__("A", function () { return "x"; }); - - function h() { - return "A"; - } - - function g(a, b, c) { - try { - return a + b.toString() + c; - } catch (e) { } - } - - function test(o) { - return g(1, o[h()]--, 10); - } - - test(global); - test(global); - %OptimizeFunctionOnNextCall(test); - print(test(global)); -})(); - - -(function CountOperationDeoptimizationPoint() { - function test() { - this[0, ""]--; - } - - test(); - test(); - %OptimizeFunctionOnNextCall(test); - test(); -})(); diff --git a/test/mjsunit/regress/regress-json-parse-index.js b/test/mjsunit/regress/regress-json-parse-index.js deleted file mode 100644 index d1a785aaf..000000000 --- a/test/mjsunit/regress/regress-json-parse-index.js +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -var o = JSON.parse('{"\\u0030":100}'); -assertEquals(100, o[0]); diff --git a/test/mjsunit/regress/string-set-char-deopt.js b/test/mjsunit/regress/string-set-char-deopt.js index c8e8538e1..a4b34e849 100644 --- a/test/mjsunit/regress/string-set-char-deopt.js +++ b/test/mjsunit/regress/string-set-char-deopt.js @@ -25,7 +25,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// Flags: --allow-natives-syntax --turbo-deoptimization +// Flags: --allow-natives-syntax (function OneByteSeqStringSetCharDeoptOsr() { function deopt() { |