From 3d3c687012be3304f7951e47eec103478c7ec366 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C3=ABl=20Zasso?=
Date: Mon, 27 Jul 2015 08:43:54 +0200
Subject: [PATCH] deps: update V8 to 4.4.63.26

Includes cherry-picks for:
* JitCodeEvent patch: https://crrev.com/f7969b1d5a55e66237221a463daf422ac7611788
* argparse patch: https://crrev.com/44bc918458481d60b08d5566f0f31a79e39b85d7

PR-URL: https://github.com/nodejs/io.js/pull/2220
Reviewed-By: Ben Noordhuis
Reviewed-By: Ali Ijaz Sheikh
Reviewed-By: Rod Vagg
---
 deps/v8/include/v8-version.h                  |   2 +-
 deps/v8/src/api.cc                            |   4 +-
 deps/v8/src/arm/assembler-arm-inl.h           |   4 +-
 deps/v8/src/arm/assembler-arm.cc              |  23 +-
 deps/v8/src/arm/assembler-arm.h               |  20 +-
 deps/v8/src/arm64/code-stubs-arm64.cc         |  32 +-
 deps/v8/src/bootstrapper.cc                   |   1 +
 .../v8/src/compiler/arm/code-generator-arm.cc |   2 +
 deps/v8/src/flag-definitions.h                |   2 +-
 deps/v8/src/hydrogen.cc                       |   5 +-
 deps/v8/src/ic/ic.cc                          |  48 ++-
 deps/v8/src/isolate.h                         |   1 +
 deps/v8/src/mips/assembler-mips-inl.h         |   4 +-
 deps/v8/src/mips/assembler-mips.cc            |   4 +-
 deps/v8/src/mips/assembler-mips.h             |   2 +-
 deps/v8/src/mips/code-stubs-mips.cc           |   4 +-
 deps/v8/src/mips64/assembler-mips64-inl.h     |   4 +-
 deps/v8/src/mips64/assembler-mips64.cc        |   4 +-
 deps/v8/src/mips64/assembler-mips64.h         |   2 +-
 deps/v8/src/mips64/code-stubs-mips64.cc       |  16 +-
 deps/v8/src/objects.cc                        |  91 +++---
 deps/v8/src/objects.h                         |   1 +
 deps/v8/src/snapshot/serialize.cc             | 285 ++++++++++++------
 deps/v8/src/snapshot/serialize.h              |  91 ++++--
 deps/v8/src/unicode-decoder.cc                |   2 +-
 deps/v8/src/x64/lithium-x64.cc                |  16 +-
 deps/v8/test/cctest/test-api.cc               |  90 ++++++
 deps/v8/test/cctest/test-assembler-arm.cc     |  59 ++++
 deps/v8/test/cctest/test-assembler-mips.cc    |   3 +
 deps/v8/test/cctest/test-assembler-mips64.cc  |   3 +
 .../test/cctest/test-macro-assembler-mips.cc  |  90 +++++-
 .../cctest/test-macro-assembler-mips64.cc     |  89 ++++++
 deps/v8/test/cctest/test-serialize.cc         |  95 +++++-
 .../mjsunit/es6/regress/regress-cr493566.js   |  80 +++++
 deps/v8/test/mjsunit/mjsunit.status           |   4 -
 .../v8/test/mjsunit/regress/regress-487981.js |  22 ++
 .../mjsunit/regress/regress-crbug-478612.js   |  52 ++++
 .../mjsunit/regress/regress-crbug-500497.js   |  33 ++
 .../mjsunit/regress/regress-crbug-502930.js   |  27 ++
 .../mjsunit/regress/regress-crbug-514268.js   |  23 ++
 40 files changed, 1102 insertions(+), 238 deletions(-)
 create mode 100644 deps/v8/test/mjsunit/es6/regress/regress-cr493566.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-487981.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-478612.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-500497.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-502930.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-514268.js

diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 2f2ea5533387d0..c52267e024ca30 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -11,7 +11,7 @@
 #define V8_MAJOR_VERSION 4
 #define V8_MINOR_VERSION 4
 #define V8_BUILD_NUMBER 63
-#define V8_PATCH_LEVEL 12
+#define V8_PATCH_LEVEL 26
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 01441351c5ad08..4f06873036c2f9 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -345,12 +345,14 @@ StartupData V8::CreateSnapshotDataBlob(const char* custom_source) {
   base::ElapsedTimer timer;
   timer.Start();
   Isolate::Scope isolate_scope(isolate);
+  internal_isolate->set_creating_default_snapshot(true);
   internal_isolate->Init(NULL);
   Persistent<Context> context;
   i::Snapshot::Metadata metadata;
   {
     HandleScope handle_scope(isolate);
     Handle<Context> new_context = Context::New(isolate);
+    internal_isolate->set_creating_default_snapshot(false);
     context.Reset(isolate, new_context);
     if (custom_source != NULL) {
       metadata.set_embeds_script(true);
@@ -379,7 +381,7 @@ StartupData V8::CreateSnapshotDataBlob(const char* custom_source) {
       i::SnapshotByteSink context_sink;
       i::PartialSerializer context_ser(internal_isolate, &ser, &context_sink);
       context_ser.Serialize(&raw_context);
-      ser.SerializeWeakReferences();
+      ser.SerializeWeakReferencesAndDeferred();
       result = i::Snapshot::CreateSnapshotBlob(ser, context_ser, metadata);
     }
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 0b5ced51595417..1227156edaa7ad 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -432,9 +432,7 @@ void Assembler::CheckBuffer() {
   if (buffer_space() <= kGap) {
     GrowBuffer();
   }
-  if (pc_offset() >= next_buffer_check_) {
-    CheckConstPool(false, true);
-  }
+  MaybeCheckConstPool();
 }
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index da1ab68a76d0ae..a396d0fe6c028b 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -1298,7 +1298,7 @@ void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
 }
 
-int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
+int Assembler::branch_offset(Label* L) {
   int target_pos;
   if (L->is_bound()) {
     target_pos = L->pos();
@@ -1315,7 +1315,8 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
   // Block the emission of the constant pool, since the branch instruction must
   // be emitted at the pc offset recorded by the label.
-  BlockConstPoolFor(1);
+  if (!is_const_pool_blocked()) BlockConstPoolFor(1);
+
   return target_pos - (pc_offset() + kPcLoadDelta);
 }
@@ -1367,6 +1368,24 @@ void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
 }
 
+void Assembler::b(Label* L, Condition cond) {
+  CheckBuffer();
+  b(branch_offset(L), cond);
+}
+
+
+void Assembler::bl(Label* L, Condition cond) {
+  CheckBuffer();
+  bl(branch_offset(L), cond);
+}
+
+
+void Assembler::blx(Label* L) {
+  CheckBuffer();
+  blx(branch_offset(L));
+}
+
+
 // Data-processing instructions.
 
 void Assembler::and_(Register dst, Register src1, const Operand& src2,
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 836ff4f3d02f08..5422b3f20e2492 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -746,7 +746,7 @@ class Assembler : public AssemblerBase {
   // Returns the branch offset to the given label from the current code position
   // Links the label to the current position if it is still unbound
   // Manages the jump elimination optimization if the second parameter is true.
-  int branch_offset(Label* L, bool jump_elimination_allowed);
+  int branch_offset(Label* L);
 
   // Returns true if the given pc address is the start of a constant pool load
   // instruction sequence.
@@ -852,13 +852,11 @@ class Assembler : public AssemblerBase {
   void bx(Register target, Condition cond = al);  // v5 and above, plus v4t
 
   // Convenience branch instructions using labels
-  void b(Label* L, Condition cond = al) {
-    b(branch_offset(L, cond == al), cond);
-  }
-  void b(Condition cond, Label* L) { b(branch_offset(L, cond == al), cond); }
-  void bl(Label* L, Condition cond = al) { bl(branch_offset(L, false), cond); }
-  void bl(Condition cond, Label* L) { bl(branch_offset(L, false), cond); }
-  void blx(Label* L) { blx(branch_offset(L, false)); }  // v5 and above
+  void b(Label* L, Condition cond = al);
+  void b(Condition cond, Label* L) { b(L, cond); }
+  void bl(Label* L, Condition cond = al);
+  void bl(Condition cond, Label* L) { bl(L, cond); }
+  void blx(Label* L);  // v5 and above
 
   // Data-processing instructions
@@ -1536,6 +1534,12 @@ class Assembler : public AssemblerBase {
   // Check if is time to emit a constant pool.
   void CheckConstPool(bool force_emit, bool require_jump);
 
+  void MaybeCheckConstPool() {
+    if (pc_offset() >= next_buffer_check_) {
+      CheckConstPool(false, true);
+    }
+  }
+
   // Allocate a constant pool of the correct size for the generated code.
   Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index 9ce5a05ce530d7..7f3c9952049205 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -2286,27 +2286,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   Register last_match_info_elements = x21;
   Register code_object = x22;
 
-  // TODO(jbramley): Is it necessary to preserve these? I don't think ARM does.
-  CPURegList used_callee_saved_registers(subject,
-                                         regexp_data,
-                                         last_match_info_elements,
-                                         code_object);
-  __ PushCPURegList(used_callee_saved_registers);
-
   // Stack frame.
-  // jssp[0] : x19
-  // jssp[8] : x20
-  // jssp[16]: x21
-  // jssp[24]: x22
-  // jssp[32]: last_match_info (JSArray)
-  // jssp[40]: previous index
-  // jssp[48]: subject string
-  // jssp[56]: JSRegExp object
-
-  const int kLastMatchInfoOffset = 4 * kPointerSize;
-  const int kPreviousIndexOffset = 5 * kPointerSize;
-  const int kSubjectOffset = 6 * kPointerSize;
-  const int kJSRegExpOffset = 7 * kPointerSize;
+  // jssp[00]: last_match_info (JSArray)
+  // jssp[08]: previous index
+  // jssp[16]: subject string
+  // jssp[24]: JSRegExp object
+
+  const int kLastMatchInfoOffset = 0 * kPointerSize;
+  const int kPreviousIndexOffset = 1 * kPointerSize;
+  const int kSubjectOffset = 2 * kPointerSize;
+  const int kJSRegExpOffset = 3 * kPointerSize;
 
   // Ensure that a RegExp stack is allocated.
   ExternalReference address_of_regexp_stack_memory_address =
@@ -2673,7 +2662,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Return last match info.
   __ Peek(x0, kLastMatchInfoOffset);
-  __ PopCPURegList(used_callee_saved_registers);
   // Drop the 4 arguments of the stub from the stack.
   __ Drop(4);
   __ Ret();
@@ -2696,13 +2684,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
 
   __ Bind(&failure);
   __ Mov(x0, Operand(isolate()->factory()->null_value()));
-  __ PopCPURegList(used_callee_saved_registers);
   // Drop the 4 arguments of the stub from the stack.
   __ Drop(4);
   __ Ret();
 
   __ Bind(&runtime);
-  __ PopCPURegList(used_callee_saved_registers);
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
 
   // Deferred code for string handling.
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index f3dd682871b749..c56c429937a275 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -2813,6 +2813,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
       if (value->IsPropertyCell()) {
         value = handle(PropertyCell::cast(*value)->value(), isolate());
       }
+      if (value->IsTheHole()) continue;
       PropertyDetails details = properties->DetailsAt(i);
       DCHECK_EQ(kData, details.kind());
       JSObject::AddProperty(to, key, value, details.attributes());
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index 306c347f8a16cf..cc93cf4451dd55 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -316,6 +316,8 @@ void CodeGenerator::AssembleDeconstructActivationRecord() {
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   ArmOperandConverter i(this, instr);
 
+  masm()->MaybeCheckConstPool();
+
   switch (ArchOpcodeField::decode(instr->opcode())) {
     case kArchCallCodeObject: {
       EnsureSpaceForLazyDeopt();
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 93e61df0825ea3..2b905e31d43e1d 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -199,7 +199,7 @@ DEFINE_IMPLICATION(es_staging, harmony)
 #define HARMONY_STAGED(V) \
   V(harmony_rest_parameters, "harmony rest parameters") \
   V(harmony_spreadcalls, "harmony spread-calls") \
-  V(harmony_tostring, "harmony toString") \
+  V(harmony_tostring, "harmony toString")
 
 // Features that are shipping (turned on by default, but internal flag remains).
 #define HARMONY_SHIPPING(V) \
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index a1b06482078471..6a86c736ee5e63 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -5219,9 +5219,12 @@ void HOptimizedGraphBuilder::BuildForInBody(ForInStatement* stmt,
     HValue* function = AddLoadJSBuiltin(Builtins::FILTER_KEY);
     Add<HPushArguments>(enumerable, key);
     key = Add<HInvokeFunction>(function, 2);
+    Push(key);
+    Add<HSimulate>(stmt->FilterId());
+    key = Pop();
     Bind(each_var, key);
-    Add<HSimulate>(stmt->AssignmentId());
     Add<HCheckHeapObject>(key);
+    Add<HSimulate>(stmt->AssignmentId());
   }
 
   BreakAndContinueInfo break_info(stmt, scope(), 5);
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index daf7704c71d5ff..65b2e3df9ad15d 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -1112,7 +1112,39 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
       code = slow_stub();
     }
   } else {
-    code = ComputeHandler(lookup);
+    if (lookup->state() == LookupIterator::ACCESSOR) {
+      Handle<Object> accessors = lookup->GetAccessors();
+      Handle<Map> map = receiver_map();
+      if (accessors->IsExecutableAccessorInfo()) {
+        Handle<ExecutableAccessorInfo> info =
+            Handle<ExecutableAccessorInfo>::cast(accessors);
+        if ((v8::ToCData<Address>(info->getter()) != 0) &&
+            !ExecutableAccessorInfo::IsCompatibleReceiverMap(isolate(), info,
+                                                             map)) {
+          TRACE_GENERIC_IC(isolate(), "LoadIC", "incompatible receiver type");
+          code = slow_stub();
+        }
+      } else if (accessors->IsAccessorPair()) {
+        Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
+                              isolate());
+        Handle<JSObject> holder = lookup->GetHolder<JSObject>();
+        Handle<Object> receiver = lookup->GetReceiver();
+        if (getter->IsJSFunction() && holder->HasFastProperties()) {
+          Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
+          if (receiver->IsJSObject() || function->IsBuiltin() ||
+              !is_sloppy(function->shared()->language_mode())) {
+            CallOptimization call_optimization(function);
+            if (call_optimization.is_simple_api_call() &&
+                !call_optimization.IsCompatibleReceiver(receiver, holder)) {
+              TRACE_GENERIC_IC(isolate(), "LoadIC",
+                               "incompatible receiver type");
+              code = slow_stub();
+            }
+          }
+        }
+      }
+    }
+    if (code.is_null()) code = ComputeHandler(lookup);
   }
 
   PatchCache(lookup->name(), code);
@@ -1242,6 +1274,8 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
       if (v8::ToCData<Address>(info->getter()) == 0) break;
      if (!ExecutableAccessorInfo::IsCompatibleReceiverMap(isolate(), info,
                                                            map)) {
+        // This case should be already handled in LoadIC::UpdateCaches.
+        UNREACHABLE();
         break;
       }
       if (!holder->HasFastProperties()) break;
@@ -1262,10 +1296,14 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
       }
       CallOptimization call_optimization(function);
       NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
-      if (call_optimization.is_simple_api_call() &&
-          call_optimization.IsCompatibleReceiver(receiver, holder)) {
-        return compiler.CompileLoadCallback(lookup->name(), call_optimization,
-                                            lookup->GetAccessorIndex());
+      if (call_optimization.is_simple_api_call()) {
+        if (call_optimization.IsCompatibleReceiver(receiver, holder)) {
+          return compiler.CompileLoadCallback(
+              lookup->name(), call_optimization, lookup->GetAccessorIndex());
+        } else {
+          // This case should be already handled in LoadIC::UpdateCaches.
+          UNREACHABLE();
+        }
       }
       int expected_arguments =
           function->shared()->internal_formal_parameter_count();
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 749eb4fd57bd58..84190801f01350 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -389,6 +389,7 @@ typedef List<Object*> DebugObjectCache;
   V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \
   V(PromiseRejectCallback, promise_reject_callback, NULL) \
   V(const v8::StartupData*, snapshot_blob, NULL) \
+  V(bool, creating_default_snapshot, false) \
   ISOLATE_INIT_SIMULATOR_LIST(V)
 
 #define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index afca7d00a6c113..bb422a3fcd900b 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -500,8 +500,8 @@ void Assembler::CheckBuffer() {
 }
 
-void Assembler::CheckTrampolinePoolQuick() {
-  if (pc_offset() >= next_buffer_check_) {
+void Assembler::CheckTrampolinePoolQuick(int extra_instructions) {
+  if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) {
     CheckTrampolinePool();
   }
 }
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index e05fc015a838c7..1d38f3a3dc2dec 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -795,7 +795,7 @@ void Assembler::bind_to(Label* L, int pos) {
       trampoline_pos = get_trampoline_entry(fixup_pos);
       CHECK(trampoline_pos != kInvalidSlotPos);
     }
-    DCHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
+    CHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
     target_at_put(fixup_pos, trampoline_pos, false);
     fixup_pos = trampoline_pos;
     dist = pos - fixup_pos;
@@ -1415,6 +1415,7 @@ void Assembler::jal(int32_t target) {
 
 void Assembler::jalr(Register rs, Register rd) {
+  DCHECK(rs.code() != rd.code());
   BlockTrampolinePoolScope block_trampoline_pool(this);
   positions_recorder()->WriteRecordedPositions();
   GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
@@ -2633,6 +2634,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
 
 void Assembler::BlockTrampolinePoolFor(int instructions) {
+  CheckTrampolinePoolQuick(instructions);
   BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
 }
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 6cdfcfdabd65cf..69201dc32cb5ab 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -1253,7 +1253,7 @@ class Assembler : public AssemblerBase {
   inline void CheckBuffer();
   void GrowBuffer();
   inline void emit(Instr x);
-  inline void CheckTrampolinePoolQuick();
+  inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
 
   // Instruction generation.
   // We have 3 different kind of encoding layout on MIPS.
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 5b5e050f511f0f..e1cf6d664103fc 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -4028,8 +4028,8 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
   intptr_t loc = reinterpret_cast<intptr_t>(GetCode().location());
   __ Move(t9, target);
-  __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
-  __ Call(ra);
+  __ li(at, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
+  __ Call(at);
 }
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index 76117d08e3f7a8..7f18335b5937f1 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -504,8 +504,8 @@ void Assembler::CheckBuffer() {
 }
 
-void Assembler::CheckTrampolinePoolQuick() {
-  if (pc_offset() >= next_buffer_check_) {
+void Assembler::CheckTrampolinePoolQuick(int extra_instructions) {
+  if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) {
     CheckTrampolinePool();
   }
 }
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index 7a3091530336de..685100f59a9000 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -780,7 +780,7 @@ void Assembler::bind_to(Label* L, int pos) {
       trampoline_pos = get_trampoline_entry(fixup_pos);
       CHECK(trampoline_pos != kInvalidSlotPos);
     }
-    DCHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
+    CHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
     target_at_put(fixup_pos, trampoline_pos, false);
     fixup_pos = trampoline_pos;
     dist = pos - fixup_pos;
@@ -1396,6 +1396,7 @@ void Assembler::jal(int64_t target) {
 
 void Assembler::jalr(Register rs, Register rd) {
+  DCHECK(rs.code() != rd.code());
   BlockTrampolinePoolScope block_trampoline_pool(this);
   positions_recorder()->WriteRecordedPositions();
   GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
@@ -2809,6 +2810,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
 
 void Assembler::BlockTrampolinePoolFor(int instructions) {
+  CheckTrampolinePoolQuick(instructions);
   BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
 }
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index bb7fac460702c1..7f026bf57cc94e 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -1288,7 +1288,7 @@ class Assembler : public AssemblerBase {
   void GrowBuffer();
   inline void emit(Instr x);
   inline void emit(uint64_t x);
-  inline void CheckTrampolinePoolQuick();
+  inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
 
   // Instruction generation.
   // We have 3 different kind of encoding layout on MIPS.
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index fb9c4ee068fe1d..e53064f05a039c 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -4071,8 +4071,8 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
   intptr_t loc = reinterpret_cast<intptr_t>(GetCode().location());
   __ Move(t9, target);
-  __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
-  __ Call(ra);
+  __ li(at, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
+  __ Call(at);
 }
 
@@ -5285,9 +5285,9 @@ static void CallApiFunctionAndReturn(
   __ li(s3, Operand(next_address));
   __ ld(s0, MemOperand(s3, kNextOffset));
   __ ld(s1, MemOperand(s3, kLimitOffset));
-  __ ld(s2, MemOperand(s3, kLevelOffset));
-  __ Daddu(s2, s2, Operand(1));
-  __ sd(s2, MemOperand(s3, kLevelOffset));
+  __ lw(s2, MemOperand(s3, kLevelOffset));
+  __ Addu(s2, s2, Operand(1));
+  __ sw(s2, MemOperand(s3, kLevelOffset));
 
   if (FLAG_log_timer_events) {
     FrameScope frame(masm, StackFrame::MANUAL);
@@ -5328,11 +5328,11 @@ static void CallApiFunctionAndReturn(
   // previous handle scope.
   __ sd(s0, MemOperand(s3, kNextOffset));
   if (__ emit_debug_code()) {
-    __ ld(a1, MemOperand(s3, kLevelOffset));
+    __ lw(a1, MemOperand(s3, kLevelOffset));
     __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
   }
-  __ Dsubu(s2, s2, Operand(1));
-  __ sd(s2, MemOperand(s3, kLevelOffset));
+  __ Subu(s2, s2, Operand(1));
+  __ sw(s2, MemOperand(s3, kLevelOffset));
   __ ld(at, MemOperand(s3, kLimitOffset));
   __ Branch(&delete_allocated_handles, ne, s1, Operand(at));
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 67a7b2bc7a18bd..82a27f4d215ba6 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -3275,54 +3275,58 @@ MaybeHandle<Object> Object::SetSuperProperty(LookupIterator* it,
   if (found) return result;
 
   LookupIterator own_lookup(it->GetReceiver(), it->name(), LookupIterator::OWN);
+  for (; own_lookup.IsFound(); own_lookup.Next()) {
+    switch (own_lookup.state()) {
+      case LookupIterator::ACCESS_CHECK:
+        if (!own_lookup.HasAccess()) {
+          return JSObject::SetPropertyWithFailedAccessCheck(&own_lookup, value,
+                                                            SLOPPY);
+        }
+        break;
 
-  switch (own_lookup.state()) {
-    case LookupIterator::NOT_FOUND:
-      return JSObject::AddDataProperty(&own_lookup, value, NONE, language_mode,
-                                       store_mode);
-
-    case LookupIterator::INTEGER_INDEXED_EXOTIC:
-      return result;
+      case LookupIterator::INTEGER_INDEXED_EXOTIC:
+        return RedefineNonconfigurableProperty(it->isolate(), it->name(), value,
+                                               language_mode);
 
-    case LookupIterator::DATA: {
-      PropertyDetails details = own_lookup.property_details();
-      if (details.IsConfigurable() || !details.IsReadOnly()) {
-        return JSObject::SetOwnPropertyIgnoreAttributes(
-            Handle<JSObject>::cast(it->GetReceiver()), it->name(), value,
-            details.attributes());
+      case LookupIterator::DATA: {
+        PropertyDetails details = own_lookup.property_details();
+        if (details.IsConfigurable() || !details.IsReadOnly()) {
+          return JSObject::SetOwnPropertyIgnoreAttributes(
+              Handle<JSObject>::cast(it->GetReceiver()), it->name(), value,
+              details.attributes());
+        }
+        return WriteToReadOnlyProperty(&own_lookup, value, language_mode);
       }
-      return WriteToReadOnlyProperty(&own_lookup, value, language_mode);
-    }
 
-    case LookupIterator::ACCESSOR: {
-      PropertyDetails details = own_lookup.property_details();
-      if (details.IsConfigurable()) {
-        return JSObject::SetOwnPropertyIgnoreAttributes(
-            Handle<JSObject>::cast(it->GetReceiver()), it->name(), value,
-            details.attributes());
-      }
+      case LookupIterator::ACCESSOR: {
+        PropertyDetails details = own_lookup.property_details();
+        if (details.IsConfigurable()) {
+          return JSObject::SetOwnPropertyIgnoreAttributes(
+              Handle<JSObject>::cast(it->GetReceiver()), it->name(), value,
+              details.attributes());
+        }
 
-      return RedefineNonconfigurableProperty(it->isolate(), it->name(), value,
-                                             language_mode);
-    }
+        return RedefineNonconfigurableProperty(it->isolate(), it->name(), value,
+                                               language_mode);
+      }
 
-    case LookupIterator::TRANSITION:
-      UNREACHABLE();
-      break;
+      case LookupIterator::INTERCEPTOR:
+      case LookupIterator::JSPROXY: {
+        bool found = false;
+        MaybeHandle<Object> result = SetPropertyInternal(
+            &own_lookup, value, language_mode, store_mode, &found);
+        if (found) return result;
+        break;
+      }
 
-    case LookupIterator::INTERCEPTOR:
-    case LookupIterator::JSPROXY:
-    case LookupIterator::ACCESS_CHECK: {
-      bool found = false;
-      MaybeHandle<Object> result = SetPropertyInternal(
-          &own_lookup, value, language_mode, store_mode, &found);
-      if (found) return result;
-      return SetDataProperty(&own_lookup, value);
+      case LookupIterator::NOT_FOUND:
+      case LookupIterator::TRANSITION:
+        UNREACHABLE();
     }
   }
 
-  UNREACHABLE();
-  return MaybeHandle<Object>();
+  return JSObject::AddDataProperty(&own_lookup, value, NONE, language_mode,
+                                   store_mode);
 }
 
@@ -14697,9 +14701,10 @@ Handle<Derived> HashTable<Derived, Shape, Key>::New(
     PretenureFlag pretenure) {
   DCHECK(0 <= at_least_space_for);
   DCHECK(!capacity_option || base::bits::IsPowerOfTwo32(at_least_space_for));
+
   int capacity = (capacity_option == USE_CUSTOM_MINIMUM_CAPACITY)
                      ? at_least_space_for
-                     : isolate->serializer_enabled()
+                     : isolate->creating_default_snapshot()
                            ? ComputeCapacityForSerialization(at_least_space_for)
                            : ComputeCapacity(at_least_space_for);
   if (capacity > HashTable::kMaxCapacity) {
@@ -15692,6 +15697,14 @@ Handle<String> StringTable::LookupKey(Isolate* isolate, HashTableKey* key) {
 }
 
+String* StringTable::LookupKeyIfExists(Isolate* isolate, HashTableKey* key) {
+  Handle<StringTable> table = isolate->factory()->string_table();
+  int entry = table->FindEntry(key);
+  if (entry != kNotFound) return String::cast(table->KeyAt(entry));
+  return NULL;
+}
+
+
 Handle<Object> CompilationCacheTable::Lookup(Handle<String> src,
                                              Handle<Context> context,
                                              LanguageMode language_mode) {
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 4823243f2384b5..5ec1d7d27baf31 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -3553,6 +3553,7 @@ class StringTable: public HashTable<StringTable, StringTableShape, HashTableKey*>
   static Handle<String> LookupString(Isolate* isolate, Handle<String> key);
   static Handle<String> LookupKey(Isolate* isolate, HashTableKey* key);
+  static String* LookupKeyIfExists(Isolate* isolate, HashTableKey* key);
 
   // Tries to internalize given string and returns string handle on success
   // or an empty handle otherwise.
diff --git a/deps/v8/src/snapshot/serialize.cc b/deps/v8/src/snapshot/serialize.cc
index dbe92a6accb129..7f123a3fbed622 100644
--- a/deps/v8/src/snapshot/serialize.cc
+++ b/deps/v8/src/snapshot/serialize.cc
@@ -516,10 +516,18 @@ void Deserializer::DecodeReservation(
 
 void Deserializer::FlushICacheForNewCodeObjects() {
-  PageIterator it(isolate_->heap()->code_space());
-  while (it.has_next()) {
-    Page* p = it.next();
-    CpuFeatures::FlushICache(p->area_start(), p->area_end() - p->area_start());
+  if (!deserializing_user_code_) {
+    // The entire isolate is newly deserialized. Simply flush all code pages.
+    PageIterator it(isolate_->heap()->code_space());
+    while (it.has_next()) {
+      Page* p = it.next();
+      CpuFeatures::FlushICache(p->area_start(),
+                               p->area_end() - p->area_start());
+    }
+  }
+  for (Code* code : new_code_objects_) {
+    CpuFeatures::FlushICache(code->instruction_start(),
+                             code->instruction_size());
   }
 }
@@ -556,10 +564,15 @@ void Deserializer::Deserialize(Isolate* isolate) {
   DCHECK_NULL(isolate_->thread_manager()->FirstThreadStateInUse());
   // No active handles.
   DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
-  isolate_->heap()->IterateSmiRoots(this);
-  isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
-  isolate_->heap()->RepairFreeListsAfterDeserialization();
-  isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
+
+  {
+    DisallowHeapAllocation no_gc;
+    isolate_->heap()->IterateSmiRoots(this);
+    isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
+    isolate_->heap()->RepairFreeListsAfterDeserialization();
+    isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
+    DeserializeDeferredObjects();
+  }
 
   isolate_->heap()->set_native_contexts_list(
       isolate_->heap()->undefined_value());
@@ -608,11 +621,12 @@ MaybeHandle<Object> Deserializer::DeserializePartial(
   Object* root;
   Object* outdated_contexts;
   VisitPointer(&root);
+  DeserializeDeferredObjects();
   VisitPointer(&outdated_contexts);
 
-  // There's no code deserialized here. If this assert fires
-  // then that's changed and logging should be added to notify
-  // the profiler et al of the new code.
+  // There's no code deserialized here. If this assert fires then that's
+  // changed and logging should be added to notify the profiler et al of the
+  // new code, which also has to be flushed from instruction cache.
   CHECK_EQ(start_address, code_space->top());
   CHECK(outdated_contexts->IsFixedArray());
   *outdated_contexts_out =
@@ -628,10 +642,17 @@ MaybeHandle<SharedFunctionInfo> Deserializer::DeserializeCode(
     return Handle<SharedFunctionInfo>();
   } else {
     deserializing_user_code_ = true;
-    DisallowHeapAllocation no_gc;
-    Object* root;
-    VisitPointer(&root);
-    return Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
+    HandleScope scope(isolate);
+    Handle<SharedFunctionInfo> result;
+    {
+      DisallowHeapAllocation no_gc;
+      Object* root;
+      VisitPointer(&root);
+      DeserializeDeferredObjects();
+      result = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
+    }
+    CommitNewInternalizedStrings(isolate);
+    return scope.CloseAndEscape(result);
   }
 }
 
@@ -652,13 +673,21 @@ void Deserializer::VisitPointers(Object** start, Object** end) {
 }
 
-void Deserializer::RelinkAllocationSite(AllocationSite* site) {
-  if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
-    site->set_weak_next(isolate_->heap()->undefined_value());
-  } else {
-    site->set_weak_next(isolate_->heap()->allocation_sites_list());
+void Deserializer::DeserializeDeferredObjects() {
+  for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
+    int space = code & kSpaceMask;
+    DCHECK(space <= kNumberOfSpaces);
+    DCHECK(code - space == kNewObject);
+    HeapObject* object = GetBackReferencedObject(space);
+    int size = source_.GetInt() << kPointerSizeLog2;
+    Address obj_address = object->address();
+    Object** start = reinterpret_cast<Object**>(obj_address + kPointerSize);
+    Object** end = reinterpret_cast<Object**>(obj_address + size);
+    bool filled = ReadData(start, end, space, obj_address);
+    CHECK(filled);
+    DCHECK(CanBeDeferred(object));
+    PostProcessNewObject(object, space);
   }
-  isolate_->heap()->set_allocation_sites_list(site);
 }
 
@@ -688,31 +717,76 @@ class StringTableInsertionKey : public HashTableKey {
     return handle(string_, isolate);
   }
+ private:
   String* string_;
   uint32_t hash_;
+  DisallowHeapAllocation no_gc;
 };
 
-HeapObject* Deserializer::ProcessNewObjectFromSerializedCode(HeapObject* obj) {
-  if (obj->IsString()) {
-    String* string = String::cast(obj);
-    // Uninitialize hash field as the hash seed may have changed.
-    string->set_hash_field(String::kEmptyHashField);
-    if (string->IsInternalizedString()) {
-      DisallowHeapAllocation no_gc;
-      HandleScope scope(isolate_);
-      StringTableInsertionKey key(string);
-      String* canonical = *StringTable::LookupKey(isolate_, &key);
-      string->SetForwardedInternalizedString(canonical);
-      return canonical;
+HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
+  if (deserializing_user_code()) {
+    if (obj->IsString()) {
+      String* string = String::cast(obj);
+      // Uninitialize hash field as the hash seed may have changed.
+      string->set_hash_field(String::kEmptyHashField);
+      if (string->IsInternalizedString()) {
+        // Canonicalize the internalized string. If it already exists in the
+        // string table, set it to forward to the existing one.
+        StringTableInsertionKey key(string);
+        String* canonical = StringTable::LookupKeyIfExists(isolate_, &key);
+        if (canonical == NULL) {
+          new_internalized_strings_.Add(handle(string));
+          return string;
+        } else {
+          string->SetForwardedInternalizedString(canonical);
+          return canonical;
+        }
+      }
+    } else if (obj->IsScript()) {
+      // Assign a new script id to avoid collision.
+      Script::cast(obj)->set_id(isolate_->heap()->NextScriptId());
+    } else {
+      DCHECK(CanBeDeferred(obj));
+    }
+  }
+  if (obj->IsAllocationSite()) {
+    DCHECK(obj->IsAllocationSite());
+    // Allocation sites are present in the snapshot, and must be linked into
+    // a list at deserialization time.
+    AllocationSite* site = AllocationSite::cast(obj);
+    // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
+    // as a (weak) root. If this root is relocated correctly, this becomes
+    // unnecessary.
+    if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
+      site->set_weak_next(isolate_->heap()->undefined_value());
+    } else {
+      site->set_weak_next(isolate_->heap()->allocation_sites_list());
+    }
+    isolate_->heap()->set_allocation_sites_list(site);
+  } else if (obj->IsCode()) {
+    // We flush all code pages after deserializing the startup snapshot. In that
+    // case, we only need to remember code objects in the large object space.
+    // When deserializing user code, remember each individual code object.
+    if (deserializing_user_code() || space == LO_SPACE) {
+      new_code_objects_.Add(Code::cast(obj));
     }
-  } else if (obj->IsScript()) {
-    Script::cast(obj)->set_id(isolate_->heap()->NextScriptId());
   }
   return obj;
 }
 
+void Deserializer::CommitNewInternalizedStrings(Isolate* isolate) {
+  StringTable::EnsureCapacityForDeserialization(
+      isolate, new_internalized_strings_.length());
+  for (Handle<String> string : new_internalized_strings_) {
+    StringTableInsertionKey key(*string);
+    DCHECK_NULL(StringTable::LookupKeyIfExists(isolate, &key));
+    StringTable::LookupKey(isolate, &key);
+  }
+}
+
+
 HeapObject* Deserializer::GetBackReferencedObject(int space) {
   HeapObject* obj;
   BackReference back_reference(source_.GetInt());
@@ -746,21 +820,10 @@ void Deserializer::ReadObject(int space_number, Object** write_back) {
   HeapObject* obj;
   int next_int = source_.GetInt();
 
-  bool double_align = false;
-#ifndef V8_HOST_ARCH_64_BIT
-  double_align = next_int == kDoubleAlignmentSentinel;
-  if (double_align) next_int = source_.GetInt();
-#endif
-
   DCHECK_NE(kDoubleAlignmentSentinel, next_int);
   int size = next_int << kObjectAlignmentBits;
-  int reserved_size = size + (double_align ? kPointerSize : 0);
-  address = Allocate(space_number, reserved_size);
+  address = Allocate(space_number, size);
   obj = HeapObject::FromAddress(address);
-  if (double_align) {
-    obj = isolate_->heap()->DoubleAlignForDeserialization(obj, reserved_size);
-    address = obj->address();
-  }
 
   isolate_->heap()->OnAllocationEvent(obj, size);
   Object** current = reinterpret_cast<Object**>(address);
@@ -768,24 +831,17 @@ void Deserializer::ReadObject(int space_number, Object** write_back) {
   if (FLAG_log_snapshot_positions) {
     LOG(isolate_, SnapshotPositionEvent(address, source_.position()));
   }
-  ReadData(current, limit, space_number, address);
-
-  // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
-  // as a (weak) root. If this root is relocated correctly,
-  // RelinkAllocationSite() isn't necessary.
-  if (obj->IsAllocationSite()) RelinkAllocationSite(AllocationSite::cast(obj));
 
-  // Fix up strings from serialized user code.
-  if (deserializing_user_code()) obj = ProcessNewObjectFromSerializedCode(obj);
+  if (ReadData(current, limit, space_number, address)) {
+    // Only post process if object content has not been deferred.
+    obj = PostProcessNewObject(obj, space_number);
+  }
 
   Object* write_back_obj = obj;
   UnalignedCopy(write_back, &write_back_obj);
 #ifdef DEBUG
   if (obj->IsCode()) {
     DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
-#ifdef VERIFY_HEAP
-    obj->ObjectVerify();
-#endif  // VERIFY_HEAP
   } else {
     DCHECK(space_number != CODE_SPACE);
   }
@@ -829,7 +885,7 @@ Address Deserializer::Allocate(int space_index, int size) {
 }
 
-void Deserializer::ReadData(Object** current, Object** limit, int source_space,
+bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
                             Address current_object_address) {
   Isolate* const isolate = isolate_;
   // Write barrier support costs around 1% in startup time.  In fact there
@@ -1086,6 +1142,18 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
         break;
       }
 
+      case kDeferred: {
+        // Deferred can only occur right after the heap object header.
+        DCHECK(current == reinterpret_cast<Object**>(current_object_address +
+                                                     kPointerSize));
+        HeapObject* obj = HeapObject::FromAddress(current_object_address);
+        // If the deferred object is a map, its instance type may be used
+        // during deserialization. Initialize it with a temporary value.
+        if (obj->IsMap()) Map::cast(obj)->set_instance_type(FILLER_TYPE);
+        current = limit;
+        return false;
+      }
+
       case kSynchronize:
         // If we get here then that indicates that you have a mismatch between
        // the number of GC roots when serializing and deserializing.
@@ -1192,6 +1260,7 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
     }
   }
   CHECK_EQ(limit, current);
+  return true;
 }
 
@@ -1200,6 +1269,7 @@ Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
       sink_(sink),
       external_reference_encoder_(isolate),
       root_index_map_(isolate),
+      recursion_depth_(0),
      code_address_map_(NULL),
       large_objects_total_size_(0),
       seen_large_objects_index_(0) {
@@ -1275,6 +1345,16 @@ void Serializer::OutputStatistics(const char* name) {
 }
 
+void Serializer::SerializeDeferredObjects() {
+  while (deferred_objects_.length() > 0) {
+    HeapObject* obj = deferred_objects_.RemoveLast();
+    ObjectSerializer obj_serializer(this, obj, sink_, kPlain, kStartOfObject);
+    obj_serializer.SerializeDeferred();
+  }
+  sink_->Put(kSynchronize, "Finished with deferred objects");
+}
+
+
 void StartupSerializer::SerializeStrongReferences() {
   Isolate* isolate = this->isolate();
   // No active threads.
@@ -1318,6 +1398,7 @@ void PartialSerializer::Serialize(Object** o) {
     back_reference_map()->AddGlobalProxy(context->global_proxy());
   }
   VisitPointer(o);
+  SerializeDeferredObjects();
   SerializeOutdatedContextsAsFixedArray();
   Pad();
 }
@@ -1342,10 +1423,10 @@ void PartialSerializer::SerializeOutdatedContextsAsFixedArray() {
       sink_->Put(reinterpret_cast<byte*>(&length_smi)[i], "Byte");
     }
     for (int i = 0; i < length; i++) {
-      BackReference back_ref = outdated_contexts_[i];
-      DCHECK(BackReferenceIsAlreadyAllocated(back_ref));
-      sink_->Put(kBackref + back_ref.space(), "BackRef");
-      sink_->PutInt(back_ref.reference(), "BackRefValue");
+      Context* context = outdated_contexts_[i];
+      BackReference back_reference = back_reference_map_.Lookup(context);
+      sink_->Put(kBackref + back_reference.space(), "BackRef");
+      PutBackReference(context, back_reference);
     }
   }
 }
@@ -1508,10 +1589,7 @@ bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
                  "BackRefWithSkip");
       sink_->PutInt(skip, "BackRefSkipDistance");
     }
-    DCHECK(BackReferenceIsAlreadyAllocated(back_reference));
-    sink_->PutInt(back_reference.reference(), "BackRefValue");
-
-    hot_objects_.Add(obj);
+    PutBackReference(obj, back_reference);
   }
   return true;
 }
@@ -1547,7 +1625,7 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
 }
 
-void StartupSerializer::SerializeWeakReferences() {
+void StartupSerializer::SerializeWeakReferencesAndDeferred() {
   // This phase comes right after the serialization (of the snapshot).
   // After we have done the partial serialization the partial snapshot cache
   // will contain some references needed to decode the partial snapshot. We
@@ -1556,6 +1634,7 @@ void StartupSerializer::SerializeWeakReferences() {
   Object* undefined = isolate()->heap()->undefined_value();
   VisitPointer(&undefined);
   isolate()->heap()->IterateWeakRoots(this, VISIT_ALL);
+  SerializeDeferredObjects();
   Pad();
 }
 
@@ -1588,6 +1667,13 @@ void Serializer::PutRoot(int root_index,
 }
 
+void Serializer::PutBackReference(HeapObject* object, BackReference reference) {
+  DCHECK(BackReferenceIsAlreadyAllocated(reference));
+  sink_->PutInt(reference.reference(), "BackRefValue");
+  hot_objects_.Add(object);
+}
+
+
 void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
                                         WhereToPoint where_to_point, int skip) {
   if (obj->IsMap()) {
@@ -1641,9 +1727,7 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
       Context::cast(obj)->global_object() == global_object_) {
     // Context refers to the current global object. This reference will
     // become outdated after deserialization.
-    BackReference back_reference = back_reference_map_.Lookup(obj);
-    DCHECK(back_reference.is_valid());
-    outdated_contexts_.Add(back_reference);
+    outdated_contexts_.Add(Context::cast(obj));
   }
 }
 
@@ -1671,17 +1755,8 @@ void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
     }
     back_reference = serializer_->AllocateLargeObject(size);
   } else {
-    bool needs_double_align = false;
-    if (object_->NeedsToEnsureDoubleAlignment()) {
-      // Add wriggle room for double alignment padding.
-      back_reference = serializer_->Allocate(space, size + kPointerSize);
-      needs_double_align = true;
-    } else {
-      back_reference = serializer_->Allocate(space, size);
-    }
+    back_reference = serializer_->Allocate(space, size);
     sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
-    if (needs_double_align)
-      sink_->PutInt(kDoubleAlignmentSentinel, "DoubleAlignSentinel");
     int encoded_size = size >> kObjectAlignmentBits;
     DCHECK_NE(kDoubleAlignmentSentinel, encoded_size);
     sink_->PutInt(encoded_size, "ObjectSizeInWords");
@@ -1773,6 +1848,9 @@ void Serializer::ObjectSerializer::Serialize() {
   // We cannot serialize typed array objects correctly.
   DCHECK(!object_->IsJSTypedArray());
 
+  // We don't expect fillers.
+  DCHECK(!object_->IsFiller());
+
   if (object_->IsPrototypeInfo()) {
     Object* prototype_users = PrototypeInfo::cast(object_)->prototype_users();
     if (prototype_users->IsWeakFixedArray()) {
@@ -1810,6 +1888,39 @@ void Serializer::ObjectSerializer::Serialize() {
   CHECK_EQ(0, bytes_processed_so_far_);
   bytes_processed_so_far_ = kPointerSize;
 
+  RecursionScope recursion(serializer_);
+  // Objects that are immediately post processed during deserialization
+  // cannot be deferred, since post processing requires the object content.
+  if (recursion.ExceedsMaximum() && CanBeDeferred(object_)) {
+    serializer_->QueueDeferredObject(object_);
+    sink_->Put(kDeferred, "Deferring object content");
+    return;
+  }
+
+  object_->IterateBody(map->instance_type(), size, this);
+  OutputRawData(object_->address() + size);
+}
+
+
+void Serializer::ObjectSerializer::SerializeDeferred() {
+  if (FLAG_trace_serializer) {
+    PrintF(" Encoding deferred heap object: ");
+    object_->ShortPrint();
+    PrintF("\n");
+  }
+
+  int size = object_->Size();
+  Map* map = object_->map();
+  BackReference reference = serializer_->back_reference_map()->Lookup(object_);
+
+  // Serialize the rest of the object.
+  CHECK_EQ(0, bytes_processed_so_far_);
+  bytes_processed_so_far_ = kPointerSize;
+
+  sink_->Put(kNewObject + reference.space(), "deferred object");
+  serializer_->PutBackReference(object_, reference);
+  sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");
+
   object_->IterateBody(map->instance_type(), size, this);
   OutputRawData(object_->address() + size);
 }
@@ -2134,6 +2245,7 @@ ScriptData* CodeSerializer::Serialize(Isolate* isolate,
   DisallowHeapAllocation no_gc;
   Object** location = Handle<Object>::cast(info).location();
   cs.VisitPointer(location);
+  cs.SerializeDeferredObjects();
   cs.Pad();
 
   SerializedCodeData data(sink.data(), cs);
@@ -2212,8 +2324,6 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
 void CodeSerializer::SerializeGeneric(HeapObject* heap_object,
                                       HowToCode how_to_code,
                                       WhereToPoint where_to_point) {
-  if (heap_object->IsInternalizedString()) num_internalized_strings_++;
-
   // Object has not yet been serialized.  Serialize it here.
   ObjectSerializer serializer(this, heap_object, sink_, how_to_code,
                               where_to_point);
@@ -2325,10 +2435,6 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
     return MaybeHandle<SharedFunctionInfo>();
   }
 
-  // Eagerly expand string table to avoid allocations during deserialization.
-  StringTable::EnsureCapacityForDeserialization(isolate,
-                                                scd->NumInternalizedStrings());
-
   // Prepare and register list of attached objects.
   Vector<const uint32_t> code_stub_keys = scd->CodeStubKeys();
   Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(
@@ -2492,7 +2598,6 @@ SerializedCodeData::SerializedCodeData(const List<byte>& payload,
   SetHeaderValue(kCpuFeaturesOffset,
                  static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
   SetHeaderValue(kFlagHashOffset, FlagList::Hash());
-  SetHeaderValue(kNumInternalizedStringsOffset, cs.num_internalized_strings());
   SetHeaderValue(kNumReservationsOffset, reservations.length());
   SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
   SetHeaderValue(kPayloadLengthOffset, payload.length());
@@ -2570,10 +2675,6 @@ Vector<const byte> SerializedCodeData::Payload() const {
 }
 
-int SerializedCodeData::NumInternalizedStrings() const {
-  return GetHeaderValue(kNumInternalizedStringsOffset);
-}
-
 Vector<const uint32_t> SerializedCodeData::CodeStubKeys() const {
   int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
   const byte* start = data_ + kHeaderSize + reservations_size;
diff --git a/deps/v8/src/snapshot/serialize.h b/deps/v8/src/snapshot/serialize.h
index 36514e13d39192..67ce69ab94d66b 100644
--- a/deps/v8/src/snapshot/serialize.h
+++ b/deps/v8/src/snapshot/serialize.h
@@ -306,6 +306,10 @@ class SerializerDeserializer: public ObjectVisitor {
   static const int kNumberOfSpaces = LAST_SPACE + 1;
 
  protected:
+  static bool CanBeDeferred(HeapObject* o) {
+    return !o->IsString() && !o->IsScript();
+  }
+
   // ---------- byte code range 0x00..0x7f ----------
   // Byte codes in this range represent Where, HowToCode and WhereToPoint.
   // Where the pointed-to object can be found:
@@ -373,6 +377,8 @@ class SerializerDeserializer: public ObjectVisitor {
   static const int kNop = 0x3d;
   // Move to next reserved chunk.
   static const int kNextChunk = 0x3e;
+  // Deferring object content.
+  static const int kDeferred = 0x3f;
   // A tag emitted at strategic points in the snapshot to delineate sections.
   // If the deserializer does not find these at the expected moments then it
   // is an indication that the snapshot and the VM do not fit together.
@@ -553,22 +559,22 @@ class Deserializer: public SerializerDeserializer {
     memcpy(dest, src, sizeof(*src));
   }
 
-  // Allocation sites are present in the snapshot, and must be linked into
-  // a list at deserialization time.
-  void RelinkAllocationSite(AllocationSite* site);
+  void DeserializeDeferredObjects();
+
+  void CommitNewInternalizedStrings(Isolate* isolate);
 
   // Fills in some heap data in an area from start to end (non-inclusive).  The
   // space id is used for the write barrier.  The object_address is the address
   // of the object we are writing into, or NULL if we are not writing into an
   // object, i.e. if we are writing a series of tagged values that are not on
-  // the heap.
-  void ReadData(Object** start, Object** end, int space,
+  // the heap. Return false if the object content has been deferred.
+  bool ReadData(Object** start, Object** end, int space,
                 Address object_address);
   void ReadObject(int space_number, Object** write_back);
   Address Allocate(int space_index, int size);
 
   // Special handling for serialized code like hooking up internalized strings.
-  HeapObject* ProcessNewObjectFromSerializedCode(HeapObject* obj);
+  HeapObject* PostProcessNewObject(HeapObject* obj, int space);
 
   // This returns the address of an object that has been described in the
   // snapshot by chunk index and offset.
@@ -594,6 +600,8 @@ class Deserializer: public SerializerDeserializer {
   ExternalReferenceTable* external_reference_table_;
 
   List<HeapObject*> deserialized_large_objects_;
+  List<Code*> new_code_objects_;
+  List<Handle<String> > new_internalized_strings_;
 
   bool deserializing_user_code_;
 
@@ -612,6 +620,8 @@ class Serializer : public SerializerDeserializer {
 
   void EncodeReservations(List<SerializedData::Reservation>* out) const;
 
+  void SerializeDeferredObjects();
+
   Isolate* isolate() const { return isolate_; }
 
   BackReferenceMap* back_reference_map() { return &back_reference_map_; }
@@ -634,6 +644,7 @@ class Serializer : public SerializerDeserializer {
           is_code_object_(o->IsCode()),
           code_has_been_output_(false) {}
     void Serialize();
+    void SerializeDeferred();
     void VisitPointers(Object** start, Object** end);
     void VisitEmbeddedPointer(RelocInfo* target);
     void VisitExternalReference(Address* p);
@@ -675,12 +686,29 @@ class Serializer : public SerializerDeserializer {
     bool code_has_been_output_;
   };
 
+  class RecursionScope {
+   public:
+    explicit RecursionScope(Serializer* serializer) : serializer_(serializer) {
+      serializer_->recursion_depth_++;
+    }
+    ~RecursionScope() { serializer_->recursion_depth_--; }
+    bool ExceedsMaximum() {
+      return serializer_->recursion_depth_ >= kMaxRecursionDepth;
+    }
+
+   private:
+    static const int kMaxRecursionDepth = 32;
+    Serializer* serializer_;
+  };
+
   virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
                                WhereToPoint where_to_point, int skip) = 0;
 
   void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
                int skip);
 
+  void PutBackReference(HeapObject* object, BackReference reference);
+
+  // Returns true if the object was successfully serialized.
   bool SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
                             WhereToPoint where_to_point, int skip);
 
@@ -722,6 +750,11 @@ class Serializer : public SerializerDeserializer {
 
   SnapshotByteSink* sink() const { return sink_; }
 
+  void QueueDeferredObject(HeapObject* obj) {
+    DCHECK(back_reference_map_.Lookup(obj).is_valid());
+    deferred_objects_.Add(obj);
+  }
+
   void OutputStatistics(const char* name);
 
   Isolate* isolate_;
@@ -732,8 +765,11 @@ class Serializer : public SerializerDeserializer {
   BackReferenceMap back_reference_map_;
   RootIndexMap root_index_map_;
 
+  int recursion_depth_;
+
   friend class Deserializer;
   friend class ObjectSerializer;
+  friend class RecursionScope;
   friend class SnapshotData;
 
  private:
@@ -752,6 +788,9 @@ class Serializer : public SerializerDeserializer {
 
   List<byte> code_buffer_;
 
+  // To handle stack overflow.
+  List<HeapObject*> deferred_objects_;
+
 #ifdef OBJECT_PRINT
   static const int kInstanceTypes = 256;
   int* instance_type_count_;
@@ -797,7 +836,7 @@ class PartialSerializer : public Serializer {
   void SerializeOutdatedContextsAsFixedArray();
 
   Serializer* startup_serializer_;
-  List<BackReference> outdated_contexts_;
+  List<Context*> outdated_contexts_;
   Object* global_object_;
   PartialCacheIndexMap partial_cache_index_map_;
   DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
@@ -829,11 +868,10 @@ class StartupSerializer : public Serializer {
   virtual void SerializeStrongReferences();
   virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
                                WhereToPoint where_to_point, int skip) override;
-  void SerializeWeakReferences();
+  void SerializeWeakReferencesAndDeferred();
   void Serialize() {
     SerializeStrongReferences();
-    SerializeWeakReferences();
-    Pad();
+    SerializeWeakReferencesAndDeferred();
   }
 
  private:
@@ -862,15 +900,11 @@ class CodeSerializer : public Serializer {
   }
 
   const List<uint32_t>* stub_keys() const { return &stub_keys_; }
-  int num_internalized_strings() const { return num_internalized_strings_; }
 
  private:
   CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source,
                  Code* main_code)
-      : Serializer(isolate, sink),
-        source_(source),
-        main_code_(main_code),
-        num_internalized_strings_(0) {
+      : Serializer(isolate, sink), source_(source), main_code_(main_code) {
     back_reference_map_.AddSourceString(source);
   }
 
@@ -892,7 +926,6 @@ class CodeSerializer : public Serializer {
   DisallowHeapAllocation no_gc_;
   String* source_;
   Code* main_code_;
-  int num_internalized_strings_;
   List<uint32_t> stub_keys_;
   DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
 };
@@ -951,7 +984,6 @@ class SerializedCodeData : public SerializedData {
 
   Vector<const Reservation> Reservations() const;
   Vector<const byte> Payload() const;
 
-  int NumInternalizedStrings() const;
   Vector<const uint32_t> CodeStubKeys() const;
 
  private:
@@ -972,17 +1004,16 @@ class SerializedCodeData : public SerializedData {
   uint32_t SourceHash(String* source) const { return source->length(); }
 
   // The data header consists of uint32_t-sized entries:
-  // [ 0] magic number and external reference count
-  // [ 1] version hash
-  // [ 2] source hash
-  // [ 3] cpu features
-  // [ 4] flag hash
-  // [ 5] number of internalized strings
-  // [ 6] number of code stub keys
-  // [ 7] number of reservation size entries
-  // [ 8] payload length
-  // [ 9] payload checksum part 1
-  // [10] payload checksum part 2
+  // [0] magic number and external reference count
+  // [1] version hash
+  // [2] source hash
+  // [3] cpu features
+  // [4] flag hash
+  // [5] number of code stub keys
+  // [6] number of reservation size entries
+  // [7] payload length
+  // [8] payload checksum part 1
+  // [9] payload checksum part 2
   // ...  reservations
   // ...  code stub keys
   // ...  serialized payload
   static const int kSourceHashOffset = kVersionHashOffset + kInt32Size;
   static const int kCpuFeaturesOffset = kSourceHashOffset + kInt32Size;
   static const int kFlagHashOffset = kCpuFeaturesOffset + kInt32Size;
-  static const int kNumInternalizedStringsOffset = kFlagHashOffset + kInt32Size;
-  static const int kNumReservationsOffset =
-      kNumInternalizedStringsOffset + kInt32Size;
+  static const int kNumReservationsOffset = kFlagHashOffset + kInt32Size;
   static const int kNumCodeStubKeysOffset = kNumReservationsOffset + kInt32Size;
   static const int kPayloadLengthOffset = kNumCodeStubKeysOffset + kInt32Size;
   static const int kChecksum1Offset = kPayloadLengthOffset + kInt32Size;
diff --git a/deps/v8/src/unicode-decoder.cc b/deps/v8/src/unicode-decoder.cc
index bb520990f0e951..2289e083425aeb 100644
--- a/deps/v8/src/unicode-decoder.cc
+++ b/deps/v8/src/unicode-decoder.cc
@@ -67,6 +67,7 @@ void Utf8DecoderBase::WriteUtf16Slow(const uint8_t* stream,
       // There's a total lack of bounds checking for stream
       // as it was already done in Reset.
       stream += cursor;
+      DCHECK(stream_length >= cursor);
      stream_length -= cursor;
       if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
         *data++ = Utf16::LeadSurrogate(character);
@@ -78,7 +79,6 @@ void Utf8DecoderBase::WriteUtf16Slow(const uint8_t* stream,
       data_length -= 1;
     }
   }
-  DCHECK(stream_length >= 0);
 }
 
 }  // namespace unibrow
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 54aed57b89bc24..b8ac5592de1f18 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -1313,7 +1313,13 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
     DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
 
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
-    LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+    LOperand* right;
+    if (SmiValuesAre32Bits() && instr->representation().IsSmi()) {
+      // We don't support tagged immediates, so we request it in a register.
+      right = UseRegisterAtStart(instr->BetterRightOperand());
+    } else {
+      right = UseOrConstantAtStart(instr->BetterRightOperand());
+    }
     return DefineSameAsFirst(new(zone()) LBitI(left, right));
   } else {
     return DoArithmeticT(instr->op(), instr);
@@ -1555,7 +1561,13 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
     DCHECK(instr->left()->representation().Equals(instr->representation()));
     DCHECK(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->left());
-    LOperand* right = UseOrConstantAtStart(instr->right());
+    LOperand* right;
+    if (SmiValuesAre32Bits() && instr->representation().IsSmi()) {
+      // We don't support tagged immediates, so we request it in a register.
+      right = UseRegisterAtStart(instr->right());
+    } else {
+      right = UseOrConstantAtStart(instr->right());
+    }
     LSubI* sub = new(zone()) LSubI(left, right);
     LInstruction* result = DefineSameAsFirst(sub);
     if (instr->CheckFlag(HValue::kCanOverflow)) {
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index da40058a352b00..35e27c3118f806 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -7318,6 +7318,57 @@ THREADED_TEST(Utf16Symbol) {
 }
 
+THREADED_TEST(Utf16MissingTrailing) {
+  LocalContext context;
+  v8::HandleScope scope(context->GetIsolate());
+
+  // Make sure it will go past the buffer, so it will call `WriteUtf16Slow`
+  int size = 1024 * 64;
+  uint8_t* buffer = new uint8_t[size];
+  for (int i = 0; i < size; i += 4) {
+    buffer[i] = 0xf0;
+    buffer[i + 1] = 0x9d;
+    buffer[i + 2] = 0x80;
+    buffer[i + 3] = 0x9e;
+  }
+
+  // Now invoke the decoder without last 3 bytes
+  v8::Local<v8::String> str =
+      v8::String::NewFromUtf8(
+          context->GetIsolate(), reinterpret_cast<char*>(buffer),
+          v8::NewStringType::kNormal, size - 3).ToLocalChecked();
+  USE(str);
+  delete[] buffer;
+}
+
+
+THREADED_TEST(Utf16Trailing3Byte) {
+  LocalContext context;
+  v8::HandleScope scope(context->GetIsolate());
+
+  // Make sure it will go past the buffer, so it will call `WriteUtf16Slow`
+  int size = 1024 * 63;
+  uint8_t* buffer = new uint8_t[size];
+  for (int i = 0; i < size; i += 3) {
+    buffer[i] = 0xe2;
+    buffer[i + 1] = 0x80;
+    buffer[i + 2] = 0xa6;
+  }
+
+  // Now invoke the decoder without last 3 bytes
+  v8::Local<v8::String> str =
+      v8::String::NewFromUtf8(
+          context->GetIsolate(), reinterpret_cast<char*>(buffer),
+          v8::NewStringType::kNormal, size).ToLocalChecked();
+
+  v8::String::Value value(str);
+  CHECK_EQ(value.length(), size / 3);
+  CHECK_EQ((*value)[value.length() - 1], 0x2026);
+
+  delete[] buffer;
+}
+
+
 THREADED_TEST(ToArrayIndex) {
   LocalContext context;
   v8::Isolate* isolate = context->GetIsolate();
@@ -20927,3 +20978,42 @@ TEST(SealHandleScopeNested) {
     USE(obj);
   }
 }
+
+
+TEST(CompatibleReceiverCheckOnCachedICHandler) {
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope scope(isolate);
+  v8::Local<FunctionTemplate> parent = FunctionTemplate::New(isolate);
+  v8::Local<v8::Signature> signature = v8::Signature::New(isolate, parent);
+  auto returns_42 =
+      v8::FunctionTemplate::New(isolate, Returns42, Local<Value>(), signature);
+  parent->PrototypeTemplate()->SetAccessorProperty(v8_str("age"), returns_42);
+  v8::Local<FunctionTemplate> child = v8::FunctionTemplate::New(isolate);
+  child->Inherit(parent);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+
+  // Make sure there's a compiled stub for "Child.prototype.age" in the cache.
+  CompileRun(
+      "var real = new Child();\n"
+      "for (var i = 0; i < 3; ++i) {\n"
+      "  real.age;\n"
+      "}\n");
+
+  // Check that the cached stub is never used.
+  ExpectInt32(
+      "var fake = Object.create(Child.prototype);\n"
+      "var result = 0;\n"
+      "function test(d) {\n"
+      "  if (d == 3) return;\n"
+      "  try {\n"
+      "    fake.age;\n"
+      "    result = 1;\n"
+      "  } catch (e) {\n"
+      "  }\n"
+      "  test(d+1);\n"
+      "}\n"
+      "test(0);\n"
+      "result;\n",
+      0);
+}
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index cb895779915512..059c04ad40efad 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -1981,4 +1981,63 @@ TEST(ARMv8_vrintX) {
 #undef CHECK_VRINT
   }
 }
+
+
+TEST(regress4292_b) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  Assembler assm(isolate, NULL, 0);
+  Label end;
+  __ mov(r0, Operand(isolate->factory()->infinity_value()));
+  for (int i = 0; i < 1020; ++i) {
+    __ b(hi, &end);
+  }
+  __ bind(&end);
+}
+
+
+TEST(regress4292_bl) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  Assembler assm(isolate, NULL, 0);
+  Label end;
+  __ mov(r0, Operand(isolate->factory()->infinity_value()));
+  for (int i = 0; i < 1020; ++i) {
+    __ bl(hi, &end);
+  }
+  __ bind(&end);
+}
+
+
+TEST(regress4292_blx) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  Assembler assm(isolate, NULL, 0);
+  Label end;
+  __ mov(r0, Operand(isolate->factory()->infinity_value()));
+  for (int i = 0; i < 1020; ++i) {
+    __ blx(&end);
+  }
+  __ bind(&end);
+}
+
+
+TEST(regress4292_CheckConstPool) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  Assembler assm(isolate, NULL, 0);
+  __ mov(r0, Operand(isolate->factory()->infinity_value()));
+  __ BlockConstPoolFor(1019);
+  for (int i = 0; i < 1019; ++i) __ nop();
+  __ vldr(d0, MemOperand(r0, 0));
+}
+
 #undef __
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index 7d0e1054010b9e..7a8beaa578d87b 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -1672,6 +1672,7 @@ TEST(jump_tables1) {

   Label done;
   {
+    __ BlockTrampolinePoolFor(kNumCases + 7);
     PredictableCodeSizeScope predictable(
         &assm, (kNumCases + 7) * Assembler::kInstrSize);
     Label here;
@@ -1748,6 +1749,7 @@ TEST(jump_tables2) {

   __ bind(&dispatch);
   {
+    __ BlockTrampolinePoolFor(kNumCases + 7);
     PredictableCodeSizeScope predictable(
         &assm, (kNumCases + 7) * Assembler::kInstrSize);
     Label here;
@@ -1823,6 +1825,7 @@ TEST(jump_tables3) {

   __ bind(&dispatch);
   {
+    __ BlockTrampolinePoolFor(kNumCases + 7);
     PredictableCodeSizeScope predictable(
         &assm, (kNumCases + 7) * Assembler::kInstrSize);
     Label here;
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index a6673d8e13b6c2..3b422a27168663 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -1889,6 +1889,7 @@ TEST(jump_tables1) {

   Label done;
   {
+    __ BlockTrampolinePoolFor(kNumCases * 2 + 7);
     PredictableCodeSizeScope predictable(
         &assm, (kNumCases * 2 + 7) * Assembler::kInstrSize);
     Label here;
@@ -1968,6 +1969,7 @@ TEST(jump_tables2) {
   }
   __ bind(&dispatch);
   {
+    __ BlockTrampolinePoolFor(kNumCases * 2 + 7);
     PredictableCodeSizeScope predictable(
         &assm, (kNumCases * 2 + 7) * Assembler::kInstrSize);
     Label here;
@@ -2049,6 +2051,7 @@ TEST(jump_tables3) {
   }
   __ bind(&dispatch);
   {
+    __ BlockTrampolinePoolFor(kNumCases * 2 + 7);
     PredictableCodeSizeScope predictable(
         &assm, (kNumCases * 2 + 7) * Assembler::kInstrSize);
     Label here;
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index 3a97d2137f34fe..515bac9d3adfa0 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -26,18 +26,20 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

 #include <stdlib.h>
+#include <iostream>  // NOLINT(readability/streams)

-#include "src/v8.h"
-#include "test/cctest/cctest.h"
-
+#include "src/base/utils/random-number-generator.h"
 #include "src/macro-assembler.h"
 #include "src/mips/macro-assembler-mips.h"
 #include "src/mips/simulator-mips.h"
+#include "src/v8.h"
+#include "test/cctest/cctest.h"

 using namespace v8::internal;

 typedef void* (*F)(int x, int y, int p2, int p3, int p4);
+typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);

 #define __ masm->
@@ -174,4 +176,86 @@ TEST(NaN1) {
 }


+TEST(jump_tables4) {
+  // Similar to test-assembler-mips jump_tables1, with extra test for branch
+  // trampoline required before emission of the dd table (where trampolines are
+  // blocked), and proper transition to long-branch mode.
+  // Regression test for v8:4294.
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  MacroAssembler assembler(isolate, NULL, 0);
+  MacroAssembler* masm = &assembler;
+
+  const int kNumCases = 512;
+  int values[kNumCases];
+  isolate->random_number_generator()->NextBytes(values, sizeof(values));
+  Label labels[kNumCases];
+  Label near_start, end;
+
+  __ addiu(sp, sp, -4);
+  __ sw(ra, MemOperand(sp));
+
+  __ mov(v0, zero_reg);
+
+  __ Branch(&end);
+  __ bind(&near_start);
+
+  // Generate slightly less than 32K instructions, which will soon require
+  // trampoline for branch distance fixup.
+  for (int i = 0; i < 32768 - 256; ++i) {
+    __ addiu(v0, v0, 1);
+  }
+
+  Label done;
+  {
+    __ BlockTrampolinePoolFor(kNumCases + 6);
+    PredictableCodeSizeScope predictable(
+        masm, (kNumCases + 6) * Assembler::kInstrSize);
+    Label here;
+
+    __ bal(&here);
+    __ sll(at, a0, 2);  // In delay slot.
+    __ bind(&here);
+    __ addu(at, at, ra);
+    __ lw(at, MemOperand(at, 4 * Assembler::kInstrSize));
+    __ jr(at);
+    __ nop();  // Branch delay slot nop.
+    for (int i = 0; i < kNumCases; ++i) {
+      __ dd(&labels[i]);
+    }
+  }
+
+  for (int i = 0; i < kNumCases; ++i) {
+    __ bind(&labels[i]);
+    __ lui(v0, (values[i] >> 16) & 0xffff);
+    __ ori(v0, v0, values[i] & 0xffff);
+    __ Branch(&done);
+  }
+
+  __ bind(&done);
+  __ lw(ra, MemOperand(sp));
+  __ addiu(sp, sp, 4);
+  __ jr(ra);
+  __ nop();
+
+  __ bind(&end);
+  __ Branch(&near_start);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  code->Print(std::cout);
+#endif
+  F1 f = FUNCTION_CAST<F1>(code->entry());
+  for (int i = 0; i < kNumCases; ++i) {
+    int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0));
+    ::printf("f(%d) = %d\n", i, res);
+    CHECK_EQ(values[i], res);
+  }
+}
+
+
 #undef __
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips64.cc b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
index eef658de67fef5..fadd45f43b08a7 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
@@ -26,10 +26,12 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

 #include <stdlib.h>
+#include <iostream>  // NOLINT(readability/streams)

 #include "src/v8.h"
 #include "test/cctest/cctest.h"

+#include "src/base/utils/random-number-generator.h"
 #include "src/macro-assembler.h"
 #include "src/mips64/macro-assembler-mips64.h"
 #include "src/mips64/simulator-mips64.h"
@@ -38,6 +40,7 @@

 using namespace v8::internal;

 typedef void* (*F)(int64_t x, int64_t y, int p2, int p3, int p4);
+typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);

 #define __ masm->
@@ -214,4 +217,90 @@ TEST(LoadAddress) {
   // Check results.
 }

+
+TEST(jump_tables4) {
+  // Similar to test-assembler-mips jump_tables1, with extra test for branch
+  // trampoline required before emission of the dd table (where trampolines are
+  // blocked), and proper transition to long-branch mode.
+  // Regression test for v8:4294.
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  MacroAssembler assembler(isolate, NULL, 0);
+  MacroAssembler* masm = &assembler;
+
+  const int kNumCases = 512;
+  int values[kNumCases];
+  isolate->random_number_generator()->NextBytes(values, sizeof(values));
+  Label labels[kNumCases];
+  Label near_start, end;
+
+  __ daddiu(sp, sp, -8);
+  __ sd(ra, MemOperand(sp));
+  if ((masm->pc_offset() & 7) == 0) {
+    __ nop();
+  }
+
+  __ mov(v0, zero_reg);
+
+  __ Branch(&end);
+  __ bind(&near_start);
+
+  // Generate slightly less than 32K instructions, which will soon require
+  // trampoline for branch distance fixup.
+  for (int i = 0; i < 32768 - 256; ++i) {
+    __ addiu(v0, v0, 1);
+  }
+
+  Label done;
+  {
+    __ BlockTrampolinePoolFor(kNumCases * 2 + 6);
+    PredictableCodeSizeScope predictable(
+        masm, (kNumCases * 2 + 6) * Assembler::kInstrSize);
+    Label here;
+
+    __ bal(&here);
+    __ dsll(at, a0, 3);  // In delay slot.
+    __ bind(&here);
+    __ daddu(at, at, ra);
+    __ ld(at, MemOperand(at, 4 * Assembler::kInstrSize));
+    __ jr(at);
+    __ nop();  // Branch delay slot nop.
+    for (int i = 0; i < kNumCases; ++i) {
+      __ dd(&labels[i]);
+    }
+  }
+
+  for (int i = 0; i < kNumCases; ++i) {
+    __ bind(&labels[i]);
+    __ lui(v0, (values[i] >> 16) & 0xffff);
+    __ ori(v0, v0, values[i] & 0xffff);
+    __ Branch(&done);
+  }
+
+  __ bind(&done);
+  __ ld(ra, MemOperand(sp));
+  __ daddiu(sp, sp, 8);
+  __ jr(ra);
+  __ nop();
+
+  __ bind(&end);
+  __ Branch(&near_start);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  code->Print(std::cout);
+#endif
+  F1 f = FUNCTION_CAST<F1>(code->entry());
+  for (int i = 0; i < kNumCases; ++i) {
+    int64_t res =
+        reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0));
+    ::printf("f(%d) = %" PRId64 "\n", i, res);
+    CHECK_EQ(values[i], res);
+  }
+}
+
 #undef __
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 1ed43ac338aa78..938178efb96107 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -329,7 +329,7 @@ UNINITIALIZED_TEST(PartialSerialization) {
                                            &partial_sink);
       partial_serializer.Serialize(&raw_foo);

-      startup_serializer.SerializeWeakReferences();
+      startup_serializer.SerializeWeakReferencesAndDeferred();

       SnapshotData startup_snapshot(startup_serializer);
       SnapshotData partial_snapshot(partial_serializer);
@@ -447,7 +447,7 @@ UNINITIALIZED_TEST(ContextSerialization) {
       PartialSerializer partial_serializer(isolate, &startup_serializer,
                                            &partial_sink);
       partial_serializer.Serialize(&raw_context);
-      startup_serializer.SerializeWeakReferences();
+      startup_serializer.SerializeWeakReferencesAndDeferred();

       SnapshotData startup_snapshot(startup_serializer);
       SnapshotData partial_snapshot(partial_serializer);
@@ -582,7 +582,7 @@ UNINITIALIZED_TEST(CustomContextSerialization) {
      PartialSerializer partial_serializer(isolate, &startup_serializer,
                                           &partial_sink);
      partial_serializer.Serialize(&raw_context);
-     startup_serializer.SerializeWeakReferences();
+     startup_serializer.SerializeWeakReferencesAndDeferred();

      SnapshotData startup_snapshot(startup_serializer);
      SnapshotData partial_snapshot(partial_serializer);
@@ -702,6 +702,57 @@ TEST(PerIsolateSnapshotBlobs) {
 }


+static void SerializationFunctionTemplate(
+    const v8::FunctionCallbackInfo<v8::Value>& args) {
+  args.GetReturnValue().Set(args[0]);
+}
+
+
+TEST(PerIsolateSnapshotBlobsOutdatedContextWithOverflow) {
+  DisableTurbofan();
+
+  const char* source1 =
+      "var o = {};"
+      "(function() {"
+      "  function f1(x) { return f2(x) instanceof Array; }"
+      "  function f2(x) { return foo.bar(x); }"
+      "  o.a = f2.bind(null);"
+      "  o.b = 1;"
+      "  o.c = 2;"
+      "  o.d = 3;"
+      "  o.e = 4;"
+      "})();\n";
+
+  const char* source2 = "o.a(42)";
+
+  v8::StartupData data = v8::V8::CreateSnapshotDataBlob(source1);
+
+  v8::Isolate::CreateParams params;
+  params.snapshot_blob = &data;
+  params.array_buffer_allocator = CcTest::array_buffer_allocator();
+
+  v8::Isolate* isolate = v8::Isolate::New(params);
+  {
+    v8::Isolate::Scope i_scope(isolate);
+    v8::HandleScope h_scope(isolate);
+
+    v8::Local<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
+    v8::Local<v8::ObjectTemplate> property = v8::ObjectTemplate::New(isolate);
+    v8::Local<v8::FunctionTemplate> function =
+        v8::FunctionTemplate::New(isolate, SerializationFunctionTemplate);
+    property->Set(isolate, "bar", function);
+    global->Set(isolate, "foo", property);
+
+    v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
+    delete[] data.data;  // We can dispose of the snapshot blob now.
+    v8::Context::Scope c_scope(context);
+    v8::Local<v8::Value> result = CompileRun(source2);
+    CHECK(v8_str("42")->Equals(result));
+  }
+  isolate->Dispose();
+}
+
+
 TEST(PerIsolateSnapshotBlobsWithLocker) {
   DisableTurbofan();
   v8::Isolate::CreateParams create_params;
@@ -738,6 +789,44 @@ TEST(PerIsolateSnapshotBlobsWithLocker) {
 }


+TEST(SnapshotBlobsStackOverflow) {
+  DisableTurbofan();
+  const char* source =
+      "var a = [0];"
+      "var b = a;"
+      "for (var i = 0; i < 10000; i++) {"
+      "  var c = [i];"
+      "  b.push(c);"
+      "  b.push(c);"
+      "  b = c;"
+      "}";
+
+  v8::StartupData data = v8::V8::CreateSnapshotDataBlob(source);
+
+  v8::Isolate::CreateParams params;
+  params.snapshot_blob = &data;
+  params.array_buffer_allocator = CcTest::array_buffer_allocator();
+
+  v8::Isolate* isolate = v8::Isolate::New(params);
+  {
+    v8::Isolate::Scope i_scope(isolate);
+    v8::HandleScope h_scope(isolate);
+    v8::Local<v8::Context> context = v8::Context::New(isolate);
+    delete[] data.data;  // We can dispose of the snapshot blob now.
+    v8::Context::Scope c_scope(context);
+    const char* test =
+        "var sum = 0;"
+        "while (a) {"
+        "  sum += a[0];"
+        "  a = a[1];"
+        "}"
+        "sum";
+    CHECK_EQ(9999 * 5000, CompileRun(test)->ToInt32(isolate)->Int32Value());
+  }
+  isolate->Dispose();
+}
+
+
 TEST(TestThatAlwaysSucceeds) {
 }
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-cr493566.js b/deps/v8/test/mjsunit/es6/regress/regress-cr493566.js
new file mode 100644
index 00000000000000..9bb313ffbef533
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/regress/regress-cr493566.js
@@ -0,0 +1,80 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies
+
+"use strict";
+
+
+var global = this;
+
+
+(function TestGlobalReceiver() {
+  class A {
+    s() {
+      super.bla = 10;
+    }
+  }
+  new A().s.call(global);
+  assertEquals(10, global.bla);
+})();
+
+
+(function TestProxyProto() {
+  var calls = 0;
+  var handler = {
+    getPropertyDescriptor: function(name) {
+      calls++;
+      return undefined;
+    }
+  };
+
+  var proto = {};
+  var proxy = Proxy.create(handler, proto);
+  var object = {
+    __proto__: proxy,
+    setX(v) {
+      super.x = v;
+    },
+    setSymbol(sym, v) {
+      super[sym] = v;
+    }
+  };
+
+  object.setX(1);
+  assertEquals(1, Object.getOwnPropertyDescriptor(object, 'x').value);
+  assertEquals(1, calls);
+
+  var sym = Symbol();
+  object.setSymbol.call(global, sym, 2);
+  assertEquals(2, Object.getOwnPropertyDescriptor(global, sym).value);
+  // We currently do not invoke proxy traps for symbols
+  assertEquals(1, calls);
+})();
+
+
+(function TestProxyReceiver() {
+  var object = {
+    setY(v) {
+      super.y = v;
+    }
+  };
+
+  var calls = 0;
+  var handler = {
+    getPropertyDescriptor(name) {
+      assertUnreachable();
+    },
+    set(receiver, name, value) {
+      calls++;
+      assertEquals(proxy, receiver);
+      assertEquals('y', name);
+      assertEquals(3, value);
+    }
+  };
+
+  var proxy = Proxy.create(handler);
+  object.setY.call(proxy, 3);
+  assertEquals(1, calls);
+})();
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index dfbe3b5d638a27..71a7de4ec7d03e 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -561,10 +561,6 @@
   'math-floor-of-div-minus-zero': [SKIP],
 }],  # 'arch == mips64el'

-['arch == mips64el and simulator_run == False', {
-  # Random failures on HW, need investigation.
-  'debug-*': [SKIP],
-}],

 ##############################################################################
 ['system == windows', {
   # TODO(mstarzinger): Too slow with turbo fan.
diff --git a/deps/v8/test/mjsunit/regress/regress-487981.js b/deps/v8/test/mjsunit/regress/regress-487981.js
new file mode 100644
index 00000000000000..829c25c59d230e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-487981.js
@@ -0,0 +1,22 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --stress-compaction
+
+// To reliably reproduce the crash use --verify-heap --random-seed=-133185440
+
+function __f_2(o) {
+  return o.field.b.x;
+}
+
+try {
+  %OptimizeFunctionOnNextCall(__f_2);
+  __v_1 = __f_2();
+} catch(e) { }
+
+function __f_3() { __f_3(/./.test()); };
+
+try {
+__f_3();
+} catch(e) { }
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-478612.js b/deps/v8/test/mjsunit/regress/regress-crbug-478612.js
new file mode 100644
index 00000000000000..3419722cd018d9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-478612.js
@@ -0,0 +1,52 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// This is used to force binary operations below to have tagged representation.
+var z = {valueOf: function() { return 3; }};
+
+
+function f() {
+  var y = -2;
+  return (1 & z) - y++;
+}
+
+assertEquals(3, f());
+assertEquals(3, f());
+%OptimizeFunctionOnNextCall(f);
+assertEquals(3, f());
+
+
+function g() {
+  var y = 2;
+  return (1 & z) | y++;
+}
+
+assertEquals(3, g());
+assertEquals(3, g());
+%OptimizeFunctionOnNextCall(g);
+assertEquals(3, g());
+
+
+function h() {
+  var y = 3;
+  return (3 & z) & y++;
+}
+
+assertEquals(3, h());
+assertEquals(3, h());
+%OptimizeFunctionOnNextCall(h);
+assertEquals(3, h());
+
+
+function i() {
+  var y = 2;
+  return (1 & z) ^ y++;
+}
+
+assertEquals(3, i());
+assertEquals(3, i());
+%OptimizeFunctionOnNextCall(i);
+assertEquals(3, i());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-500497.js b/deps/v8/test/mjsunit/regress/regress-crbug-500497.js
new file mode 100644
index 00000000000000..9117440c2c843f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-500497.js
@@ -0,0 +1,33 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// New space must be at max capacity to trigger pretenuring decision.
+// Flags: --allow-natives-syntax --verify-heap --max-semi-space-size=1
+
+var global = [];  // Used to keep some objects alive.
+
+function Ctor() {
+  var result = {a: {}, b: {}, c: {}, d: {}, e: {}, f: {}, g: {}};
+  return result;
+}
+
+for (var i = 0; i < 120; i++) {
+  // Make the "a" property long-lived, while everything else is short-lived.
+  global.push(Ctor().a);
+  (function FillNewSpace() { new Array(10000); })();
+}
+
+// The bad situation is only triggered if Ctor wasn't optimized too early.
+assertUnoptimized(Ctor);
+// Optimized code for Ctor will pretenure the "a" property, so it will have
+// three allocations:
+// #1 Allocate the "result" object in new-space.
+// #2 Allocate the object stored in the "a" property in old-space.
+// #3 Allocate the objects for the "b" through "g" properties in new-space.
+%OptimizeFunctionOnNextCall(Ctor);
+for (var i = 0; i < 10000; i++) {
+  // At least one of these calls will run out of new space. The bug is
+  // triggered when it is allocation #3 that triggers GC.
+  Ctor();
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-502930.js b/deps/v8/test/mjsunit/regress/regress-crbug-502930.js
new file mode 100644
index 00000000000000..ef21a1a69e1eb4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-502930.js
@@ -0,0 +1,27 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+var accessor_to_data_case = (function() {
+  var v = {};
+  Object.defineProperty(v, "foo", { get: function() { return 42; }, configurable: true});
+
+  var obj = {};
+  obj["boom"] = v;
+
+  Object.defineProperty(v, "foo", { value: 0, writable: true, configurable: true });
+  return obj;
+})();
+
+
+var data_to_accessor_case = (function() {
+  var v = {};
+  Object.defineProperty(v, "bar", { value: 0, writable: true, configurable: true });
+
+  var obj = {};
+  obj["bam"] = v;
+
+  Object.defineProperty(v, "bar", { get: function() { return 42; }, configurable: true});
+  return obj;
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-514268.js b/deps/v8/test/mjsunit/regress/regress-crbug-514268.js
new file mode 100644
index 00000000000000..75d9970eed7d3a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-514268.js
@@ -0,0 +1,23 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function bar(a) {
+  a.pop();
+}
+
+function foo(a) {
+  assertEquals(2, a.length);
+  var d;
+  for (d in a) {
+    bar(a);
+  }
+  // If this fails, bar was not called exactly once.
+  assertEquals(1, a.length);
+}
+
+foo([1,2]);
+foo([2,3]);
+%OptimizeFunctionOnNextCall(foo);
+foo([1,2]);
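
Two short illustrative sketches follow; neither is part of the upstream patch. First, the surrogate-pair arithmetic behind the expected values in the Utf16MissingTrailing and Utf16Trailing3Byte tests above, as a self-contained snippet. The constants mirror what unibrow::Utf16::LeadSurrogate/TrailSurrogate compute; everything else is plain standard C++.

// Illustration only -- not part of the upstream patch.
#include <cstdint>
#include <cstdio>

int main() {
  // The repeated 4-byte UTF-8 sequence f0 9d 80 9e decodes to U+1D01E,
  // which is above kMaxNonSurrogateCharCode (0xFFFF) and therefore needs
  // a lead/trail surrogate pair in UTF-16 output.
  uint32_t c = 0x1D01E;
  uint16_t lead = 0xD800 + ((c - 0x10000) >> 10);     // 0xD834
  uint16_t trail = 0xDC00 + ((c - 0x10000) & 0x3FF);  // 0xDC1E
  std::printf("U+%X -> %04X %04X\n", static_cast<unsigned>(c), lead, trail);

  // By contrast, the 3-byte sequence e2 80 a6 decodes to U+2026 (horizontal
  // ellipsis), which fits in a single UTF-16 code unit -- exactly what
  // CHECK_EQ((*value)[value.length() - 1], 0x2026) in Utf16Trailing3Byte
  // verifies.
  return 0;
}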
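
Second, a minimal sketch of the embedder flow that the new serializer tests (PerIsolateSnapshotBlobsOutdatedContextWithOverflow, SnapshotBlobsStackOverflow) exercise: bake a script into a startup snapshot, then boot an isolate from the blob. Only API calls already shown in the tests are used; the helper name BootFromSnapshot is hypothetical and error handling is elided.

// Illustration only -- not part of the upstream patch. Assumes the
// 4.4-era public API used in the tests above.
#include "include/v8.h"

v8::Isolate* BootFromSnapshot(const char* script,
                              v8::ArrayBuffer::Allocator* allocator) {
  // Runs |script| once and serializes the resulting heap, including side
  // effects such as the deep list built in SnapshotBlobsStackOverflow.
  v8::StartupData blob = v8::V8::CreateSnapshotDataBlob(script);

  v8::Isolate::CreateParams params;
  params.snapshot_blob = &blob;
  params.array_buffer_allocator = allocator;

  // The new isolate deserializes the snapshot instead of re-running the
  // script. As in the tests, the embedder owns blob.data and may delete[]
  // it once a context has been created from the snapshot.
  return v8::Isolate::New(params);
}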