Changeset 283089 in webkit for trunk/Source/JavaScriptCore/jit/JIT.cpp
- Timestamp: Sep 26, 2021, 2:20:52 PM (4 years ago)
- File: 1 edited
Legend:
- Unmodified: no prefix
- Added: prefixed with +
- Removed: prefixed with -
trunk/Source/JavaScriptCore/jit/JIT.cpp
--- trunk/Source/JavaScriptCore/jit/JIT.cpp (r283083)
+++ trunk/Source/JavaScriptCore/jit/JIT.cpp (r283089)

 JIT::JIT(VM& vm, CodeBlock* codeBlock, BytecodeIndex loopOSREntryBytecodeIndex)
-    : JSInterfaceJIT(&vm, nullptr)
+    : JSInterfaceJIT(&vm, codeBlock)
     , m_interpreter(vm.interpreter)
     , m_labels(codeBlock ? codeBlock->instructions().size() : 0)
…
     , m_loopOSREntryBytecodeIndex(loopOSREntryBytecodeIndex)
 {
-    m_globalObjectConstant = m_constantPool.add(JITConstantPool::Type::GlobalObject);
-    m_profiledCodeBlock = codeBlock;
-    m_unlinkedCodeBlock = codeBlock->unlinkedCodeBlock();
 }

…

     JumpList skipOptimize;
-    loadPtr(addressFor(CallFrameSlot::codeBlock), regT0);
-    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), Address(regT0, CodeBlock::offsetOfJITExecuteCounter())));
+
+    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
     ASSERT(!m_bytecodeIndex.offset());

…
 }

-void JIT::emitNotifyWriteWatchpoint(GPRReg pointerToSet)
-{
-    auto ok = branchTestPtr(Zero, pointerToSet);
+void JIT::emitNotifyWrite(GPRReg pointerToSet)
+{
     addSlowCase(branch8(NotEqual, Address(pointerToSet, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated)));
-    ok.link(this);
-}
-
-void JIT::emitVarReadOnlyCheck(ResolveType resolveType, GPRReg scratchGPR)
-{
-    if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks) {
-        loadGlobalObject(scratchGPR);
-        loadPtr(Address(scratchGPR, OBJECT_OFFSETOF(JSGlobalObject, m_varReadOnlyWatchpoint)), scratchGPR);
-        addSlowCase(branch8(Equal, Address(scratchGPR, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated)));
-    }
+}
+
+void JIT::emitVarReadOnlyCheck(ResolveType resolveType)
+{
+    if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
+        addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varReadOnlyWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated)));
 }

…
         return;

-    addPtr(TrustedImm32(stackPointerOffsetFor(m_unlinkedCodeBlock) * sizeof(Register)), callFrameRegister, regT0);
+    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0);
     Jump ok = branchPtr(Equal, regT0, stackPointerRegister);
     breakpoint();
     ok.link(this);
-}
-
-void JIT::resetSP()
-{
-    addPtr(TrustedImm32(stackPointerOffsetFor(m_unlinkedCodeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
-    checkStackPointerAlignment();
 }

…
 }

-void JIT::emitPutCodeBlockToFrameInPrologue(GPRReg result)
-{
-    RELEASE_ASSERT(m_unlinkedCodeBlock->codeType() == FunctionCode);
-    emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, result);
-    loadPtr(Address(result, JSFunction::offsetOfExecutableOrRareData()), result);
-    auto hasExecutable = branchTestPtr(Zero, result, CCallHelpers::TrustedImm32(JSFunction::rareDataTag));
-    loadPtr(Address(result, FunctionRareData::offsetOfExecutable() - JSFunction::rareDataTag), result);
-    hasExecutable.link(this);
-    if (m_unlinkedCodeBlock->isConstructor())
-        loadPtr(Address(result, FunctionExecutable::offsetOfCodeBlockForConstruct()), result);
-    else
-        loadPtr(Address(result, FunctionExecutable::offsetOfCodeBlockForCall()), result);
-
-    loadPtr(Address(result, ExecutableToCodeBlockEdge::offsetOfCodeBlock()), result);
-    emitPutToCallFrameHeader(result, CallFrameSlot::codeBlock);
-
-#if ASSERT_ENABLED
-    probeDebug([=] (Probe::Context& ctx) {
-        CodeBlock* codeBlock = ctx.fp<CallFrame*>()->codeBlock();
-        RELEASE_ASSERT(codeBlock->jitType() == JITType::BaselineJIT);
-    });
-#endif
-}
-
 void JIT::privateCompileMainPass()
 {
     if (JITInternal::verbose)
-        dataLog("Compiling ", *m_profiledCodeBlock, "\n");
+        dataLog("Compiling ", *m_codeBlock, "\n");

     jitAssertTagsInPlace();
     jitAssertArgumentCountSane();

-    auto& instructions = m_unlinkedCodeBlock->instructions();
-    unsigned instructionCount = m_unlinkedCodeBlock->instructions().size();
+    auto& instructions = m_codeBlock->instructions();
+    unsigned instructionCount = m_codeBlock->instructions().size();

     m_callLinkInfoIndex = 0;

+    VM& vm = m_codeBlock->vm();
     BytecodeIndex startBytecodeIndex(0);
+    if (m_loopOSREntryBytecodeIndex && (m_codeBlock->inherits<ProgramCodeBlock>(vm) || m_codeBlock->inherits<ModuleProgramCodeBlock>(vm))) {
+        // We can only do this optimization because we execute ProgramCodeBlock's exactly once.
+        // This optimization would be invalid otherwise. When the LLInt determines it wants to
+        // do OSR entry into the baseline JIT in a loop, it will pass in the bytecode offset it
+        // was executing at when it kicked off our compilation. We only need to compile code for
+        // anything reachable from that bytecode offset.
+
+        // We only bother building the bytecode graph if it could save time and executable
+        // memory. We pick an arbitrary offset where we deem this is profitable.
+        if (m_loopOSREntryBytecodeIndex.offset() >= 200) {
+            // As a simplification, we don't find all bytecode ranges that are unreachable.
+            // Instead, we just find the minimum bytecode offset that is reachable, and
+            // compile code from that bytecode offset onwards.
+
+            BytecodeGraph graph(m_codeBlock, m_codeBlock->instructions());
+            BytecodeBasicBlock* block = graph.findBasicBlockForBytecodeOffset(m_loopOSREntryBytecodeIndex.offset());
+            RELEASE_ASSERT(block);
+
+            GraphNodeWorklist<BytecodeBasicBlock*> worklist;
+            startBytecodeIndex = BytecodeIndex();
+            worklist.push(block);
+
+            while (BytecodeBasicBlock* block = worklist.pop()) {
+                startBytecodeIndex = BytecodeIndex(std::min(startBytecodeIndex.offset(), block->leaderOffset()));
+                for (unsigned successorIndex : block->successors())
+                    worklist.push(&graph[successorIndex]);
+
+                // Also add catch blocks for bytecodes that throw.
+                if (m_codeBlock->numberOfExceptionHandlers()) {
+                    for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) {
+                        auto instruction = instructions.at(bytecodeOffset);
+                        if (auto* handler = m_codeBlock->handlerForBytecodeIndex(BytecodeIndex(bytecodeOffset)))
+                            worklist.push(graph.findBasicBlockWithLeaderOffset(handler->target));
+
+                        bytecodeOffset += instruction->size();
+                    }
+                }
+            }
+        }
+    }

     m_bytecodeCountHavingSlowCase = 0;
…
         unsigned bytecodeOffset = m_bytecodeIndex.offset();
         if (UNLIKELY(Options::traceBaselineJITExecution())) {
+            CodeBlock* codeBlock = m_codeBlock;
             probeDebug([=] (Probe::Context& ctx) {
-                CodeBlock* codeBlock = ctx.fp<CallFrame*>()->codeBlock();
                 dataLogLn("JIT [", bytecodeOffset, "] ", opcodeNames[opcodeID], " cfr ", RawPointer(ctx.fp()), " @ ", codeBlock);
             });
         }
-
-        if (opcodeID != op_catch)
-            assertStackPointerOffset();

         switch (opcodeID) {
…
         BytecodeIndex firstTo = m_bytecodeIndex;

-        const Instruction* currentInstruction = m_unlinkedCodeBlock->instructions().at(m_bytecodeIndex).ptr();
+        const Instruction* currentInstruction = m_codeBlock->instructions().at(m_bytecodeIndex).ptr();

         if (JITInternal::verbose)
…
         if (UNLIKELY(Options::traceBaselineJITExecution())) {
             unsigned bytecodeOffset = m_bytecodeIndex.offset();
+            CodeBlock* codeBlock = m_codeBlock;
             probeDebug([=] (Probe::Context& ctx) {
-                CodeBlock* codeBlock = ctx.fp<CallFrame*>()->codeBlock();
                 dataLogLn("JIT [", bytecodeOffset, "] SLOW ", opcodeNames[opcodeID], " cfr ", RawPointer(ctx.fp()), " @ ", codeBlock);
             });
…
 }

-void JIT::emitMaterializeMetadataAndConstantPoolRegisters()
-{
-    loadPtr(addressFor(CallFrameSlot::codeBlock), regT0);
-    loadPtr(Address(regT0, CodeBlock::offsetOfMetadataTable()), s_metadataGPR);
-    loadPtr(Address(regT0, CodeBlock::offsetOfJITData()), regT0);
-    loadPtr(Address(regT0, CodeBlock::JITData::offsetOfJITConstantPool()), s_constantsGPR);
-}
-
-void JIT::emitRestoreCalleeSaves()
-{
-    Base::emitRestoreCalleeSavesFor(&RegisterAtOffsetList::llintBaselineCalleeSaveRegisters());
-}
-
 void JIT::compileAndLinkWithoutFinalizing(JITCompilationEffort effort)
 {
-    DFG::CapabilityLevel level = m_profiledCodeBlock->capabilityLevel();
+    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
     switch (level) {
     case DFG::CannotCompile:
         m_canBeOptimized = false;
+        m_canBeOptimizedOrInlined = false;
         m_shouldEmitProfiling = false;
         break;
…
     case DFG::CanCompileAndInline:
         m_canBeOptimized = true;
+        m_canBeOptimizedOrInlined = true;
         m_shouldEmitProfiling = true;
         break;
…
         break;
     }
-
-    if (m_unlinkedCodeBlock->numberOfUnlinkedSwitchJumpTables() || m_unlinkedCodeBlock->numberOfUnlinkedStringSwitchJumpTables()) {
-        if (m_unlinkedCodeBlock->numberOfUnlinkedSwitchJumpTables())
-            m_switchJumpTables = FixedVector<SimpleJumpTable>(m_unlinkedCodeBlock->numberOfUnlinkedSwitchJumpTables());
-        if (m_unlinkedCodeBlock->numberOfUnlinkedStringSwitchJumpTables())
-            m_stringSwitchJumpTables = FixedVector<StringJumpTable>(m_unlinkedCodeBlock->numberOfUnlinkedStringSwitchJumpTables());
-    }
-
-    if (UNLIKELY(Options::dumpDisassembly() || (m_vm->m_perBytecodeProfiler && Options::disassembleBaselineForProfiler()))) {
-        // FIXME: build a disassembler off of UnlinkedCodeBlock.
-        m_disassembler = makeUnique<JITDisassembler>(m_profiledCodeBlock);
-    }
+
+    switch (m_codeBlock->codeType()) {
+    case GlobalCode:
+    case ModuleCode:
+    case EvalCode:
+        m_codeBlock->m_shouldAlwaysBeInlined = false;
+        break;
+    case FunctionCode:
+        // We could have already set it to false because we detected an uninlineable call.
+        // Don't override that observation.
+        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
+        break;
+    }
+
+    if (m_codeBlock->numberOfUnlinkedSwitchJumpTables() || m_codeBlock->numberOfUnlinkedStringSwitchJumpTables()) {
+        ConcurrentJSLocker locker(m_codeBlock->m_lock);
+        if (m_codeBlock->numberOfUnlinkedSwitchJumpTables())
+            m_codeBlock->ensureJITData(locker).m_switchJumpTables = FixedVector<SimpleJumpTable>(m_codeBlock->numberOfUnlinkedSwitchJumpTables());
+        if (m_codeBlock->numberOfUnlinkedStringSwitchJumpTables())
+            m_codeBlock->ensureJITData(locker).m_stringSwitchJumpTables = FixedVector<StringJumpTable>(m_codeBlock->numberOfUnlinkedStringSwitchJumpTables());
+    }
+
+    if (UNLIKELY(Options::dumpDisassembly() || (m_vm->m_perBytecodeProfiler && Options::disassembleBaselineForProfiler())))
+        m_disassembler = makeUnique<JITDisassembler>(m_codeBlock);
     if (UNLIKELY(m_vm->m_perBytecodeProfiler)) {
-        // FIXME: build profiler disassembler off UnlinkedCodeBlock.
         m_compilation = adoptRef(
             new Profiler::Compilation(
-                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_profiledCodeBlock),
+                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
                 Profiler::Baseline));
-        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_profiledCodeBlock);
+        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
     }
…

     emitFunctionPrologue();
-    if (m_unlinkedCodeBlock->codeType() == FunctionCode)
-        emitPutCodeBlockToFrameInPrologue();
+    emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);

     Label beginLabel(this);

-    int frameTopOffset = stackPointerOffsetFor(m_unlinkedCodeBlock) * sizeof(Register);
+    int frameTopOffset = stackPointerOffsetFor(m_codeBlock) * sizeof(Register);
     unsigned maxFrameSize = -frameTopOffset;
     addPtr(TrustedImm32(frameTopOffset), callFrameRegister, regT1);
…
     checkStackPointerAlignment();

-    emitSaveCalleeSavesFor(&RegisterAtOffsetList::llintBaselineCalleeSaveRegisters());
+    emitSaveCalleeSaves();
     emitMaterializeTagCheckRegisters();
-    emitMaterializeMetadataAndConstantPoolRegisters();
-
-    if (m_unlinkedCodeBlock->codeType() == FunctionCode) {
+
+    if (m_codeBlock->codeType() == FunctionCode) {
         ASSERT(!m_bytecodeIndex);
-        if (shouldEmitProfiling() && (!m_unlinkedCodeBlock->isConstructor() || m_unlinkedCodeBlock->numParameters() > 1)) {
-            emitGetFromCallFrameHeaderPtr(CallFrameSlot::codeBlock, regT2);
-            loadPtr(Address(regT2, CodeBlock::offsetOfArgumentValueProfiles() + FixedVector<ValueProfile>::offsetOfStorage()), regT2);
-
-            for (unsigned argument = 0; argument < m_unlinkedCodeBlock->numParameters(); ++argument) {
+        if (shouldEmitProfiling()) {
+            for (unsigned argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                 // If this is a constructor, then we want to put in a dummy profiling site (to
                 // keep things consistent) but we don't actually want to record the dummy value.
-                if (m_unlinkedCodeBlock->isConstructor() && !argument)
+                if (m_codeBlock->isConstructor() && !argument)
                     continue;
                 int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
…
                 load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultRegs.tagGPR());
 #endif
-                storeValue(resultRegs, Address(regT2, argument * sizeof(ValueProfile) + ValueProfile::offsetOfFirstBucket()));
+                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument), resultRegs);
             }
         }
     }

-    RELEASE_ASSERT(!JITCode::isJIT(m_profiledCodeBlock->jitType()));
+    RELEASE_ASSERT(!JITCode::isJIT(m_codeBlock->jitType()));

     if (UNLIKELY(sizeMarker))
…
     if (maxFrameExtentForSlowPathCall)
         addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
-    emitGetFromCallFrameHeaderPtr(CallFrameSlot::codeBlock, regT0);
-    callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, regT0);
+    callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

     // If the number of parameters is 1, we never require arity fixup.
-    bool requiresArityFixup = m_unlinkedCodeBlock->numParameters() != 1;
-    if (m_unlinkedCodeBlock->codeType() == FunctionCode && requiresArityFixup) {
+    bool requiresArityFixup = m_codeBlock->m_numParameters != 1;
+    if (m_codeBlock->codeType() == FunctionCode && requiresArityFixup) {
         m_arityCheck = label();
-
+        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
         emitFunctionPrologue();
-        emitPutCodeBlockToFrameInPrologue(regT0);
-        store8(TrustedImm32(0), Address(regT0, CodeBlock::offsetOfShouldAlwaysBeInlined()));
+        emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);

         load32(payloadFor(CallFrameSlot::argumentCountIncludingThis), regT1);
-        branch32(AboveOrEqual, regT1, TrustedImm32(m_unlinkedCodeBlock->numParameters())).linkTo(beginLabel, this);
+        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

         m_bytecodeIndex = BytecodeIndex(0);
…
         if (maxFrameExtentForSlowPathCall)
             addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
-        loadPtr(Address(regT0, CodeBlock::offsetOfGlobalObject()), argumentGPR0);
-        callOperationWithCallFrameRollbackOnException(m_unlinkedCodeBlock->isConstructor() ? operationConstructArityCheck : operationCallArityCheck, argumentGPR0);
+        callOperationWithCallFrameRollbackOnException(m_codeBlock->isConstructor() ? operationConstructArityCheck : operationCallArityCheck, m_codeBlock->globalObject());
         if (maxFrameExtentForSlowPathCall)
             addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
…
     m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

-    m_linkBuffer = std::unique_ptr<LinkBuffer>(new LinkBuffer(*this, m_unlinkedCodeBlock, LinkBuffer::Profile::BaselineJIT, effort));
+    m_linkBuffer = std::unique_ptr<LinkBuffer>(new LinkBuffer(*this, m_codeBlock, LinkBuffer::Profile::BaselineJIT, effort));
     link();
 }
…
         case SwitchRecord::Immediate:
         case SwitchRecord::Character: {
-            const UnlinkedSimpleJumpTable& unlinkedTable = m_unlinkedCodeBlock->unlinkedSwitchJumpTable(tableIndex);
-            SimpleJumpTable& linkedTable = m_switchJumpTables[tableIndex];
+            const UnlinkedSimpleJumpTable& unlinkedTable = m_codeBlock->unlinkedSwitchJumpTable(tableIndex);
+            SimpleJumpTable& linkedTable = m_codeBlock->switchJumpTable(tableIndex);
             linkedTable.m_ctiDefault = patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + record.defaultOffset]);
             for (unsigned j = 0; j < unlinkedTable.m_branchOffsets.size(); ++j) {
…

         case SwitchRecord::String: {
-            const UnlinkedStringJumpTable& unlinkedTable = m_unlinkedCodeBlock->unlinkedStringSwitchJumpTable(tableIndex);
-            StringJumpTable& linkedTable = m_stringSwitchJumpTables[tableIndex];
+            const UnlinkedStringJumpTable& unlinkedTable = m_codeBlock->unlinkedStringSwitchJumpTable(tableIndex);
+            StringJumpTable& linkedTable = m_codeBlock->stringSwitchJumpTable(tableIndex);
             auto ctiDefault = patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + record.defaultOffset]);
             for (auto& location : unlinkedTable.m_offsetTable.values()) {
…
     }

-#if USE(JSVALUE64)
-    auto finalizeICs = [&] (auto& generators) {
-        for (auto& gen : generators) {
-            gen.m_unlinkedStubInfo->start = patchBuffer.locationOf<JITStubRoutinePtrTag>(gen.m_start);
-            gen.m_unlinkedStubInfo->doneLocation = patchBuffer.locationOf<JSInternalPtrTag>(gen.m_done);
-            gen.m_unlinkedStubInfo->slowPathStartLocation = patchBuffer.locationOf<JITStubRoutinePtrTag>(gen.m_slowPathBegin);
-        }
-    };
-
-    finalizeICs(m_getByIds);
-    finalizeICs(m_getByVals);
-    finalizeICs(m_getByIdsWithThis);
-    finalizeICs(m_putByIds);
-    finalizeICs(m_putByVals);
-    finalizeICs(m_delByIds);
-    finalizeICs(m_delByVals);
-    finalizeICs(m_inByIds);
-    finalizeICs(m_inByVals);
-    finalizeICs(m_instanceOfs);
-    finalizeICs(m_privateBrandAccesses);
-#else
     finalizeInlineCaches(m_getByIds, patchBuffer);
     finalizeInlineCaches(m_getByVals, patchBuffer);
…
     finalizeInlineCaches(m_instanceOfs, patchBuffer);
     finalizeInlineCaches(m_privateBrandAccesses, patchBuffer);
-#endif

     for (auto& compilationInfo : m_callCompilationInfo) {
-#if USE(JSVALUE64)
-        UnlinkedCallLinkInfo& info = *compilationInfo.unlinkedCallLinkInfo;
-        info.doneLocation = patchBuffer.locationOf<JSInternalPtrTag>(compilationInfo.doneLocation);
-#else
         CallLinkInfo& info = *compilationInfo.callLinkInfo;
         info.setCodeLocations(
             patchBuffer.locationOf<JSInternalPtrTag>(compilationInfo.slowPathStart),
             patchBuffer.locationOf<JSInternalPtrTag>(compilationInfo.doneLocation));
-#endif
-
-    }
-
-    JITCodeMapBuilder jitCodeMapBuilder;
-    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
-        if (m_labels[bytecodeOffset].isSet())
-            jitCodeMapBuilder.append(BytecodeIndex(bytecodeOffset), patchBuffer.locationOf<JSEntryPtrTag>(m_labels[bytecodeOffset]));
+    }
+
+    {
+        JITCodeMapBuilder jitCodeMapBuilder;
+        for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
+            if (m_labels[bytecodeOffset].isSet())
+                jitCodeMapBuilder.append(BytecodeIndex(bytecodeOffset), patchBuffer.locationOf<JSEntryPtrTag>(m_labels[bytecodeOffset]));
+        }
+        m_codeBlock->setJITCodeMap(jitCodeMapBuilder.finalize());
     }

…

     if (UNLIKELY(m_compilation)) {
-        // FIXME: should we make the bytecode profiler know about UnlinkedCodeBlock?
         if (Options::disassembleBaselineForProfiler())
             m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
-        m_vm->m_perBytecodeProfiler->addCompilation(m_profiledCodeBlock, *m_compilation);
+        m_vm->m_perBytecodeProfiler->addCompilation(m_codeBlock, *m_compilation);
     }
…
         m_pcToCodeOriginMap = makeUnique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), patchBuffer);

-    // FIXME: Make a version of CodeBlockWithJITType that knows about UnlinkedCodeBlock.
     CodeRef<JSEntryPtrTag> result = FINALIZE_CODE(
         patchBuffer, JSEntryPtrTag,
-        "Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_profiledCodeBlock, JITType::BaselineJIT)).data());
+        "Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITType::BaselineJIT)).data());

     MacroAssemblerCodePtr<JSEntryPtrTag> withArityCheck = patchBuffer.locationOf<JSEntryPtrTag>(m_arityCheck);
-    m_jitCode = adoptRef(*new BaselineJITCode(result, withArityCheck));
-
-    m_jitCode->m_unlinkedCalls = WTFMove(m_unlinkedCalls);
-    m_jitCode->m_evalCallLinkInfos = WTFMove(m_evalCallLinkInfos);
-    m_jitCode->m_unlinkedStubInfos = WTFMove(m_unlinkedStubInfos);
-    m_jitCode->m_switchJumpTables = WTFMove(m_switchJumpTables);
-    m_jitCode->m_stringSwitchJumpTables = WTFMove(m_stringSwitchJumpTables);
-    m_jitCode->m_jitCodeMap = jitCodeMapBuilder.finalize();
-    m_jitCode->adoptMathICs(m_mathICs);
-    m_jitCode->m_constantPool = WTFMove(m_constantPool);
-#if USE(JSVALUE64)
-    m_jitCode->m_isShareable = m_isShareable;
-#else
-    m_jitCode->m_isShareable = false;
-#endif
+    m_jitCode = adoptRef(*new DirectJITCode(result, withArityCheck, JITType::BaselineJIT));

     if (JITInternal::verbose)
-        dataLogF("JIT generated code for %p at [%p, %p).\n", m_unlinkedCodeBlock, result.executableMemory()->start().untaggedPtr(), result.executableMemory()->end().untaggedPtr());
-}
-
-CompilationResult JIT::finalizeOnMainThread(CodeBlock* codeBlock)
+        dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start().untaggedPtr(), result.executableMemory()->end().untaggedPtr());
+}
+
+CompilationResult JIT::finalizeOnMainThread()
 {
     RELEASE_ASSERT(!isCompilationThread());
…
     m_linkBuffer->runMainThreadFinalizationTasks();

+    {
+        ConcurrentJSLocker locker(m_codeBlock->m_lock);
+        m_codeBlock->shrinkToFit(locker, CodeBlock::ShrinkMode::LateShrink);
+    }
+
+    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
+        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
+        // FIXME: <rdar://problem/39433318>.
+        handler.nativeCode = m_codeBlock->jitCodeMap().find(BytecodeIndex(handler.target)).retagged<ExceptionHandlerPtrTag>();
+    }
+
     if (m_pcToCodeOriginMap)
-        m_jitCode->m_pcToCodeOriginMap = WTFMove(m_pcToCodeOriginMap);
+        m_codeBlock->setPCToCodeOriginMap(WTFMove(m_pcToCodeOriginMap));

     m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->add(
         static_cast<double>(m_jitCode->size()) /
-        static_cast<double>(m_unlinkedCodeBlock->instructionsSize()));
+        static_cast<double>(m_codeBlock->instructionsSize()));

-    codeBlock->setupWithUnlinkedBaselineCode(m_jitCode.releaseNonNull());
+    m_codeBlock->setJITCode(m_jitCode.releaseNonNull());

     return CompilationSuccessful;
…
 }

-CompilationResult JIT::privateCompile(CodeBlock* codeBlock, JITCompilationEffort effort)
+CompilationResult JIT::privateCompile(JITCompilationEffort effort)
 {
     doMainThreadPreparationBeforeCompile();
     compileAndLinkWithoutFinalizing(effort);
-    return finalizeOnMainThread(codeBlock);
+    return finalizeOnMainThread();
 }
…
 }

-unsigned JIT::frameRegisterCountFor(UnlinkedCodeBlock* codeBlock)
+unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
 {
     ASSERT(static_cast<unsigned>(codeBlock->numCalleeLocals()) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->numCalleeLocals())));
…
 }

-unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
-{
-    return frameRegisterCountFor(codeBlock->unlinkedCodeBlock());
-}
-
-int JIT::stackPointerOffsetFor(UnlinkedCodeBlock* codeBlock)
+int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
 {
     return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
-}
-
-int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
-{
-    return stackPointerOffsetFor(codeBlock->unlinkedCodeBlock());
 }
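The change that repeats throughout this revert: r283083 compiled against an UnlinkedCodeBlock and left the concrete CodeBlock to be found at run time (loaded from the CallFrameSlot::codeBlock frame slot and addressed with per-field offsets), whereas r283089 returns to a JIT that holds the CodeBlock* it is compiling, so the same values can be baked into the generated code as absolute addresses. Below is a minimal sketch of the two emission styles for the execute-counter check, using only MacroAssembler calls that appear in the diff above (illustrative, not a copy of either revision; `increment` stands in for Options::executionCounterIncrementForEntry()):

    // r283083 style: the CodeBlock is only known at run time, so the generated code
    // first loads it out of the call frame, then addresses the execute counter
    // relative to that register.
    loadPtr(addressFor(CallFrameSlot::codeBlock), regT0);
    skipOptimize.append(branchAdd32(Signed, TrustedImm32(increment), Address(regT0, CodeBlock::offsetOfJITExecuteCounter())));

    // r283089 style: m_codeBlock is known while compiling, so the counter's address
    // can be embedded directly in the instruction stream.
    skipOptimize.append(branchAdd32(Signed, TrustedImm32(increment), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));

The same trade-off shows up in emitVarReadOnlyCheck, the prologue's CallFrameSlot::codeBlock store, and the arity-check slow path.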