Changeset 283139 in webkit for trunk/Source/JavaScriptCore/jit/JIT.cpp
- Timestamp: Sep 27, 2021, 2:53:19 PM (4 years ago)
- Files: 1 edited
trunk/Source/JavaScriptCore/jit/JIT.cpp
--- trunk/Source/JavaScriptCore/jit/JIT.cpp (r283129)
+++ trunk/Source/JavaScriptCore/jit/JIT.cpp (r283139)
…
 JIT::JIT(VM& vm, CodeBlock* codeBlock, BytecodeIndex loopOSREntryBytecodeIndex)
-    : JSInterfaceJIT(&vm, codeBlock)
+    : JSInterfaceJIT(&vm, nullptr)
     , m_interpreter(vm.interpreter)
     , m_labels(codeBlock ? codeBlock->instructions().size() : 0)
…
     , m_loopOSREntryBytecodeIndex(loopOSREntryBytecodeIndex)
 {
+    m_globalObjectConstant = m_constantPool.add(JITConstantPool::Type::GlobalObject);
+    m_profiledCodeBlock = codeBlock;
+    m_unlinkedCodeBlock = codeBlock->unlinkedCodeBlock();
 }

…
     JumpList skipOptimize;
-
-    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
+    loadPtr(addressFor(CallFrameSlot::codeBlock), regT0);
+    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), Address(regT0, CodeBlock::offsetOfJITExecuteCounter())));
     ASSERT(!m_bytecodeIndex.offset());
…
 }

-void JIT::emitNotifyWrite(GPRReg pointerToSet)
-{
+void JIT::emitNotifyWriteWatchpoint(GPRReg pointerToSet)
+{
+    auto ok = branchTestPtr(Zero, pointerToSet);
     addSlowCase(branch8(NotEqual, Address(pointerToSet, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated)));
-}
-
-void JIT::emitVarReadOnlyCheck(ResolveType resolveType)
-{
-    if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
-        addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varReadOnlyWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated)));
+    ok.link(this);
+}
+
+void JIT::emitVarReadOnlyCheck(ResolveType resolveType, GPRReg scratchGPR)
+{
+    if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks) {
+        loadGlobalObject(scratchGPR);
+        loadPtr(Address(scratchGPR, OBJECT_OFFSETOF(JSGlobalObject, m_varReadOnlyWatchpoint)), scratchGPR);
+        addSlowCase(branch8(Equal, Address(scratchGPR, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated)));
+    }
 }
…
         return;

-    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0);
+    addPtr(TrustedImm32(stackPointerOffsetFor(m_unlinkedCodeBlock) * sizeof(Register)), callFrameRegister, regT0);
     Jump ok = branchPtr(Equal, regT0, stackPointerRegister);
     breakpoint();
     ok.link(this);
+}
+
+void JIT::resetSP()
+{
+    addPtr(TrustedImm32(stackPointerOffsetFor(m_unlinkedCodeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+    checkStackPointerAlignment();
 }
…
 }

+void JIT::emitPutCodeBlockToFrameInPrologue(GPRReg result)
+{
+    RELEASE_ASSERT(m_unlinkedCodeBlock->codeType() == FunctionCode);
+    emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, result);
+    loadPtr(Address(result, JSFunction::offsetOfExecutableOrRareData()), result);
+    auto hasExecutable = branchTestPtr(Zero, result, CCallHelpers::TrustedImm32(JSFunction::rareDataTag));
+    loadPtr(Address(result, FunctionRareData::offsetOfExecutable() - JSFunction::rareDataTag), result);
+    hasExecutable.link(this);
+    if (m_unlinkedCodeBlock->isConstructor())
+        loadPtr(Address(result, FunctionExecutable::offsetOfCodeBlockForConstruct()), result);
+    else
+        loadPtr(Address(result, FunctionExecutable::offsetOfCodeBlockForCall()), result);
+
+    loadPtr(Address(result, ExecutableToCodeBlockEdge::offsetOfCodeBlock()), result);
+    emitPutToCallFrameHeader(result, CallFrameSlot::codeBlock);
+
+#if ASSERT_ENABLED
+    probeDebug([=] (Probe::Context& ctx) {
+        CodeBlock* codeBlock = ctx.fp<CallFrame*>()->codeBlock();
+        RELEASE_ASSERT(codeBlock->jitType() == JITType::BaselineJIT);
+    });
+#endif
+}
+
 void JIT::privateCompileMainPass()
 {
     if (JITInternal::verbose)
-        dataLog("Compiling ", *m_codeBlock, "\n");
+        dataLog("Compiling ", *m_profiledCodeBlock, "\n");

     jitAssertTagsInPlace();
     jitAssertArgumentCountSane();

-    auto& instructions = m_codeBlock->instructions();
-    unsigned instructionCount = m_codeBlock->instructions().size();
+    auto& instructions = m_unlinkedCodeBlock->instructions();
+    unsigned instructionCount = m_unlinkedCodeBlock->instructions().size();

     m_callLinkInfoIndex = 0;

-    VM& vm = m_codeBlock->vm();
     BytecodeIndex startBytecodeIndex(0);
-    if (m_loopOSREntryBytecodeIndex && (m_codeBlock->inherits<ProgramCodeBlock>(vm) || m_codeBlock->inherits<ModuleProgramCodeBlock>(vm))) {
-        // We can only do this optimization because we execute ProgramCodeBlock's exactly once.
-        // This optimization would be invalid otherwise. When the LLInt determines it wants to
-        // do OSR entry into the baseline JIT in a loop, it will pass in the bytecode offset it
-        // was executing at when it kicked off our compilation. We only need to compile code for
-        // anything reachable from that bytecode offset.
-
-        // We only bother building the bytecode graph if it could save time and executable
-        // memory. We pick an arbitrary offset where we deem this is profitable.
-        if (m_loopOSREntryBytecodeIndex.offset() >= 200) {
-            // As a simplification, we don't find all bytecode ranges that are unreachable.
-            // Instead, we just find the minimum bytecode offset that is reachable, and
-            // compile code from that bytecode offset onwards.
-
-            BytecodeGraph graph(m_codeBlock, m_codeBlock->instructions());
-            BytecodeBasicBlock* block = graph.findBasicBlockForBytecodeOffset(m_loopOSREntryBytecodeIndex.offset());
-            RELEASE_ASSERT(block);
-
-            GraphNodeWorklist<BytecodeBasicBlock*> worklist;
-            startBytecodeIndex = BytecodeIndex();
-            worklist.push(block);
-
-            while (BytecodeBasicBlock* block = worklist.pop()) {
-                startBytecodeIndex = BytecodeIndex(std::min(startBytecodeIndex.offset(), block->leaderOffset()));
-                for (unsigned successorIndex : block->successors())
-                    worklist.push(&graph[successorIndex]);
-
-                // Also add catch blocks for bytecodes that throw.
-                if (m_codeBlock->numberOfExceptionHandlers()) {
-                    for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) {
-                        auto instruction = instructions.at(bytecodeOffset);
-                        if (auto* handler = m_codeBlock->handlerForBytecodeIndex(BytecodeIndex(bytecodeOffset)))
-                            worklist.push(graph.findBasicBlockWithLeaderOffset(handler->target));
-
-                        bytecodeOffset += instruction->size();
-                    }
-                }
-            }
-        }
-    }

     m_bytecodeCountHavingSlowCase = 0;
…
         unsigned bytecodeOffset = m_bytecodeIndex.offset();
         if (UNLIKELY(Options::traceBaselineJITExecution())) {
-            CodeBlock* codeBlock = m_codeBlock;
             probeDebug([=] (Probe::Context& ctx) {
+                CodeBlock* codeBlock = ctx.fp<CallFrame*>()->codeBlock();
                 dataLogLn("JIT [", bytecodeOffset, "] ", opcodeNames[opcodeID], " cfr ", RawPointer(ctx.fp()), " @ ", codeBlock);
             });
         }
+
+        if (opcodeID != op_catch)
+            assertStackPointerOffset();

         switch (opcodeID) {
…
         BytecodeIndex firstTo = m_bytecodeIndex;

-        const Instruction* currentInstruction = m_codeBlock->instructions().at(m_bytecodeIndex).ptr();
+        const Instruction* currentInstruction = m_unlinkedCodeBlock->instructions().at(m_bytecodeIndex).ptr();

         if (JITInternal::verbose)
…
         if (UNLIKELY(Options::traceBaselineJITExecution())) {
             unsigned bytecodeOffset = m_bytecodeIndex.offset();
-            CodeBlock* codeBlock = m_codeBlock;
             probeDebug([=] (Probe::Context& ctx) {
+                CodeBlock* codeBlock = ctx.fp<CallFrame*>()->codeBlock();
                 dataLogLn("JIT [", bytecodeOffset, "] SLOW ", opcodeNames[opcodeID], " cfr ", RawPointer(ctx.fp()), " @ ", codeBlock);
             });
…
 }

+void JIT::emitMaterializeMetadataAndConstantPoolRegisters()
+{
+    loadPtr(addressFor(CallFrameSlot::codeBlock), regT0);
+    loadPtr(Address(regT0, CodeBlock::offsetOfMetadataTable()), s_metadataGPR);
+    loadPtr(Address(regT0, CodeBlock::offsetOfJITData()), regT0);
+    loadPtr(Address(regT0, CodeBlock::JITData::offsetOfJITConstantPool()), s_constantsGPR);
+}
+
+void JIT::emitRestoreCalleeSaves()
+{
+    Base::emitRestoreCalleeSavesFor(&RegisterAtOffsetList::llintBaselineCalleeSaveRegisters());
+}
+
 void JIT::compileAndLinkWithoutFinalizing(JITCompilationEffort effort)
 {
-    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
+    DFG::CapabilityLevel level = m_profiledCodeBlock->capabilityLevel();
     switch (level) {
     case DFG::CannotCompile:
         m_canBeOptimized = false;
-        m_canBeOptimizedOrInlined = false;
         m_shouldEmitProfiling = false;
         break;
…
     case DFG::CanCompileAndInline:
         m_canBeOptimized = true;
-        m_canBeOptimizedOrInlined = true;
         m_shouldEmitProfiling = true;
         break;
…
         break;
     }
-
-    switch (m_codeBlock->codeType()) {
-    case GlobalCode:
-    case ModuleCode:
-    case EvalCode:
-        m_codeBlock->m_shouldAlwaysBeInlined = false;
-        break;
-    case FunctionCode:
-        // We could have already set it to false because we detected an uninlineable call.
-        // Don't override that observation.
-        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
-        break;
-    }
-
-    if (m_codeBlock->numberOfUnlinkedSwitchJumpTables() || m_codeBlock->numberOfUnlinkedStringSwitchJumpTables()) {
-        ConcurrentJSLocker locker(m_codeBlock->m_lock);
-        if (m_codeBlock->numberOfUnlinkedSwitchJumpTables())
-            m_codeBlock->ensureJITData(locker).m_switchJumpTables = FixedVector<SimpleJumpTable>(m_codeBlock->numberOfUnlinkedSwitchJumpTables());
-        if (m_codeBlock->numberOfUnlinkedStringSwitchJumpTables())
-            m_codeBlock->ensureJITData(locker).m_stringSwitchJumpTables = FixedVector<StringJumpTable>(m_codeBlock->numberOfUnlinkedStringSwitchJumpTables());
-    }
-
-    if (UNLIKELY(Options::dumpDisassembly() || (m_vm->m_perBytecodeProfiler && Options::disassembleBaselineForProfiler())))
-        m_disassembler = makeUnique<JITDisassembler>(m_codeBlock);
+
+    if (m_unlinkedCodeBlock->numberOfUnlinkedSwitchJumpTables() || m_unlinkedCodeBlock->numberOfUnlinkedStringSwitchJumpTables()) {
+        if (m_unlinkedCodeBlock->numberOfUnlinkedSwitchJumpTables())
+            m_switchJumpTables = FixedVector<SimpleJumpTable>(m_unlinkedCodeBlock->numberOfUnlinkedSwitchJumpTables());
+        if (m_unlinkedCodeBlock->numberOfUnlinkedStringSwitchJumpTables())
+            m_stringSwitchJumpTables = FixedVector<StringJumpTable>(m_unlinkedCodeBlock->numberOfUnlinkedStringSwitchJumpTables());
+    }
+
+    if (UNLIKELY(Options::dumpDisassembly() || (m_vm->m_perBytecodeProfiler && Options::disassembleBaselineForProfiler()))) {
+        // FIXME: build a disassembler off of UnlinkedCodeBlock.
+        m_disassembler = makeUnique<JITDisassembler>(m_profiledCodeBlock);
+    }
     if (UNLIKELY(m_vm->m_perBytecodeProfiler)) {
+        // FIXME: build profiler disassembler off UnlinkedCodeBlock.
         m_compilation = adoptRef(
             new Profiler::Compilation(
-                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
+                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_profiledCodeBlock),
                 Profiler::Baseline));
-        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
+        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_profiledCodeBlock);
     }

…

     emitFunctionPrologue();
-    emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);
+    if (m_unlinkedCodeBlock->codeType() == FunctionCode)
+        emitPutCodeBlockToFrameInPrologue();

     Label beginLabel(this);

-    int frameTopOffset = stackPointerOffsetFor(m_codeBlock) * sizeof(Register);
+    int frameTopOffset = stackPointerOffsetFor(m_unlinkedCodeBlock) * sizeof(Register);
     unsigned maxFrameSize = -frameTopOffset;
     addPtr(TrustedImm32(frameTopOffset), callFrameRegister, regT1);
…
     checkStackPointerAlignment();

-    emitSaveCalleeSaves();
+    emitSaveCalleeSavesFor(&RegisterAtOffsetList::llintBaselineCalleeSaveRegisters());
     emitMaterializeTagCheckRegisters();
-
-    if (m_codeBlock->codeType() == FunctionCode) {
+    emitMaterializeMetadataAndConstantPoolRegisters();
+
+    if (m_unlinkedCodeBlock->codeType() == FunctionCode) {
         ASSERT(!m_bytecodeIndex);
-        if (shouldEmitProfiling()) {
-            for (unsigned argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
+        if (shouldEmitProfiling() && (!m_unlinkedCodeBlock->isConstructor() || m_unlinkedCodeBlock->numParameters() > 1)) {
+            emitGetFromCallFrameHeaderPtr(CallFrameSlot::codeBlock, regT2);
+            loadPtr(Address(regT2, CodeBlock::offsetOfArgumentValueProfiles() + FixedVector<ValueProfile>::offsetOfStorage()), regT2);
+
+            for (unsigned argument = 0; argument < m_unlinkedCodeBlock->numParameters(); ++argument) {
                 // If this is a constructor, then we want to put in a dummy profiling site (to
                 // keep things consistent) but we don't actually want to record the dummy value.
-                if (m_codeBlock->isConstructor() && !argument)
+                if (m_unlinkedCodeBlock->isConstructor() && !argument)
                     continue;
                 int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
…
                 load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultRegs.tagGPR());
 #endif
-                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument), resultRegs);
+                storeValue(resultRegs, Address(regT2, argument * sizeof(ValueProfile) + ValueProfile::offsetOfFirstBucket()));
             }
         }
     }

-    RELEASE_ASSERT(!JITCode::isJIT(m_codeBlock->jitType()));
+    RELEASE_ASSERT(!JITCode::isJIT(m_profiledCodeBlock->jitType()));

     if (UNLIKELY(sizeMarker))
…
     if (maxFrameExtentForSlowPathCall)
         addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
-    callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
+    emitGetFromCallFrameHeaderPtr(CallFrameSlot::codeBlock, regT0);
+    callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, regT0);

     // If the number of parameters is 1, we never require arity fixup.
-    bool requiresArityFixup = m_codeBlock->m_numParameters != 1;
-    if (m_codeBlock->codeType() == FunctionCode && requiresArityFixup) {
+    bool requiresArityFixup = m_unlinkedCodeBlock->numParameters() != 1;
+    if (m_unlinkedCodeBlock->codeType() == FunctionCode && requiresArityFixup) {
         m_arityCheck = label();
-        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
+
         emitFunctionPrologue();
-        emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);
+        emitPutCodeBlockToFrameInPrologue(regT0);
+        store8(TrustedImm32(0), Address(regT0, CodeBlock::offsetOfShouldAlwaysBeInlined()));

         load32(payloadFor(CallFrameSlot::argumentCountIncludingThis), regT1);
-        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
+        branch32(AboveOrEqual, regT1, TrustedImm32(m_unlinkedCodeBlock->numParameters())).linkTo(beginLabel, this);

         m_bytecodeIndex = BytecodeIndex(0);
…
         if (maxFrameExtentForSlowPathCall)
             addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
-        callOperationWithCallFrameRollbackOnException(m_codeBlock->isConstructor() ? operationConstructArityCheck : operationCallArityCheck, m_codeBlock->globalObject());
+        loadPtr(Address(regT0, CodeBlock::offsetOfGlobalObject()), argumentGPR0);
+        callOperationWithCallFrameRollbackOnException(m_unlinkedCodeBlock->isConstructor() ? operationConstructArityCheck : operationCallArityCheck, argumentGPR0);
         if (maxFrameExtentForSlowPathCall)
             addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
…
     m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

-    m_linkBuffer = std::unique_ptr<LinkBuffer>(new LinkBuffer(*this, m_codeBlock, LinkBuffer::Profile::BaselineJIT, effort));
+    m_linkBuffer = std::unique_ptr<LinkBuffer>(new LinkBuffer(*this, m_unlinkedCodeBlock, LinkBuffer::Profile::BaselineJIT, effort));
     link();
 }
…
         case SwitchRecord::Immediate:
         case SwitchRecord::Character: {
-            const UnlinkedSimpleJumpTable& unlinkedTable = m_codeBlock->unlinkedSwitchJumpTable(tableIndex);
-            SimpleJumpTable& linkedTable = m_codeBlock->switchJumpTable(tableIndex);
+            const UnlinkedSimpleJumpTable& unlinkedTable = m_unlinkedCodeBlock->unlinkedSwitchJumpTable(tableIndex);
+            SimpleJumpTable& linkedTable = m_switchJumpTables[tableIndex];
             linkedTable.m_ctiDefault = patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + record.defaultOffset]);
             for (unsigned j = 0; j < unlinkedTable.m_branchOffsets.size(); ++j) {
…

         case SwitchRecord::String: {
-            const UnlinkedStringJumpTable& unlinkedTable = m_codeBlock->unlinkedStringSwitchJumpTable(tableIndex);
-            StringJumpTable& linkedTable = m_codeBlock->stringSwitchJumpTable(tableIndex);
+            const UnlinkedStringJumpTable& unlinkedTable = m_unlinkedCodeBlock->unlinkedStringSwitchJumpTable(tableIndex);
+            StringJumpTable& linkedTable = m_stringSwitchJumpTables[tableIndex];
             auto ctiDefault = patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + record.defaultOffset]);
             for (auto& location : unlinkedTable.m_offsetTable.values()) {
…
     }

+#if USE(JSVALUE64)
+    auto finalizeICs = [&] (auto& generators) {
+        for (auto& gen : generators) {
+            gen.m_unlinkedStubInfo->start = patchBuffer.locationOf<JITStubRoutinePtrTag>(gen.m_start);
+            gen.m_unlinkedStubInfo->doneLocation = patchBuffer.locationOf<JSInternalPtrTag>(gen.m_done);
+            gen.m_unlinkedStubInfo->slowPathStartLocation = patchBuffer.locationOf<JITStubRoutinePtrTag>(gen.m_slowPathBegin);
+        }
+    };
+
+    finalizeICs(m_getByIds);
+    finalizeICs(m_getByVals);
+    finalizeICs(m_getByIdsWithThis);
+    finalizeICs(m_putByIds);
+    finalizeICs(m_putByVals);
+    finalizeICs(m_delByIds);
+    finalizeICs(m_delByVals);
+    finalizeICs(m_inByIds);
+    finalizeICs(m_inByVals);
+    finalizeICs(m_instanceOfs);
+    finalizeICs(m_privateBrandAccesses);
+#else
     finalizeInlineCaches(m_getByIds, patchBuffer);
     finalizeInlineCaches(m_getByVals, patchBuffer);
…
     finalizeInlineCaches(m_instanceOfs, patchBuffer);
     finalizeInlineCaches(m_privateBrandAccesses, patchBuffer);
+#endif

     for (auto& compilationInfo : m_callCompilationInfo) {
+#if USE(JSVALUE64)
+        UnlinkedCallLinkInfo& info = *compilationInfo.unlinkedCallLinkInfo;
+        info.doneLocation = patchBuffer.locationOf<JSInternalPtrTag>(compilationInfo.doneLocation);
+#else
         CallLinkInfo& info = *compilationInfo.callLinkInfo;
         info.setCodeLocations(
             patchBuffer.locationOf<JSInternalPtrTag>(compilationInfo.slowPathStart),
             patchBuffer.locationOf<JSInternalPtrTag>(compilationInfo.doneLocation));
-    }
-
-    {
-        JITCodeMapBuilder jitCodeMapBuilder;
-        for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
-            if (m_labels[bytecodeOffset].isSet())
-                jitCodeMapBuilder.append(BytecodeIndex(bytecodeOffset), patchBuffer.locationOf<JSEntryPtrTag>(m_labels[bytecodeOffset]));
-        }
-        m_codeBlock->setJITCodeMap(jitCodeMapBuilder.finalize());
+#endif
+
+    }
+
+    JITCodeMapBuilder jitCodeMapBuilder;
+    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
+        if (m_labels[bytecodeOffset].isSet())
+            jitCodeMapBuilder.append(BytecodeIndex(bytecodeOffset), patchBuffer.locationOf<JSEntryPtrTag>(m_labels[bytecodeOffset]));
     }

…

     if (UNLIKELY(m_compilation)) {
+        // FIXME: should we make the bytecode profiler know about UnlinkedCodeBlock?
         if (Options::disassembleBaselineForProfiler())
             m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
-        m_vm->m_perBytecodeProfiler->addCompilation(m_codeBlock, *m_compilation);
+        m_vm->m_perBytecodeProfiler->addCompilation(m_profiledCodeBlock, *m_compilation);
     }

…
         m_pcToCodeOriginMap = makeUnique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), patchBuffer);

+    // FIXME: Make a version of CodeBlockWithJITType that knows about UnlinkedCodeBlock.
     CodeRef<JSEntryPtrTag> result = FINALIZE_CODE(
         patchBuffer, JSEntryPtrTag,
-        "Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITType::BaselineJIT)).data());
+        "Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_profiledCodeBlock, JITType::BaselineJIT)).data());

     MacroAssemblerCodePtr<JSEntryPtrTag> withArityCheck = patchBuffer.locationOf<JSEntryPtrTag>(m_arityCheck);
-    m_jitCode = adoptRef(*new DirectJITCode(result, withArityCheck, JITType::BaselineJIT));
+    m_jitCode = adoptRef(*new BaselineJITCode(result, withArityCheck));
+
+    m_jitCode->m_unlinkedCalls = WTFMove(m_unlinkedCalls);
+    m_jitCode->m_evalCallLinkInfos = WTFMove(m_evalCallLinkInfos);
+    m_jitCode->m_unlinkedStubInfos = WTFMove(m_unlinkedStubInfos);
+    m_jitCode->m_switchJumpTables = WTFMove(m_switchJumpTables);
+    m_jitCode->m_stringSwitchJumpTables = WTFMove(m_stringSwitchJumpTables);
+    m_jitCode->m_jitCodeMap = jitCodeMapBuilder.finalize();
+    m_jitCode->adoptMathICs(m_mathICs);
+    m_jitCode->m_constantPool = WTFMove(m_constantPool);
+#if USE(JSVALUE64)
+    m_jitCode->m_isShareable = m_isShareable;
+#else
+    m_jitCode->m_isShareable = false;
+#endif

     if (JITInternal::verbose)
-        dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start().untaggedPtr(), result.executableMemory()->end().untaggedPtr());
+        dataLogF("JIT generated code for %p at [%p, %p).\n", m_unlinkedCodeBlock, result.executableMemory()->start().untaggedPtr(), result.executableMemory()->end().untaggedPtr());
 }

-CompilationResult JIT::finalizeOnMainThread()
+CompilationResult JIT::finalizeOnMainThread(CodeBlock* codeBlock)
 {
     RELEASE_ASSERT(!isCompilationThread());
…
     m_linkBuffer->runMainThreadFinalizationTasks();

-    {
-        ConcurrentJSLocker locker(m_codeBlock->m_lock);
-        m_codeBlock->shrinkToFit(locker, CodeBlock::ShrinkMode::LateShrink);
-    }
-
-    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
-        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
-        // FIXME: <rdar://problem/39433318>.
-        handler.nativeCode = m_codeBlock->jitCodeMap().find(BytecodeIndex(handler.target)).retagged<ExceptionHandlerPtrTag>();
-    }
-
     if (m_pcToCodeOriginMap)
-        m_codeBlock->setPCToCodeOriginMap(WTFMove(m_pcToCodeOriginMap));
+        m_jitCode->m_pcToCodeOriginMap = WTFMove(m_pcToCodeOriginMap);

     m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->add(
         static_cast<double>(m_jitCode->size()) /
-        static_cast<double>(m_codeBlock->instructionsSize()));
-
-    m_codeBlock->setJITCode(m_jitCode.releaseNonNull());
+        static_cast<double>(m_unlinkedCodeBlock->instructionsSize()));
+
+    codeBlock->setupWithUnlinkedBaselineCode(m_jitCode.releaseNonNull());

     return CompilationSuccessful;
…
 }

-CompilationResult JIT::privateCompile(JITCompilationEffort effort)
+CompilationResult JIT::privateCompile(CodeBlock* codeBlock, JITCompilationEffort effort)
 {
     doMainThreadPreparationBeforeCompile();
     compileAndLinkWithoutFinalizing(effort);
-    return finalizeOnMainThread();
+    return finalizeOnMainThread(codeBlock);
 }
…
 }

+unsigned JIT::frameRegisterCountFor(UnlinkedCodeBlock* codeBlock)
+{
+    ASSERT(static_cast<unsigned>(codeBlock->numCalleeLocals()) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->numCalleeLocals())));
+
+    return roundLocalRegisterCountForFramePointerOffset(codeBlock->numCalleeLocals() + maxFrameExtentForSlowPathCallInRegisters);
+}
+
 unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
 {
-    ASSERT(static_cast<unsigned>(codeBlock->numCalleeLocals()) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->numCalleeLocals())));
-
-    return roundLocalRegisterCountForFramePointerOffset(codeBlock->numCalleeLocals() + maxFrameExtentForSlowPathCallInRegisters);
+    return frameRegisterCountFor(codeBlock->unlinkedCodeBlock());
+}
+
+int JIT::stackPointerOffsetFor(UnlinkedCodeBlock* codeBlock)
+{
+    return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
 }

 int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
 {
-    return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
+    return stackPointerOffsetFor(codeBlock->unlinkedCodeBlock());
 }
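For reference, the arithmetic behind the new frameRegisterCountFor(UnlinkedCodeBlock*) and stackPointerOffsetFor(UnlinkedCodeBlock*) overloads in the last hunk can be checked in isolation. The sketch below is not JSC code: stackAlignmentRegisters, maxFrameExtentForSlowPathCallInRegisters, the register size, and the rounding helper are stand-ins with assumed values, and the mapping of local i to operand offset -1 - i is likewise an assumption, used only to illustrate why the stack-pointer offset comes out as minus the rounded frame register count.

// Standalone sketch with assumed constants; not the real JSC definitions.
#include <cassert>
#include <cstdio>

constexpr unsigned stackAlignmentRegisters = 2;                  // assumed value
constexpr unsigned maxFrameExtentForSlowPathCallInRegisters = 6; // assumed value
constexpr int registerSizeInBytes = 8;                           // one Register slot on a 64-bit target (assumed)

constexpr unsigned roundUpToMultipleOf(unsigned divisor, unsigned x)
{
    return (x + divisor - 1) / divisor * divisor;
}

// Shape of JIT::frameRegisterCountFor(UnlinkedCodeBlock*): callee locals are expected to be
// pre-rounded to the stack alignment, then padded for slow-path calls and rounded again.
unsigned frameRegisterCountFor(unsigned numCalleeLocals)
{
    assert(numCalleeLocals == roundUpToMultipleOf(stackAlignmentRegisters, numCalleeLocals));
    return roundUpToMultipleOf(stackAlignmentRegisters, numCalleeLocals + maxFrameExtentForSlowPathCallInRegisters);
}

// Shape of JIT::stackPointerOffsetFor(UnlinkedCodeBlock*), assuming local i sits at
// operand offset -1 - i, so virtualRegisterForLocal(count - 1).offset() == -count.
int stackPointerOffsetFor(unsigned numCalleeLocals)
{
    return -static_cast<int>(frameRegisterCountFor(numCalleeLocals));
}

int main()
{
    unsigned locals = 10; // already a multiple of stackAlignmentRegisters
    std::printf("frame registers: %u\n", frameRegisterCountFor(locals));                       // 16
    std::printf("frameTopOffset:  %d\n", stackPointerOffsetFor(locals) * registerSizeInBytes); // -128
}

With these assumed numbers, the second print matches the shape of the prologue's frameTopOffset = stackPointerOffsetFor(m_unlinkedCodeBlock) * sizeof(Register): a negative byte offset whose magnitude is the rounded frame size.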