Changeset 283083 in webkit for trunk/Source/JavaScriptCore/jit/JIT.cpp
- Timestamp: Sep 25, 2021, 2:55:41 PM
- File: 1 edited (trunk/Source/JavaScriptCore/jit/JIT.cpp)
Legend: unchanged context lines are prefixed with a space, added lines with "+", removed lines with "-".
trunk/Source/JavaScriptCore/jit/JIT.cpp
--- trunk/Source/JavaScriptCore/jit/JIT.cpp (r282239)
+++ trunk/Source/JavaScriptCore/jit/JIT.cpp (r283083)
@@ -70,5 +70,5 @@
 
 JIT::JIT(VM& vm, CodeBlock* codeBlock, BytecodeIndex loopOSREntryBytecodeIndex)
-    : JSInterfaceJIT(&vm, codeBlock)
+    : JSInterfaceJIT(&vm, nullptr)
     , m_interpreter(vm.interpreter)
     , m_labels(codeBlock ? codeBlock->instructions().size() : 0)
@@ -78,4 +78,7 @@
     , m_loopOSREntryBytecodeIndex(loopOSREntryBytecodeIndex)
 {
+    m_globalObjectConstant = m_constantPool.add(JITConstantPool::Type::GlobalObject);
+    m_profiledCodeBlock = codeBlock;
+    m_unlinkedCodeBlock = codeBlock->unlinkedCodeBlock();
 }
 
@@ -91,6 +94,6 @@
 
     JumpList skipOptimize;
-
-    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
+    loadPtr(addressFor(CallFrameSlot::codeBlock), regT0);
+    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), Address(regT0, CodeBlock::offsetOfJITExecuteCounter())));
     ASSERT(!m_bytecodeIndex.offset());
 
@@ -114,13 +117,18 @@
 }
 
-void JIT::emitNotifyWrite(GPRReg pointerToSet)
-{
+void JIT::emitNotifyWriteWatchpoint(GPRReg pointerToSet)
+{
+    auto ok = branchTestPtr(Zero, pointerToSet);
     addSlowCase(branch8(NotEqual, Address(pointerToSet, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated)));
-}
-
-void JIT::emitVarReadOnlyCheck(ResolveType resolveType)
-{
-    if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
-        addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varReadOnlyWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated)));
+    ok.link(this);
+}
+
+void JIT::emitVarReadOnlyCheck(ResolveType resolveType, GPRReg scratchGPR)
+{
+    if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks) {
+        loadGlobalObject(scratchGPR);
+        loadPtr(Address(scratchGPR, OBJECT_OFFSETOF(JSGlobalObject, m_varReadOnlyWatchpoint)), scratchGPR);
+        addSlowCase(branch8(Equal, Address(scratchGPR, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated)));
+    }
 }
 
@@ -130,8 +138,14 @@
         return;
 
-    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0);
+    addPtr(TrustedImm32(stackPointerOffsetFor(m_unlinkedCodeBlock) * sizeof(Register)), callFrameRegister, regT0);
     Jump ok = branchPtr(Equal, regT0, stackPointerRegister);
     breakpoint();
     ok.link(this);
+}
+
+void JIT::resetSP()
+{
+    addPtr(TrustedImm32(stackPointerOffsetFor(m_unlinkedCodeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+    checkStackPointerAlignment();
 }
 
@@ -182,59 +196,42 @@
 }
 
+void JIT::emitPutCodeBlockToFrameInPrologue(GPRReg result)
+{
+    RELEASE_ASSERT(m_unlinkedCodeBlock->codeType() == FunctionCode);
+    emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, result);
+    loadPtr(Address(result, JSFunction::offsetOfExecutableOrRareData()), result);
+    auto hasExecutable = branchTestPtr(Zero, result, CCallHelpers::TrustedImm32(JSFunction::rareDataTag));
+    loadPtr(Address(result, FunctionRareData::offsetOfExecutable() - JSFunction::rareDataTag), result);
+    hasExecutable.link(this);
+    if (m_unlinkedCodeBlock->isConstructor())
+        loadPtr(Address(result, FunctionExecutable::offsetOfCodeBlockForConstruct()), result);
+    else
+        loadPtr(Address(result, FunctionExecutable::offsetOfCodeBlockForCall()), result);
+
+    loadPtr(Address(result, ExecutableToCodeBlockEdge::offsetOfCodeBlock()), result);
+    emitPutToCallFrameHeader(result, CallFrameSlot::codeBlock);
+
+#if ASSERT_ENABLED
+    probeDebug([=] (Probe::Context& ctx) {
+        CodeBlock* codeBlock = ctx.fp<CallFrame*>()->codeBlock();
+        RELEASE_ASSERT(codeBlock->jitType() == JITType::BaselineJIT);
+    });
+#endif
+}
+
 void JIT::privateCompileMainPass()
 {
     if (JITInternal::verbose)
-        dataLog("Compiling ", *m_codeBlock, "\n");
+        dataLog("Compiling ", *m_profiledCodeBlock, "\n");
 
     jitAssertTagsInPlace();
     jitAssertArgumentCountSane();
 
-    auto& instructions = m_codeBlock->instructions();
-    unsigned instructionCount = m_codeBlock->instructions().size();
+    auto& instructions = m_unlinkedCodeBlock->instructions();
+    unsigned instructionCount = m_unlinkedCodeBlock->instructions().size();
 
     m_callLinkInfoIndex = 0;
 
-    VM& vm = m_codeBlock->vm();
     BytecodeIndex startBytecodeIndex(0);
-    if (m_loopOSREntryBytecodeIndex && (m_codeBlock->inherits<ProgramCodeBlock>(vm) || m_codeBlock->inherits<ModuleProgramCodeBlock>(vm))) {
-        // We can only do this optimization because we execute ProgramCodeBlock's exactly once.
-        // This optimization would be invalid otherwise. When the LLInt determines it wants to
-        // do OSR entry into the baseline JIT in a loop, it will pass in the bytecode offset it
-        // was executing at when it kicked off our compilation. We only need to compile code for
-        // anything reachable from that bytecode offset.
-
-        // We only bother building the bytecode graph if it could save time and executable
-        // memory. We pick an arbitrary offset where we deem this is profitable.
-        if (m_loopOSREntryBytecodeIndex.offset() >= 200) {
-            // As a simplification, we don't find all bytecode ranges that are unreachable.
-            // Instead, we just find the minimum bytecode offset that is reachable, and
-            // compile code from that bytecode offset onwards.
-
-            BytecodeGraph graph(m_codeBlock, m_codeBlock->instructions());
-            BytecodeBasicBlock* block = graph.findBasicBlockForBytecodeOffset(m_loopOSREntryBytecodeIndex.offset());
-            RELEASE_ASSERT(block);
-
-            GraphNodeWorklist<BytecodeBasicBlock*> worklist;
-            startBytecodeIndex = BytecodeIndex();
-            worklist.push(block);
-
-            while (BytecodeBasicBlock* block = worklist.pop()) {
-                startBytecodeIndex = BytecodeIndex(std::min(startBytecodeIndex.offset(), block->leaderOffset()));
-                for (unsigned successorIndex : block->successors())
-                    worklist.push(&graph[successorIndex]);
-
-                // Also add catch blocks for bytecodes that throw.
-                if (m_codeBlock->numberOfExceptionHandlers()) {
-                    for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) {
-                        auto instruction = instructions.at(bytecodeOffset);
-                        if (auto* handler = m_codeBlock->handlerForBytecodeIndex(BytecodeIndex(bytecodeOffset)))
-                            worklist.push(graph.findBasicBlockWithLeaderOffset(handler->target));
-
-                        bytecodeOffset += instruction->size();
-                    }
-                }
-            }
-        }
-    }
 
     m_bytecodeCountHavingSlowCase = 0;
@@ -279,9 +276,12 @@
         unsigned bytecodeOffset = m_bytecodeIndex.offset();
         if (UNLIKELY(Options::traceBaselineJITExecution())) {
-            CodeBlock* codeBlock = m_codeBlock;
             probeDebug([=] (Probe::Context& ctx) {
+                CodeBlock* codeBlock = ctx.fp<CallFrame*>()->codeBlock();
                 dataLogLn("JIT [", bytecodeOffset, "] ", opcodeNames[opcodeID], " cfr ", RawPointer(ctx.fp()), " @ ", codeBlock);
             });
         }
+
+        if (opcodeID != op_catch)
+            assertStackPointerOffset();
 
         switch (opcodeID) {
@@ -528,5 +528,5 @@
     BytecodeIndex firstTo = m_bytecodeIndex;
 
-    const Instruction* currentInstruction = m_codeBlock->instructions().at(m_bytecodeIndex).ptr();
+    const Instruction* currentInstruction = m_unlinkedCodeBlock->instructions().at(m_bytecodeIndex).ptr();
 
     if (JITInternal::verbose)
@@ -546,6 +546,6 @@
         if (UNLIKELY(Options::traceBaselineJITExecution())) {
            unsigned bytecodeOffset = m_bytecodeIndex.offset();
-            CodeBlock* codeBlock = m_codeBlock;
             probeDebug([=] (Probe::Context& ctx) {
+                CodeBlock* codeBlock = ctx.fp<CallFrame*>()->codeBlock();
                 dataLogLn("JIT [", bytecodeOffset, "] SLOW ", opcodeNames[opcodeID], " cfr ", RawPointer(ctx.fp()), " @ ", codeBlock);
             });
@@ -675,11 +675,23 @@
 }
 
+void JIT::emitMaterializeMetadataAndConstantPoolRegisters()
+{
+    loadPtr(addressFor(CallFrameSlot::codeBlock), regT0);
+    loadPtr(Address(regT0, CodeBlock::offsetOfMetadataTable()), s_metadataGPR);
+    loadPtr(Address(regT0, CodeBlock::offsetOfJITData()), regT0);
+    loadPtr(Address(regT0, CodeBlock::JITData::offsetOfJITConstantPool()), s_constantsGPR);
+}
+
+void JIT::emitRestoreCalleeSaves()
+{
+    Base::emitRestoreCalleeSavesFor(&RegisterAtOffsetList::llintBaselineCalleeSaveRegisters());
+}
+
 void JIT::compileAndLinkWithoutFinalizing(JITCompilationEffort effort)
 {
-    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
+    DFG::CapabilityLevel level = m_profiledCodeBlock->capabilityLevel();
     switch (level) {
     case DFG::CannotCompile:
         m_canBeOptimized = false;
-        m_canBeOptimizedOrInlined = false;
         m_shouldEmitProfiling = false;
         break;
@@ -687,5 +699,4 @@
     case DFG::CanCompileAndInline:
         m_canBeOptimized = true;
-        m_canBeOptimizedOrInlined = true;
         m_shouldEmitProfiling = true;
         break;
@@ -694,33 +705,22 @@
         break;
     }
-
-    switch (m_codeBlock->codeType()) {
-    case GlobalCode:
-    case ModuleCode:
-    case EvalCode:
-        m_codeBlock->m_shouldAlwaysBeInlined = false;
-        break;
-    case FunctionCode:
-        // We could have already set it to false because we detected an uninlineable call.
-        // Don't override that observation.
-        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
-        break;
-    }
-
-    if (m_codeBlock->numberOfUnlinkedSwitchJumpTables() || m_codeBlock->numberOfUnlinkedStringSwitchJumpTables()) {
-        ConcurrentJSLocker locker(m_codeBlock->m_lock);
-        if (m_codeBlock->numberOfUnlinkedSwitchJumpTables())
-            m_codeBlock->ensureJITData(locker).m_switchJumpTables = FixedVector<SimpleJumpTable>(m_codeBlock->numberOfUnlinkedSwitchJumpTables());
-        if (m_codeBlock->numberOfUnlinkedStringSwitchJumpTables())
-            m_codeBlock->ensureJITData(locker).m_stringSwitchJumpTables = FixedVector<StringJumpTable>(m_codeBlock->numberOfUnlinkedStringSwitchJumpTables());
-    }
-
-    if (UNLIKELY(Options::dumpDisassembly() || (m_vm->m_perBytecodeProfiler && Options::disassembleBaselineForProfiler())))
-        m_disassembler = makeUnique<JITDisassembler>(m_codeBlock);
+
+    if (m_unlinkedCodeBlock->numberOfUnlinkedSwitchJumpTables() || m_unlinkedCodeBlock->numberOfUnlinkedStringSwitchJumpTables()) {
+        if (m_unlinkedCodeBlock->numberOfUnlinkedSwitchJumpTables())
+            m_switchJumpTables = FixedVector<SimpleJumpTable>(m_unlinkedCodeBlock->numberOfUnlinkedSwitchJumpTables());
+        if (m_unlinkedCodeBlock->numberOfUnlinkedStringSwitchJumpTables())
+            m_stringSwitchJumpTables = FixedVector<StringJumpTable>(m_unlinkedCodeBlock->numberOfUnlinkedStringSwitchJumpTables());
+    }
+
+    if (UNLIKELY(Options::dumpDisassembly() || (m_vm->m_perBytecodeProfiler && Options::disassembleBaselineForProfiler()))) {
+        // FIXME: build a disassembler off of UnlinkedCodeBlock.
+        m_disassembler = makeUnique<JITDisassembler>(m_profiledCodeBlock);
+    }
     if (UNLIKELY(m_vm->m_perBytecodeProfiler)) {
+        // FIXME: build profiler disassembler off UnlinkedCodeBlock.
         m_compilation = adoptRef(
             new Profiler::Compilation(
-                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
+                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_profiledCodeBlock),
                 Profiler::Baseline));
-        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
+        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_profiledCodeBlock);
     }
@@ -743,9 +743,10 @@
 
     emitFunctionPrologue();
-    emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);
+    if (m_unlinkedCodeBlock->codeType() == FunctionCode)
+        emitPutCodeBlockToFrameInPrologue();
 
     Label beginLabel(this);
 
-    int frameTopOffset = stackPointerOffsetFor(m_codeBlock) * sizeof(Register);
+    int frameTopOffset = stackPointerOffsetFor(m_unlinkedCodeBlock) * sizeof(Register);
     unsigned maxFrameSize = -frameTopOffset;
     addPtr(TrustedImm32(frameTopOffset), callFrameRegister, regT1);
@@ -758,14 +759,18 @@
     checkStackPointerAlignment();
 
-    emitSaveCalleeSaves();
+    emitSaveCalleeSavesFor(&RegisterAtOffsetList::llintBaselineCalleeSaveRegisters());
     emitMaterializeTagCheckRegisters();
-
-    if (m_codeBlock->codeType() == FunctionCode) {
+    emitMaterializeMetadataAndConstantPoolRegisters();
+
+    if (m_unlinkedCodeBlock->codeType() == FunctionCode) {
         ASSERT(!m_bytecodeIndex);
-        if (shouldEmitProfiling()) {
-            for (unsigned argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
+        if (shouldEmitProfiling() && (!m_unlinkedCodeBlock->isConstructor() || m_unlinkedCodeBlock->numParameters() > 1)) {
+            emitGetFromCallFrameHeaderPtr(CallFrameSlot::codeBlock, regT2);
+            loadPtr(Address(regT2, CodeBlock::offsetOfArgumentValueProfiles() + FixedVector<ValueProfile>::offsetOfStorage()), regT2);
+
+            for (unsigned argument = 0; argument < m_unlinkedCodeBlock->numParameters(); ++argument) {
                 // If this is a constructor, then we want to put in a dummy profiling site (to
                 // keep things consistent) but we don't actually want to record the dummy value.
-                if (m_codeBlock->isConstructor() && !argument)
+                if (m_unlinkedCodeBlock->isConstructor() && !argument)
                     continue;
                 int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
@@ -778,10 +783,10 @@
                 load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultRegs.tagGPR());
 #endif
-                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument), resultRegs);
+                storeValue(resultRegs, Address(regT2, argument * sizeof(ValueProfile) + ValueProfile::offsetOfFirstBucket()));
             }
         }
     }
 
-    RELEASE_ASSERT(!JITCode::isJIT(m_codeBlock->jitType()));
+    RELEASE_ASSERT(!JITCode::isJIT(m_profiledCodeBlock->jitType()));
 
     if (UNLIKELY(sizeMarker))
@@ -800,16 +805,18 @@
     if (maxFrameExtentForSlowPathCall)
         addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
-    callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
+    emitGetFromCallFrameHeaderPtr(CallFrameSlot::codeBlock, regT0);
+    callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, regT0);
 
     // If the number of parameters is 1, we never require arity fixup.
-    bool requiresArityFixup = m_codeBlock->m_numParameters != 1;
-    if (m_codeBlock->codeType() == FunctionCode && requiresArityFixup) {
+    bool requiresArityFixup = m_unlinkedCodeBlock->numParameters() != 1;
+    if (m_unlinkedCodeBlock->codeType() == FunctionCode && requiresArityFixup) {
        m_arityCheck = label();
-        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
+
         emitFunctionPrologue();
-        emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);
+        emitPutCodeBlockToFrameInPrologue(regT0);
+        store8(TrustedImm32(0), Address(regT0, CodeBlock::offsetOfShouldAlwaysBeInlined()));
 
         load32(payloadFor(CallFrameSlot::argumentCountIncludingThis), regT1);
-        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
+        branch32(AboveOrEqual, regT1, TrustedImm32(m_unlinkedCodeBlock->numParameters())).linkTo(beginLabel, this);
 
         m_bytecodeIndex = BytecodeIndex(0);
@@ -817,5 +824,6 @@
         if (maxFrameExtentForSlowPathCall)
             addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
-        callOperationWithCallFrameRollbackOnException(m_codeBlock->isConstructor() ? operationConstructArityCheck : operationCallArityCheck, m_codeBlock->globalObject());
+        loadPtr(Address(regT0, CodeBlock::offsetOfGlobalObject()), argumentGPR0);
+        callOperationWithCallFrameRollbackOnException(m_unlinkedCodeBlock->isConstructor() ? operationConstructArityCheck : operationCallArityCheck, argumentGPR0);
         if (maxFrameExtentForSlowPathCall)
             addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
@@ -840,5 +848,5 @@
     m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
 
-    m_linkBuffer = std::unique_ptr<LinkBuffer>(new LinkBuffer(*this, m_codeBlock, LinkBuffer::Profile::BaselineJIT, effort));
+    m_linkBuffer = std::unique_ptr<LinkBuffer>(new LinkBuffer(*this, m_unlinkedCodeBlock, LinkBuffer::Profile::BaselineJIT, effort));
     link();
 }
@@ -859,6 +867,6 @@
         case SwitchRecord::Immediate:
         case SwitchRecord::Character: {
-            const UnlinkedSimpleJumpTable& unlinkedTable = m_codeBlock->unlinkedSwitchJumpTable(tableIndex);
-            SimpleJumpTable& linkedTable = m_codeBlock->switchJumpTable(tableIndex);
+            const UnlinkedSimpleJumpTable& unlinkedTable = m_unlinkedCodeBlock->unlinkedSwitchJumpTable(tableIndex);
+            SimpleJumpTable& linkedTable = m_switchJumpTables[tableIndex];
             linkedTable.m_ctiDefault = patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + record.defaultOffset]);
             for (unsigned j = 0; j < unlinkedTable.m_branchOffsets.size(); ++j) {
@@ -872,6 +880,6 @@
 
         case SwitchRecord::String: {
-            const UnlinkedStringJumpTable& unlinkedTable = m_codeBlock->unlinkedStringSwitchJumpTable(tableIndex);
-            StringJumpTable& linkedTable = m_codeBlock->stringSwitchJumpTable(tableIndex);
+            const UnlinkedStringJumpTable& unlinkedTable = m_unlinkedCodeBlock->unlinkedStringSwitchJumpTable(tableIndex);
+            StringJumpTable& linkedTable = m_stringSwitchJumpTables[tableIndex];
             auto ctiDefault = patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + record.defaultOffset]);
             for (auto& location : unlinkedTable.m_offsetTable.values()) {
@@ -907,4 +915,25 @@
     }
 
+#if USE(JSVALUE64)
+    auto finalizeICs = [&] (auto& generators) {
+        for (auto& gen : generators) {
+            gen.m_unlinkedStubInfo->start = patchBuffer.locationOf<JITStubRoutinePtrTag>(gen.m_start);
+            gen.m_unlinkedStubInfo->doneLocation = patchBuffer.locationOf<JSInternalPtrTag>(gen.m_done);
+            gen.m_unlinkedStubInfo->slowPathStartLocation = patchBuffer.locationOf<JITStubRoutinePtrTag>(gen.m_slowPathBegin);
+        }
+    };
+
+    finalizeICs(m_getByIds);
+    finalizeICs(m_getByVals);
+    finalizeICs(m_getByIdsWithThis);
+    finalizeICs(m_putByIds);
+    finalizeICs(m_putByVals);
+    finalizeICs(m_delByIds);
+    finalizeICs(m_delByVals);
+    finalizeICs(m_inByIds);
+    finalizeICs(m_inByVals);
+    finalizeICs(m_instanceOfs);
+    finalizeICs(m_privateBrandAccesses);
+#else
     finalizeInlineCaches(m_getByIds, patchBuffer);
     finalizeInlineCaches(m_getByVals, patchBuffer);
@@ -918,19 +947,23 @@
     finalizeInlineCaches(m_instanceOfs, patchBuffer);
     finalizeInlineCaches(m_privateBrandAccesses, patchBuffer);
+#endif
 
     for (auto& compilationInfo : m_callCompilationInfo) {
+#if USE(JSVALUE64)
+        UnlinkedCallLinkInfo& info = *compilationInfo.unlinkedCallLinkInfo;
+        info.doneLocation = patchBuffer.locationOf<JSInternalPtrTag>(compilationInfo.doneLocation);
+#else
         CallLinkInfo& info = *compilationInfo.callLinkInfo;
         info.setCodeLocations(
             patchBuffer.locationOf<JSInternalPtrTag>(compilationInfo.slowPathStart),
             patchBuffer.locationOf<JSInternalPtrTag>(compilationInfo.doneLocation));
-    }
-
-    {
-        JITCodeMapBuilder jitCodeMapBuilder;
-        for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
-            if (m_labels[bytecodeOffset].isSet())
-                jitCodeMapBuilder.append(BytecodeIndex(bytecodeOffset), patchBuffer.locationOf<JSEntryPtrTag>(m_labels[bytecodeOffset]));
-        }
-        m_codeBlock->setJITCodeMap(jitCodeMapBuilder.finalize());
+#endif
+
+    }
+
+    JITCodeMapBuilder jitCodeMapBuilder;
+    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
+        if (m_labels[bytecodeOffset].isSet())
+            jitCodeMapBuilder.append(BytecodeIndex(bytecodeOffset), patchBuffer.locationOf<JSEntryPtrTag>(m_labels[bytecodeOffset]));
     }
 
@@ -941,6 +974,7 @@
 
     if (UNLIKELY(m_compilation)) {
+        // FIXME: should we make the bytecode profiler know about UnlinkedCodeBlock?
        if (Options::disassembleBaselineForProfiler())
             m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
-        m_vm->m_perBytecodeProfiler->addCompilation(m_codeBlock, *m_compilation);
+        m_vm->m_perBytecodeProfiler->addCompilation(m_profiledCodeBlock, *m_compilation);
     }
@@ -949,16 +983,31 @@
         m_pcToCodeOriginMap = makeUnique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), patchBuffer);
 
+    // FIXME: Make a version of CodeBlockWithJITType that knows about UnlinkedCodeBlock.
     CodeRef<JSEntryPtrTag> result = FINALIZE_CODE(
         patchBuffer, JSEntryPtrTag,
-        "Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITType::BaselineJIT)).data());
+        "Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_profiledCodeBlock, JITType::BaselineJIT)).data());
 
     MacroAssemblerCodePtr<JSEntryPtrTag> withArityCheck = patchBuffer.locationOf<JSEntryPtrTag>(m_arityCheck);
-    m_jitCode = adoptRef(*new DirectJITCode(result, withArityCheck, JITType::BaselineJIT));
+    m_jitCode = adoptRef(*new BaselineJITCode(result, withArityCheck));
+
+    m_jitCode->m_unlinkedCalls = WTFMove(m_unlinkedCalls);
+    m_jitCode->m_evalCallLinkInfos = WTFMove(m_evalCallLinkInfos);
+    m_jitCode->m_unlinkedStubInfos = WTFMove(m_unlinkedStubInfos);
+    m_jitCode->m_switchJumpTables = WTFMove(m_switchJumpTables);
+    m_jitCode->m_stringSwitchJumpTables = WTFMove(m_stringSwitchJumpTables);
+    m_jitCode->m_jitCodeMap = jitCodeMapBuilder.finalize();
+    m_jitCode->adoptMathICs(m_mathICs);
+    m_jitCode->m_constantPool = WTFMove(m_constantPool);
+#if USE(JSVALUE64)
+    m_jitCode->m_isShareable = m_isShareable;
+#else
+    m_jitCode->m_isShareable = false;
+#endif
 
     if (JITInternal::verbose)
-        dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start().untaggedPtr(), result.executableMemory()->end().untaggedPtr());
-}
-
-CompilationResult JIT::finalizeOnMainThread()
+        dataLogF("JIT generated code for %p at [%p, %p).\n", m_unlinkedCodeBlock, result.executableMemory()->start().untaggedPtr(), result.executableMemory()->end().untaggedPtr());
+}
+
+CompilationResult JIT::finalizeOnMainThread(CodeBlock* codeBlock)
 {
     RELEASE_ASSERT(!isCompilationThread());
@@ -969,23 +1018,12 @@
     m_linkBuffer->runMainThreadFinalizationTasks();
 
-    {
-        ConcurrentJSLocker locker(m_codeBlock->m_lock);
-        m_codeBlock->shrinkToFit(locker, CodeBlock::ShrinkMode::LateShrink);
-    }
-
-    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
-        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
-        // FIXME: <rdar://problem/39433318>.
-        handler.nativeCode = m_codeBlock->jitCodeMap().find(BytecodeIndex(handler.target)).retagged<ExceptionHandlerPtrTag>();
-    }
-
     if (m_pcToCodeOriginMap)
-        m_codeBlock->setPCToCodeOriginMap(WTFMove(m_pcToCodeOriginMap));
+        m_jitCode->m_pcToCodeOriginMap = WTFMove(m_pcToCodeOriginMap);
 
     m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->add(
         static_cast<double>(m_jitCode->size()) /
-        static_cast<double>(m_codeBlock->instructionsSize()));
-
-    m_codeBlock->setJITCode(m_jitCode.releaseNonNull());
+        static_cast<double>(m_unlinkedCodeBlock->instructionsSize()));
+
+    codeBlock->setupWithUnlinkedBaselineCode(m_jitCode.releaseNonNull());
 
     return CompilationSuccessful;
@@ -999,8 +1037,8 @@
 }
 
-CompilationResult JIT::privateCompile(JITCompilationEffort effort)
+CompilationResult JIT::privateCompile(CodeBlock* codeBlock, JITCompilationEffort effort)
 {
     doMainThreadPreparationBeforeCompile();
     compileAndLinkWithoutFinalizing(effort);
-    return finalizeOnMainThread();
+    return finalizeOnMainThread(codeBlock);
 }
@@ -1043,14 +1081,24 @@
 }
 
+unsigned JIT::frameRegisterCountFor(UnlinkedCodeBlock* codeBlock)
+{
+    ASSERT(static_cast<unsigned>(codeBlock->numCalleeLocals()) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->numCalleeLocals())));
+
+    return roundLocalRegisterCountForFramePointerOffset(codeBlock->numCalleeLocals() + maxFrameExtentForSlowPathCallInRegisters);
+}
+
 unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
 {
-    ASSERT(static_cast<unsigned>(codeBlock->numCalleeLocals()) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->numCalleeLocals())));
-
-    return roundLocalRegisterCountForFramePointerOffset(codeBlock->numCalleeLocals() + maxFrameExtentForSlowPathCallInRegisters);
+    return frameRegisterCountFor(codeBlock->unlinkedCodeBlock());
+}
+
+int JIT::stackPointerOffsetFor(UnlinkedCodeBlock* codeBlock)
+{
+    return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
 }
 
 int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
 {
-    return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
+    return stackPointerOffsetFor(codeBlock->unlinkedCodeBlock());
 }
 