Changeset 190860 in webkit for trunk/Source/JavaScriptCore/ftl/FTLSlowPathCall.cpp
- Timestamp: Oct 12, 2015, 10:56:26 AM
- File: 1 edited
Legend:
- Unmodified (lines prefixed with a space)
- Added (lines prefixed with '+')
- Removed (lines prefixed with '-')
trunk/Source/JavaScriptCore/ftl/FTLSlowPathCall.cpp
r188932 → r190860

  /*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
…
 #include "CCallHelpers.h"
 #include "FTLState.h"
+#include "FTLThunks.h"
 #include "GPRInfo.h"
 #include "JSCInlines.h"
…
 namespace JSC { namespace FTL {

-namespace {
-
 // This code relies on us being 64-bit. FTL is currently always 64-bit.
 static const size_t wordSize = 8;

-// This will be an RAII thingy that will set up the necessary stack sizes and offsets and such.
-class CallContext {
-public:
-    CallContext(
-        State& state, const RegisterSet& usedRegisters, CCallHelpers& jit,
-        unsigned numArgs, GPRReg returnRegister)
-        : m_state(state)
-        , m_usedRegisters(usedRegisters)
-        , m_jit(jit)
-        , m_numArgs(numArgs)
-        , m_returnRegister(returnRegister)
+SlowPathCallContext::SlowPathCallContext(
+    RegisterSet usedRegisters, CCallHelpers& jit, unsigned numArgs, GPRReg returnRegister)
+    : m_jit(jit)
+    , m_numArgs(numArgs)
+    , m_returnRegister(returnRegister)
 {
     // We don't care that you're using callee-save, stack, or hardware registers.
-    m_usedRegisters.exclude(RegisterSet::stackRegisters());
-    m_usedRegisters.exclude(RegisterSet::reservedHardwareRegisters());
-    m_usedRegisters.exclude(RegisterSet::calleeSaveRegisters());
+    usedRegisters.exclude(RegisterSet::stackRegisters());
+    usedRegisters.exclude(RegisterSet::reservedHardwareRegisters());
+    usedRegisters.exclude(RegisterSet::calleeSaveRegisters());

     // The return register doesn't need to be saved.
     if (m_returnRegister != InvalidGPRReg)
-        m_usedRegisters.clear(m_returnRegister);
+        usedRegisters.clear(m_returnRegister);

     size_t stackBytesNeededForReturnAddress = wordSize;

     m_offsetToSavingArea =
         (std::max(m_numArgs, NUMBER_OF_ARGUMENT_REGISTERS) - NUMBER_OF_ARGUMENT_REGISTERS) * wordSize;

     for (unsigned i = std::min(NUMBER_OF_ARGUMENT_REGISTERS, numArgs); i--;)
         m_argumentRegisters.set(GPRInfo::toArgumentRegister(i));
     m_callingConventionRegisters.merge(m_argumentRegisters);
     if (returnRegister != InvalidGPRReg)
         m_callingConventionRegisters.set(GPRInfo::returnValueGPR);
-    m_callingConventionRegisters.filter(m_usedRegisters);
+    m_callingConventionRegisters.filter(usedRegisters);

     unsigned numberOfCallingConventionRegisters =
         m_callingConventionRegisters.numberOfSetRegisters();

     size_t offsetToThunkSavingArea =
         m_offsetToSavingArea +
         numberOfCallingConventionRegisters * wordSize;

     m_stackBytesNeeded =
         offsetToThunkSavingArea +
         stackBytesNeededForReturnAddress +
-        (m_usedRegisters.numberOfSetRegisters() - numberOfCallingConventionRegisters) * wordSize;
+        (usedRegisters.numberOfSetRegisters() - numberOfCallingConventionRegisters) * wordSize;

     m_stackBytesNeeded = (m_stackBytesNeeded + stackAlignmentBytes() - 1) & ~(stackAlignmentBytes() - 1);

     m_jit.subPtr(CCallHelpers::TrustedImm32(m_stackBytesNeeded), CCallHelpers::stackPointerRegister);

-    m_thunkSaveSet = m_usedRegisters;
+    m_thunkSaveSet = usedRegisters;

     // This relies on all calling convention registers also being temp registers.
     unsigned stackIndex = 0;
     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
         GPRReg reg = GPRInfo::toRegister(i);
         if (!m_callingConventionRegisters.get(reg))
             continue;
         m_jit.storePtr(reg, CCallHelpers::Address(CCallHelpers::stackPointerRegister, m_offsetToSavingArea + (stackIndex++) * wordSize));
         m_thunkSaveSet.clear(reg);
     }

     m_offset = offsetToThunkSavingArea;
 }

-    ~CallContext()
+SlowPathCallContext::~SlowPathCallContext()
 {
     if (m_returnRegister != InvalidGPRReg)
         m_jit.move(GPRInfo::returnValueGPR, m_returnRegister);

     unsigned stackIndex = 0;
     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
         GPRReg reg = GPRInfo::toRegister(i);
         if (!m_callingConventionRegisters.get(reg))
             continue;
         m_jit.loadPtr(CCallHelpers::Address(CCallHelpers::stackPointerRegister, m_offsetToSavingArea + (stackIndex++) * wordSize), reg);
     }

     m_jit.addPtr(CCallHelpers::TrustedImm32(m_stackBytesNeeded), CCallHelpers::stackPointerRegister);
 }

-    RegisterSet usedRegisters() const
-    {
-        return m_thunkSaveSet;
-    }
-
-    ptrdiff_t offset() const
-    {
-        return m_offset;
-    }
-
-    SlowPathCallKey keyWithTarget(void* callTarget) const
-    {
-        return SlowPathCallKey(usedRegisters(), callTarget, m_argumentRegisters, offset());
-    }
-
-    MacroAssembler::Call makeCall(void* callTarget, MacroAssembler::JumpList* exceptionTarget)
-    {
-        MacroAssembler::Call result = m_jit.call();
-        m_state.finalizer->slowPathCalls.append(SlowPathCall(
-            result, keyWithTarget(callTarget)));
-        if (exceptionTarget)
-            exceptionTarget->append(m_jit.emitExceptionCheck());
-        return result;
-    }
-
-private:
-    State& m_state;
-    RegisterSet m_usedRegisters;
-    RegisterSet m_argumentRegisters;
-    RegisterSet m_callingConventionRegisters;
-    CCallHelpers& m_jit;
-    unsigned m_numArgs;
-    GPRReg m_returnRegister;
-    size_t m_offsetToSavingArea;
-    size_t m_stackBytesNeeded;
-    RegisterSet m_thunkSaveSet;
-    ptrdiff_t m_offset;
-};
-
-} // anonymous namespace
-
-void storeCodeOrigin(State& state, CCallHelpers& jit, CodeOrigin codeOrigin)
-{
-    if (!codeOrigin.isSet())
-        return;
-
-    CallSiteIndex callSite = state.jitCode->common.addCodeOrigin(codeOrigin);
-    unsigned locationBits = callSite.bits();
-    jit.store32(
-        CCallHelpers::TrustedImm32(locationBits),
-        CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
-}
-
-MacroAssembler::Call callOperation(
-    State& state, const RegisterSet& usedRegisters, CCallHelpers& jit,
-    CodeOrigin codeOrigin, MacroAssembler::JumpList* exceptionTarget,
-    J_JITOperation_ESsiCI operation, GPRReg result, StructureStubInfo* stubInfo,
-    GPRReg object, const UniquedStringImpl* uid)
-{
-    storeCodeOrigin(state, jit, codeOrigin);
-    CallContext context(state, usedRegisters, jit, 4, result);
-    jit.setupArgumentsWithExecState(
-        CCallHelpers::TrustedImmPtr(stubInfo), object, CCallHelpers::TrustedImmPtr(uid));
-    return context.makeCall(bitwise_cast<void*>(operation), exceptionTarget);
-}
-
-MacroAssembler::Call callOperation(
-    State& state, const RegisterSet& usedRegisters, CCallHelpers& jit,
-    CodeOrigin codeOrigin, MacroAssembler::JumpList* exceptionTarget,
-    J_JITOperation_ESsiJI operation, GPRReg result, StructureStubInfo* stubInfo,
-    GPRReg object, UniquedStringImpl* uid)
-{
-    storeCodeOrigin(state, jit, codeOrigin);
-    CallContext context(state, usedRegisters, jit, 4, result);
-    jit.setupArgumentsWithExecState(
-        CCallHelpers::TrustedImmPtr(stubInfo), object,
-        CCallHelpers::TrustedImmPtr(uid));
-    return context.makeCall(bitwise_cast<void*>(operation), exceptionTarget);
-}
-
-MacroAssembler::Call callOperation(
-    State& state, const RegisterSet& usedRegisters, CCallHelpers& jit,
-    CodeOrigin codeOrigin, MacroAssembler::JumpList* exceptionTarget,
-    V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, GPRReg value,
-    GPRReg object, UniquedStringImpl* uid)
-{
-    storeCodeOrigin(state, jit, codeOrigin);
-    CallContext context(state, usedRegisters, jit, 5, InvalidGPRReg);
-    jit.setupArgumentsWithExecState(
-        CCallHelpers::TrustedImmPtr(stubInfo), value, object,
-        CCallHelpers::TrustedImmPtr(uid));
-    return context.makeCall(bitwise_cast<void*>(operation), exceptionTarget);
-}
+SlowPathCallKey SlowPathCallContext::keyWithTarget(void* callTarget) const
+{
+    return SlowPathCallKey(m_thunkSaveSet, callTarget, m_argumentRegisters, m_offset);
+}
+
+SlowPathCall SlowPathCallContext::makeCall(void* callTarget)
+{
+    SlowPathCall result = SlowPathCall(m_jit.call(), keyWithTarget(callTarget));
+
+    m_jit.addLinkTask(
+        [result] (LinkBuffer& linkBuffer) {
+            VM& vm = linkBuffer.vm();
+
+            MacroAssemblerCodeRef thunk =
+                vm.ftlThunks->getSlowPathCallThunk(vm, result.key());
+
+            linkBuffer.link(result.call(), CodeLocationLabel(thunk.code()));
+        });
+
+    return result;
+}
+
+CallSiteIndex callSiteIndexForCodeOrigin(State& state, CodeOrigin codeOrigin)
+{
+    if (codeOrigin)
+        return state.jitCode->common.addCodeOrigin(codeOrigin);
+    return CallSiteIndex();
+}
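For orientation, the sketch below shows roughly how a caller could drive the new interface after this change. It mirrors the removed callOperation() overload for J_JITOperation_ESsiJI, rebuilt on SlowPathCallContext; the emitGetByIdSlowPathCall name is invented for illustration, the ArgumentCount store reproduces what the removed storeCodeOrigin() helper did, and only SlowPathCallContext, makeCall(), and callSiteIndexForCodeOrigin() come from the code above. It is not part of this changeset.

// Hypothetical caller sketch (assumes it lives in FTL JIT code with the same
// includes as FTLSlowPathCall.cpp); the helper name is an assumption.
static SlowPathCall emitGetByIdSlowPathCall(
    State& state, const RegisterSet& usedRegisters, CCallHelpers& jit,
    CodeOrigin codeOrigin, J_JITOperation_ESsiJI operation, GPRReg result,
    StructureStubInfo* stubInfo, GPRReg object, UniquedStringImpl* uid)
{
    // Record the call site so exception handling and OSR exit can map this
    // machine call back to its bytecode origin (what storeCodeOrigin() did).
    if (codeOrigin.isSet()) {
        CallSiteIndex callSite = callSiteIndexForCodeOrigin(state, codeOrigin);
        jit.store32(
            CCallHelpers::TrustedImm32(callSite.bits()),
            CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
    }

    // Four arguments (ExecState*, stubInfo, object, uid); the RAII context
    // saves the live registers around the call and restores them when it
    // goes out of scope.
    SlowPathCallContext context(usedRegisters, jit, 4, result);
    jit.setupArgumentsWithExecState(
        CCallHelpers::TrustedImmPtr(stubInfo), object,
        CCallHelpers::TrustedImmPtr(uid));
    return context.makeCall(bitwise_cast<void*>(operation));
}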
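The stack reservation in the constructor is rounded up with the usual power-of-two trick, (x + a - 1) & ~(a - 1). A tiny standalone illustration follows; the 16-byte figure is an assumption about stackAlignmentBytes() on the 64-bit targets the FTL supports, not something this changeset states.

#include <cstddef>
#include <cstdio>

// Rounds bytes up to the next multiple of alignment; only valid when
// alignment is a power of two, which stack alignments are.
static size_t roundUpToAlignment(size_t bytes, size_t alignment)
{
    return (bytes + alignment - 1) & ~(alignment - 1);
}

int main()
{
    // e.g. 5 saved words of 8 bytes = 40 bytes -> 48 bytes of stack reserved.
    std::printf("%zu\n", roundUpToAlignment(40, 16)); // prints 48
    return 0;
}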