/*
 * Copyright (C) 2010-2022 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ThunkGenerators.h"

#include "JITOperations.h"
#include "JITThunks.h"
#include "JSBoundFunction.h"
#include "JSRemoteFunction.h"
#include "LLIntThunks.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "SpecializedThunkJIT.h"
#include "ThunkGenerator.h"
#include <wtf/InlineASM.h>
#include <wtf/StdIntExtras.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>

#if ENABLE(JIT)

namespace JSC {

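// The two generators below emit shared exception-dispatch trampolines: they spill
// the callee-saved registers into the EntryFrame's buffer, call a C++ lookup
// operation with the VM as its only argument, and then jump to the handler that
// the operation selected.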
MacroAssemblerCodeRef<JITThunkPtrTag> handleExceptionGenerator(VM& vm)
{
    CCallHelpers jit;

    jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm.topEntryFrame, GPRInfo::argumentGPR0);

    jit.move(CCallHelpers::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
    jit.prepareCallOperation(vm);
    CCallHelpers::Call operation = jit.call(OperationPtrTag);
    jit.jumpToExceptionHandler(vm);

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::ExtraCTIThunk);
    patchBuffer.link(operation, FunctionPtr<OperationPtrTag>(operationLookupExceptionHandler));
    return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "handleException");
}

MacroAssemblerCodeRef<JITThunkPtrTag> handleExceptionWithCallFrameRollbackGenerator(VM& vm)
{
    CCallHelpers jit;

    jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm.topEntryFrame, GPRInfo::argumentGPR0);

    jit.move(CCallHelpers::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
    jit.prepareCallOperation(vm);
    CCallHelpers::Call operation = jit.call(OperationPtrTag);
    jit.jumpToExceptionHandler(vm);

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::ExtraCTIThunk);
    patchBuffer.link(operation, FunctionPtr<OperationPtrTag>(operationLookupExceptionHandlerFromCallerFrame));
    return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "handleExceptionWithCallFrameRollback");
}

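// Used when a CTI thunk has already pushed its prologue but must bail out to the
// generic handler: unwind the thunk's stack preserves (plus, on x86-64, the return
// PC that emitCTIThunkEpilogue leaves behind) and fall through to the
// handleException thunk above.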
MacroAssemblerCodeRef<JITThunkPtrTag> popThunkStackPreservesAndHandleExceptionGenerator(VM& vm)
{
    CCallHelpers jit;

    jit.emitCTIThunkEpilogue();
#if CPU(X86_64) // On the x86, emitCTIThunkEpilogue leaves the return PC on the stack. Drop it.
    jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CPURegister)), X86Registers::esp);
#endif

    CCallHelpers::Jump continuation = jit.jump();

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::ExtraCTIThunk);
    auto handler = vm.getCTIStub(handleExceptionGenerator);
    patchBuffer.link(continuation, CodeLocationLabel(handler.retaggedCode<NoPtrTag>()));
    return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "popThunkStackPreservesAndHandleException");
}

MacroAssemblerCodeRef<JITThunkPtrTag> checkExceptionGenerator(VM& vm)
{
    CCallHelpers jit;

    // This thunk is tail called from other thunks, and the return address is always already tagged

    // Exception fuzzing can call a runtime function. So, we need to preserve the return address here.
    if (UNLIKELY(Options::useExceptionFuzz()))
        jit.emitCTIThunkPrologue(/* returnAddressAlreadyTagged: */ true);

    CCallHelpers::Jump handleException = jit.emitNonPatchableExceptionCheck(vm);

    if (UNLIKELY(Options::useExceptionFuzz()))
        jit.emitCTIThunkEpilogue();
    jit.ret();

    auto handlerGenerator = Options::useExceptionFuzz() ? popThunkStackPreservesAndHandleExceptionGenerator : handleExceptionGenerator;
#if CPU(X86_64)
    if (!Options::useExceptionFuzz()) {
        handleException.link(&jit);
        jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CPURegister)), X86Registers::esp); // pop return address.
        handleException = jit.jump();
    }
#endif

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::ExtraCTIThunk);
    patchBuffer.link(handleException, CodeLocationLabel(vm.getCTIStub(handlerGenerator).retaggedCode<NoPtrTag>()));
    return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "CheckException");
}

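// On ARM64E (and only with assertions enabled and the JIT cage off), verify that
// a code pointer we are about to use is non-null and carries the expected PAC
// tag; on all other configurations this compiles to nothing.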
template<typename TagType>
inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR, TagType tag)
{
#if CPU(ARM64E)
    if (!ASSERT_ENABLED)
        return;
    if (!Options::useJITCage()) {
        CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
        jit.abortWithReason(TGInvalidPointer);
        isNonZero.link(&jit);
        jit.pushToSave(pointerGPR);
        jit.untagPtr(tag, pointerGPR);
        jit.validateUntaggedPtr(pointerGPR);
        jit.popToRestore(pointerGPR);
    }
#else
    UNUSED_PARAM(jit);
    UNUSED_PARAM(pointerGPR);
    UNUSED_PARAM(tag);
#endif
}

// We will jump here if the JIT code tries to make a call, but the
// linking helper (C++ code) decides to throw an exception instead.
MacroAssemblerCodeRef<JITThunkPtrTag> throwExceptionFromCallSlowPathGenerator(VM& vm)
{
    CCallHelpers jit;

    // The call pushed a return address, so we need to pop it back off to re-align the stack,
    // even though we won't use it.
    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);

    jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm.topEntryFrame, GPRInfo::argumentGPR0);

    jit.setupArguments<decltype(operationLookupExceptionHandler)>(CCallHelpers::TrustedImmPtr(&vm));
    jit.prepareCallOperation(vm);
    jit.move(CCallHelpers::TrustedImmPtr(tagCFunction<OperationPtrTag>(operationLookupExceptionHandler)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0, OperationPtrTag);
    jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
    jit.jumpToExceptionHandler(vm);

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::Thunk);
    return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "Throw exception from call slow path thunk");
}

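// Shared tail of the call-link thunks below: build a frame, call the given
// slow-path operation, then jump to the code pointer it returns. The second
// return value selects whether the frame must first be torn down for a tail
// call (see the comment near the end of this function).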
static void slowPathFor(CCallHelpers& jit, VM& vm, Sprt_JITOperation_EGCli slowPathFunction)
{
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
#if OS(WINDOWS) && CPU(X86_64)
    // Windows X86_64 needs some space pointed to by arg0 for return types larger than 64 bits.
    // Other argument values are shifted by 1. Use space on the stack for our two return values.
    // Moving the stack down maxFrameExtentForSlowPathCall bytes gives us room for our 3 arguments
    // and space for the 16-byte return area.
    jit.addPtr(CCallHelpers::TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), CCallHelpers::stackPointerRegister);
    static_assert(GPRInfo::regT2 != GPRInfo::argumentGPR0);
    static_assert(GPRInfo::regT3 != GPRInfo::argumentGPR0);
    jit.move(GPRInfo::regT2, GPRInfo::argumentGPR0);
    jit.move(GPRInfo::regT3, GPRInfo::argumentGPR2);
    jit.move(GPRInfo::argumentGPR0, GPRInfo::argumentGPR3);
    jit.addPtr(CCallHelpers::TrustedImm32(32), CCallHelpers::stackPointerRegister, GPRInfo::argumentGPR0);
    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
    jit.move(CCallHelpers::TrustedImmPtr(tagCFunction<OperationPtrTag>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0, OperationPtrTag);
    jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR, 8), GPRInfo::returnValueGPR2);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR), GPRInfo::returnValueGPR);
    jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#else
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArguments<decltype(slowPathFunction)>(GPRInfo::regT3, GPRInfo::regT2);
    jit.move(CCallHelpers::TrustedImmPtr(tagCFunction<OperationPtrTag>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0, OperationPtrTag);
    jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#endif

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returner thingy.
    // 3) The function to call.
    // The second return value GPR will hold a non-zero value for tail calls.

    emitPointerValidation(jit, GPRInfo::returnValueGPR, JSEntryPtrTag);
    jit.emitFunctionEpilogue();
    jit.untagReturnAddress();

    RELEASE_ASSERT(reinterpret_cast<void*>(KeepTheFrame) == reinterpret_cast<void*>(0));
    CCallHelpers::Jump doNotTrash = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::returnValueGPR2);

    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
    jit.prepareForTailCallSlow(GPRInfo::returnValueGPR);

    doNotTrash.link(&jit);
    jit.farJump(GPRInfo::returnValueGPR, JSEntryPtrTag);
}

MacroAssemblerCodeRef<JITThunkPtrTag> linkCallThunkGenerator(VM& vm)
{
    // The return address is on the stack or in the link register. We will hence
    // save the return address to the call frame while we make a C++ function call
    // to perform linking and lazy compilation if necessary. We expect the callee
    // to be in regT0/regT1 (payload/tag), the CallFrame to have already
    // been adjusted, and all other registers to be available for use.
    CCallHelpers jit;

    slowPathFor(jit, vm, operationLinkCall);

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::Thunk);
    return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "Link call slow path thunk");
}

// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
MacroAssemblerCodeRef<JITThunkPtrTag> linkPolymorphicCallThunkGenerator(VM& vm)
{
    CCallHelpers jit;

    slowPathFor(jit, vm, operationLinkPolymorphicCall);

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::Thunk);
    return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "Link polymorphic call slow path thunk");
}

// FIXME: We should distinguish between a megamorphic virtual call vs. a slow
// path virtual call so that we can enable fast tail calls for megamorphic
// virtual calls by using the shuffler.
// https://bugs.webkit.org/show_bug.cgi?id=148831
static MacroAssemblerCodeRef<JITThunkPtrTag> virtualThunkFor(VM& vm, CallMode mode, CodeSpecializationKind kind)
{
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit;

    bool isTailCall = mode == CallMode::Tail;

    CCallHelpers::JumpList slowCase;

    // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
    // slow path execution for the profiler.
    jit.add32(
        CCallHelpers::TrustedImm32(1),
        CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount()));

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.

#if USE(JSVALUE64)
    if (isTailCall) {
        // Tail calls could have clobbered the GPRInfo::notCellMaskRegister because they
        // restore callee saved registers before getting here. So, let's materialize
        // the NotCellMask in a temp register and use the temp instead.
        slowCase.append(jit.branchIfNotCell(GPRInfo::regT0, DoNotHaveTagRegisters));
    } else
        slowCase.append(jit.branchIfNotCell(GPRInfo::regT0));
#else
    slowCase.append(jit.branchIfNotCell(GPRInfo::regT1));
#endif
    auto notJSFunction = jit.branchIfNotFunction(GPRInfo::regT0);

    // Now we know we have a JSFunction.

    jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutableOrRareData()), GPRInfo::regT0);
    auto hasExecutable = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT0, CCallHelpers::TrustedImm32(JSFunction::rareDataTag));
    jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, FunctionRareData::offsetOfExecutable() - JSFunction::rareDataTag), GPRInfo::regT0);
    hasExecutable.link(&jit);
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind)),
        GPRInfo::regT4);
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));

    // Now we know that we have a CodeBlock, and we're committed to making a fast call.

    auto isNative = jit.branchIfNotType(GPRInfo::regT0, FunctionExecutableType);
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, FunctionExecutable::offsetOfCodeBlockFor(kind)),
        GPRInfo::regT5);

    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT4, JSEntryPtrTag);
    if (isTailCall) {
        jit.preserveReturnAddressAfterCall(GPRInfo::regT0);
        jit.prepareForTailCallSlow(GPRInfo::regT4, GPRInfo::regT5);
    }
    jit.storePtr(GPRInfo::regT5, CCallHelpers::calleeFrameCodeBlockBeforeTailCall());
    jit.farJump(GPRInfo::regT4, JSEntryPtrTag);

    // NullSetterFunctionType does not get the fast path support. But it is OK since using NullSetterFunctionType is extremely rare.
    notJSFunction.link(&jit);
    slowCase.append(jit.branchIfNotType(GPRInfo::regT0, InternalFunctionType));
    void* executableAddress = vm.getCTIInternalFunctionTrampolineFor(kind).executableAddress();
    jit.move(CCallHelpers::TrustedImmPtr(executableAddress), GPRInfo::regT4);

    isNative.link(&jit);
    emitPointerValidation(jit, GPRInfo::regT4, JSEntryPtrTag);
    if (isTailCall) {
        jit.preserveReturnAddressAfterCall(GPRInfo::regT0);
        jit.prepareForTailCallSlow(GPRInfo::regT4);
    }
    jit.farJump(GPRInfo::regT4, JSEntryPtrTag);

    slowCase.link(&jit);

    // Here we don't know anything, so revert to the full slow path.
    slowPathFor(jit, vm, operationVirtualCall);

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::VirtualThunk);
    return FINALIZE_THUNK(
        patchBuffer, JITThunkPtrTag,
        "Virtual %s slow path thunk",
        mode == CallMode::Regular ? "call" : mode == CallMode::Tail ? "tail call" : "construct");
}

static MacroAssemblerCodeRef<JITThunkPtrTag> virtualThunkForRegularCall(VM& vm)
{
    return virtualThunkFor(vm, CallMode::Regular, CodeForCall);
}

static MacroAssemblerCodeRef<JITThunkPtrTag> virtualThunkForTailCall(VM& vm)
{
    return virtualThunkFor(vm, CallMode::Tail, CodeForCall);
}

static MacroAssemblerCodeRef<JITThunkPtrTag> virtualThunkForConstructConstruct(VM& vm)
{
    return virtualThunkFor(vm, CallMode::Construct, CodeForConstruct);
}

MacroAssemblerCodeRef<JITStubRoutinePtrTag> virtualThunkFor(VM& vm, CallMode callMode)
{
    auto generator = [&] () -> ThunkGenerator {
        switch (callMode) {
        case CallMode::Regular:
            return virtualThunkForRegularCall;
        case CallMode::Tail:
            return virtualThunkForTailCall;
        case CallMode::Construct:
            return virtualThunkForConstructConstruct;
        }
        RELEASE_ASSERT_NOT_REACHED();
    };
    return vm.getCTIStub(generator()).retagged<JITStubRoutinePtrTag>();
}

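// EnterViaCall sets up a fresh frame; the EnterViaJump variants are reached by a
// jump from a specialized thunk, either with the prior tag registers saved on the
// stack (and restored below) or not.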
enum ThunkEntryType { EnterViaCall, EnterViaJumpWithSavedTags, EnterViaJumpWithoutSavedTags };
enum class ThunkFunctionType { JSFunction, InternalFunction };

static MacroAssemblerCodeRef<JITThunkPtrTag> nativeForGenerator(VM& vm, ThunkFunctionType thunkFunctionType, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
{
    // FIXME: This should be able to log ShadowChicken prologue packets.
    // https://bugs.webkit.org/show_bug.cgi?id=155689

    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit(&vm);

    switch (entryType) {
    case EnterViaCall:
        jit.emitFunctionPrologue();
        break;
    case EnterViaJumpWithSavedTags:
#if USE(JSVALUE64)
        // We're coming from a specialized thunk that has saved the prior tag registers' contents.
        // Restore them now.
        jit.popPair(JSInterfaceJIT::numberTagRegister, JSInterfaceJIT::notCellMaskRegister);
#endif
        break;
    case EnterViaJumpWithoutSavedTags:
        jit.move(JSInterfaceJIT::framePointerRegister, JSInterfaceJIT::stackPointerRegister);
        break;
    }

    jit.emitPutToCallFrameHeader(nullptr, CallFrameSlot::codeBlock);
    jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);

    // Host function signature: f(JSGlobalObject*, CallFrame*);
#if CPU(X86_64) && OS(WINDOWS)
    // Leave space for the callee parameter home addresses.
    // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
    jit.subPtr(CCallHelpers::TrustedImm32(4 * sizeof(int64_t)), CCallHelpers::stackPointerRegister);
#elif CPU(MIPS)
    // Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments.
    jit.subPtr(CCallHelpers::TrustedImm32(16), CCallHelpers::stackPointerRegister);
#endif

    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, GPRInfo::argumentGPR2);

    if (thunkFunctionType == ThunkFunctionType::JSFunction) {
        jit.loadPtr(CCallHelpers::Address(GPRInfo::argumentGPR2, JSFunction::offsetOfScopeChain()), GPRInfo::argumentGPR0);
        jit.loadPtr(CCallHelpers::Address(GPRInfo::argumentGPR2, JSFunction::offsetOfExecutableOrRareData()), GPRInfo::argumentGPR2);
        auto hasExecutable = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::argumentGPR2, CCallHelpers::TrustedImm32(JSFunction::rareDataTag));
        jit.loadPtr(CCallHelpers::Address(GPRInfo::argumentGPR2, FunctionRareData::offsetOfExecutable() - JSFunction::rareDataTag), GPRInfo::argumentGPR2);
        hasExecutable.link(&jit);
        if (Options::useJITCage()) {
            jit.loadPtr(CCallHelpers::Address(GPRInfo::argumentGPR2, executableOffsetToFunction), GPRInfo::argumentGPR2);
            auto operationCall = jit.call(OperationPtrTag);
            jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(operationCall, FunctionPtr<OperationPtrTag>(vmEntryHostFunction));
            });
        } else
            jit.call(CCallHelpers::Address(GPRInfo::argumentGPR2, executableOffsetToFunction), HostFunctionPtrTag);
    } else {
        ASSERT(thunkFunctionType == ThunkFunctionType::InternalFunction);
        jit.loadPtr(CCallHelpers::Address(GPRInfo::argumentGPR2, InternalFunction::offsetOfGlobalObject()), GPRInfo::argumentGPR0);
        if (Options::useJITCage()) {
            jit.loadPtr(CCallHelpers::Address(GPRInfo::argumentGPR2, InternalFunction::offsetOfNativeFunctionFor(kind)), GPRInfo::argumentGPR2);
            auto operationCall = jit.call(OperationPtrTag);
            jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(operationCall, FunctionPtr<OperationPtrTag>(vmEntryHostFunction));
            });
        } else
            jit.call(CCallHelpers::Address(GPRInfo::argumentGPR2, InternalFunction::offsetOfNativeFunctionFor(kind)), HostFunctionPtrTag);
    }

#if CPU(X86_64) && OS(WINDOWS)
    jit.addPtr(CCallHelpers::TrustedImm32(4 * sizeof(int64_t)), CCallHelpers::stackPointerRegister);
#elif CPU(MIPS)
    jit.addPtr(CCallHelpers::TrustedImm32(16), CCallHelpers::stackPointerRegister);
#endif

    // Check for an exception
#if USE(JSVALUE64)
    jit.loadPtr(vm.addressOfException(), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTestPtr(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(vm.addressOfException()),
        JSInterfaceJIT::TrustedImm32(0));
#endif

    jit.emitFunctionEpilogue();
    // Return.
    jit.ret();

    // Handle an exception
    exceptionHandler.link(&jit);

    jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm.topEntryFrame, GPRInfo::argumentGPR0);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm.topCallFrame);
#if OS(WINDOWS)
    // Allocate space on stack for the 4 parameter registers.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#elif CPU(MIPS)
    // Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments.
    jit.subPtr(CCallHelpers::TrustedImm32(16), CCallHelpers::stackPointerRegister);
#endif
    jit.move(CCallHelpers::TrustedImmPtr(&vm), JSInterfaceJIT::argumentGPR0);
    jit.move(JSInterfaceJIT::TrustedImmPtr(tagCFunction<OperationPtrTag>(operationVMHandleException)), JSInterfaceJIT::regT3);
    jit.call(JSInterfaceJIT::regT3, OperationPtrTag);
#if OS(WINDOWS)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#elif CPU(MIPS)
    jit.addPtr(CCallHelpers::TrustedImm32(16), CCallHelpers::stackPointerRegister);
#endif

    jit.jumpToExceptionHandler(vm);

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::Thunk);
    return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "%s %s%s trampoline", thunkFunctionType == ThunkFunctionType::JSFunction ? "native" : "internal", entryType == EnterViaJumpWithSavedTags ? "Tail With Saved Tags " : entryType == EnterViaJumpWithoutSavedTags ? "Tail Without Saved Tags " : "", toCString(kind).data());
}

MacroAssemblerCodeRef<JITThunkPtrTag> nativeCallGenerator(VM& vm)
{
    return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForCall);
}

MacroAssemblerCodeRef<JITThunkPtrTag> nativeTailCallGenerator(VM& vm)
{
    return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForCall, EnterViaJumpWithSavedTags);
}

MacroAssemblerCodeRef<JITThunkPtrTag> nativeTailCallWithoutSavedTagsGenerator(VM& vm)
{
    return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForCall, EnterViaJumpWithoutSavedTags);
}

MacroAssemblerCodeRef<JITThunkPtrTag> nativeConstructGenerator(VM& vm)
{
    return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForConstruct);
}

MacroAssemblerCodeRef<JITThunkPtrTag> internalFunctionCallGenerator(VM& vm)
{
    return nativeForGenerator(vm, ThunkFunctionType::InternalFunction, CodeForCall);
}

MacroAssemblerCodeRef<JITThunkPtrTag> internalFunctionConstructGenerator(VM& vm)
{
    return nativeForGenerator(vm, ThunkFunctionType::InternalFunction, CodeForConstruct);
}

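// Reached when a function is invoked with fewer arguments than it declares. The
// frame is slid down by the stack-aligned fixup count, the existing slots are
// copied, and the newly exposed argument slots are filled with undefined; on
// ARM64E the caller's return PC is untagged and retagged around the move.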
MacroAssemblerCodeRef<JITThunkPtrTag> arityFixupGenerator(VM& vm)
{
    JSInterfaceJIT jit(&vm);

    // We enter with fixup count in argumentGPR0
    // We have the guarantee that a0, a1, a2, t3, t4 and t5 (or t0 for Windows) are all distinct :-)
#if USE(JSVALUE64)
#if OS(WINDOWS)
    const GPRReg extraTemp = JSInterfaceJIT::regT0;
#else
    const GPRReg extraTemp = JSInterfaceJIT::regT5;
#endif
# if CPU(X86_64)
    jit.pop(JSInterfaceJIT::regT4);
# endif
    jit.tagReturnAddress();
#if CPU(ARM64E)
    jit.loadPtr(JSInterfaceJIT::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT3);
    jit.addPtr(JSInterfaceJIT::TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::callFrameRegister, extraTemp);
    jit.untagPtr(extraTemp, GPRInfo::regT3);
    jit.validateUntaggedPtr(GPRInfo::regT3, extraTemp);
    PtrTag tempReturnPCTag = static_cast<PtrTag>(random());
    jit.move(JSInterfaceJIT::TrustedImmPtr(tempReturnPCTag), extraTemp);
    jit.tagPtr(extraTemp, GPRInfo::regT3);
    jit.storePtr(GPRInfo::regT3, JSInterfaceJIT::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
#endif
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::addressFor(CallFrameSlot::argumentCountIncludingThis), JSInterfaceJIT::argumentGPR2);
    jit.add32(JSInterfaceJIT::TrustedImm32(CallFrame::headerSizeInRegisters), JSInterfaceJIT::argumentGPR2);

    // Check to see if we have extra slots we can use
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
    jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
    jit.move(JSInterfaceJIT::TrustedImm64(JSValue::ValueUndefined), extraTemp);
    JSInterfaceJIT::Label fillExtraSlots(jit.label());
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight));
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
    jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
    jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
    JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
    noExtraSlot.link(&jit);

    jit.neg64(JSInterfaceJIT::argumentGPR0);

    // Adjust call frame register and stack pointer to account for missing args.
    // We need to change the stack pointer first before performing copy/fill loops.
    // This stack space below the stack pointer is considered unused by the OS. Therefore,
    // the OS may corrupt this space when constructing a signal stack.
    jit.move(JSInterfaceJIT::argumentGPR0, extraTemp);
    jit.lshift64(JSInterfaceJIT::TrustedImm32(3), extraTemp);
    jit.addPtr(extraTemp, JSInterfaceJIT::callFrameRegister);
    jit.untagReturnAddress();
    jit.addPtr(extraTemp, JSInterfaceJIT::stackPointerRegister);
    jit.tagReturnAddress();

    // Move current frame down argumentGPR0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load64(CCallHelpers::Address(JSInterfaceJIT::regT3), extraTemp);
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);

    // Fill in argumentGPR0 missing arg slots with undefined
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
    jit.move(JSInterfaceJIT::TrustedImm64(JSValue::ValueUndefined), extraTemp);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);

    done.link(&jit);

#if CPU(ARM64E)
    jit.loadPtr(JSInterfaceJIT::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT3);
    jit.move(JSInterfaceJIT::TrustedImmPtr(tempReturnPCTag), extraTemp);
    jit.untagPtr(extraTemp, GPRInfo::regT3);
    jit.validateUntaggedPtr(GPRInfo::regT3, extraTemp);
    jit.addPtr(JSInterfaceJIT::TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::callFrameRegister, extraTemp);
    jit.tagPtr(extraTemp, GPRInfo::regT3);
    jit.storePtr(GPRInfo::regT3, JSInterfaceJIT::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
#endif

# if CPU(X86_64)
    jit.push(JSInterfaceJIT::regT4);
# endif
    jit.ret();
#else // USE(JSVALUE64) section above, USE(JSVALUE32_64) section below.
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::addressFor(CallFrameSlot::argumentCountIncludingThis), JSInterfaceJIT::argumentGPR2);
    jit.add32(JSInterfaceJIT::TrustedImm32(CallFrame::headerSizeInRegisters), JSInterfaceJIT::argumentGPR2);

    // Check to see if we have extra slots we can use
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
    jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Label fillExtraSlots(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, TagOffset));
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
    jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
    jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
    JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
    noExtraSlot.link(&jit);

    jit.neg32(JSInterfaceJIT::argumentGPR0);

    // Adjust call frame register and stack pointer to account for missing args.
    // We need to change the stack pointer first before performing copy/fill loops.
    // This stack space below the stack pointer is considered unused by the OS. Therefore,
    // the OS may corrupt this space when constructing a signal stack.
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::regT5);
    jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT5);
    jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::callFrameRegister);
    jit.untagReturnAddress();
    jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::stackPointerRegister);
    jit.tagReturnAddress();

    // Move current frame down argumentGPR0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, PayloadOffset), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, TagOffset), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);

    // Fill in argumentGPR0 missing arg slots with undefined
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);

    done.link(&jit);

    jit.ret();
#endif // End of USE(JSVALUE32_64) section.

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::Thunk);
    return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "fixup arity");
}

MacroAssemblerCodeRef<JITThunkPtrTag> unreachableGenerator(VM& vm)
{
    JSInterfaceJIT jit(&vm);

    jit.breakpoint();

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::Thunk);
    return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "unreachable thunk");
}

MacroAssemblerCodeRef<JITThunkPtrTag> stringGetByValGenerator(VM& vm)
{
    // regT0 is JSString*, and regT1 (64bit) or regT2 (32bit) is int index.
    // Return regT0 = result JSString* if succeeds. Otherwise, return regT0 = 0.
#if USE(JSVALUE64)
    GPRReg stringGPR = GPRInfo::regT0;
    GPRReg indexGPR = GPRInfo::regT1;
    GPRReg scratchGPR = GPRInfo::regT2;
#else
    GPRReg stringGPR = GPRInfo::regT0;
    GPRReg indexGPR = GPRInfo::regT2;
    GPRReg scratchGPR = GPRInfo::regT1;
#endif

    JSInterfaceJIT jit(&vm);
    JSInterfaceJIT::JumpList failures;
    jit.tagReturnAddress();

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.loadPtr(JSInterfaceJIT::Address(stringGPR, JSString::offsetOfValue()), stringGPR);
    failures.append(jit.branchIfRopeStringImpl(stringGPR));
    jit.load32(JSInterfaceJIT::Address(stringGPR, StringImpl::lengthMemoryOffset()), scratchGPR);

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    failures.append(jit.branch32(JSInterfaceJIT::AboveOrEqual, indexGPR, scratchGPR));

    // Load the character
    JSInterfaceJIT::JumpList cont8Bit;
    // Load the string flags
    jit.load32(JSInterfaceJIT::Address(stringGPR, StringImpl::flagsOffset()), scratchGPR);
    jit.loadPtr(JSInterfaceJIT::Address(stringGPR, StringImpl::dataOffset()), stringGPR);
    auto is16Bit = jit.branchTest32(JSInterfaceJIT::Zero, scratchGPR, JSInterfaceJIT::TrustedImm32(StringImpl::flagIs8Bit()));
    jit.load8(JSInterfaceJIT::BaseIndex(stringGPR, indexGPR, JSInterfaceJIT::TimesOne, 0), stringGPR);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(JSInterfaceJIT::BaseIndex(stringGPR, indexGPR, JSInterfaceJIT::TimesTwo, 0), stringGPR);
    cont8Bit.link(&jit);

    failures.append(jit.branch32(JSInterfaceJIT::Above, stringGPR, JSInterfaceJIT::TrustedImm32(maxSingleCharacterString)));
    jit.move(JSInterfaceJIT::TrustedImmPtr(vm.smallStrings.singleCharacterStrings()), indexGPR);
    jit.loadPtr(JSInterfaceJIT::BaseIndex(indexGPR, stringGPR, JSInterfaceJIT::ScalePtr, 0), stringGPR);
    jit.ret();

    failures.link(&jit);
    jit.move(JSInterfaceJIT::TrustedImm32(0), stringGPR);
    jit.ret();

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::Thunk);
    return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "String get_by_val stub");
}

static void stringCharLoad(SpecializedThunkJIT& jit)
{
    // load string
    jit.loadJSStringArgument(SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, JSString::offsetOfValue()), SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchIfRopeStringImpl(SpecializedThunkJIT::regT0));
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::lengthMemoryOffset()), SpecializedThunkJIT::regT2);

    // load index
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
    cont8Bit.link(&jit);
}

static void charToString(SpecializedThunkJIT& jit, VM& vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
    jit.appendFailure(jit.branch32(MacroAssembler::Above, src, MacroAssembler::TrustedImm32(maxSingleCharacterString)));
    jit.move(MacroAssembler::TrustedImmPtr(vm.smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}

MacroAssemblerCodeRef<JITThunkPtrTag> charCodeAtThunkGenerator(VM& vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "charCodeAt");
}

MacroAssemblerCodeRef<JITThunkPtrTag> charAtThunkGenerator(VM& vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "charAt");
}

MacroAssemblerCodeRef<JITThunkPtrTag> fromCharCodeThunkGenerator(VM& vm)
{
    SpecializedThunkJIT jit(vm, 1);
    // load char code
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "fromCharCode");
}

MacroAssemblerCodeRef<JITThunkPtrTag> stringPrototypeCodePointAtThunkGenerator(VM& vm)
{
    SpecializedThunkJIT jit(vm, 1);

    // load string
    jit.loadJSStringArgument(SpecializedThunkJIT::ThisArgument, GPRInfo::regT0);

    // Load string length to regT3, and start the process of loading the data pointer into regT2
    jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, JSString::offsetOfValue()), GPRInfo::regT0);
    jit.appendFailure(jit.branchIfRopeStringImpl(GPRInfo::regT0));
    jit.load32(CCallHelpers::Address(GPRInfo::regT0, StringImpl::lengthMemoryOffset()), GPRInfo::regT3);

    // load index
    jit.loadInt32Argument(0, GPRInfo::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(CCallHelpers::AboveOrEqual, GPRInfo::regT1, GPRInfo::regT3));

    // Load the character
    CCallHelpers::JumpList done;
    // Load the string flags
    jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, StringImpl::dataOffset()), GPRInfo::regT2);
    auto is16Bit = jit.branchTest32(CCallHelpers::Zero, CCallHelpers::Address(GPRInfo::regT0, StringImpl::flagsOffset()), CCallHelpers::TrustedImm32(StringImpl::flagIs8Bit()));
    jit.load8(CCallHelpers::BaseIndex(GPRInfo::regT2, GPRInfo::regT1, CCallHelpers::TimesOne, 0), GPRInfo::regT0);
    done.append(jit.jump());

    is16Bit.link(&jit);
    jit.load16(CCallHelpers::BaseIndex(GPRInfo::regT2, GPRInfo::regT1, CCallHelpers::TimesTwo, 0), GPRInfo::regT0);
    // Original index is int32_t, and here, we ensure that it is positive. If we interpret it as uint32_t, adding 1 never overflows.
    jit.add32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
    done.append(jit.branch32(CCallHelpers::AboveOrEqual, GPRInfo::regT1, GPRInfo::regT3));
    jit.and32(CCallHelpers::TrustedImm32(0xfffffc00), GPRInfo::regT0, GPRInfo::regT3);
    done.append(jit.branch32(CCallHelpers::NotEqual, GPRInfo::regT3, CCallHelpers::TrustedImm32(0xd800)));
    jit.load16(CCallHelpers::BaseIndex(GPRInfo::regT2, GPRInfo::regT1, CCallHelpers::TimesTwo, 0), GPRInfo::regT2);
    jit.and32(CCallHelpers::TrustedImm32(0xfffffc00), GPRInfo::regT2, GPRInfo::regT3);
    done.append(jit.branch32(CCallHelpers::NotEqual, GPRInfo::regT3, CCallHelpers::TrustedImm32(0xdc00)));
    jit.lshift32(CCallHelpers::TrustedImm32(10), GPRInfo::regT0);
    jit.getEffectiveAddress(CCallHelpers::BaseIndex(GPRInfo::regT0, GPRInfo::regT2, CCallHelpers::TimesOne, -U16_SURROGATE_OFFSET), GPRInfo::regT0);
    done.link(&jit);

    jit.returnInt32(GPRInfo::regT0);
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "codePointAt");
}

MacroAssemblerCodeRef<JITThunkPtrTag> clz32ThunkGenerator(VM& vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntArgJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump);

    SpecializedThunkJIT::Label convertedArgumentReentry(&jit);
    jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnInt32(SpecializedThunkJIT::regT1);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArgJump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArgJump);

    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "clz32");
}

MacroAssemblerCodeRef<JITThunkPtrTag> sqrtThunkGenerator(VM& vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "sqrt");
}


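// Per-platform assembly wrappers that let JIT code call unary double math
// functions (floor, ceil, exp, ...) with the operand already in the FP argument
// register, avoiding the usual C calling-convention glue. On platforms with no
// wrapper, the MathThunk is left null and the generators below fall back to the
// generic native call thunk.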
876 | #define UnaryDoubleOpWrapper(function) function##Wrapper
|
---|
877 | enum MathThunkCallingConvention { };
|
---|
878 | typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
|
---|
879 |
|
---|
880 | #if CPU(X86_64) && COMPILER(GCC_COMPATIBLE) && (OS(DARWIN) || OS(LINUX))
|
---|
881 |
|
---|
882 | #define defineUnaryDoubleOpWrapper(function) \
|
---|
883 | asm( \
|
---|
884 | ".text\n" \
|
---|
885 | ".globl " SYMBOL_STRING(function##Thunk) "\n" \
|
---|
886 | HIDE_SYMBOL(function##Thunk) "\n" \
|
---|
887 | SYMBOL_STRING(function##Thunk) ":" "\n" \
|
---|
888 | "pushq %rax\n" \
|
---|
889 | "call " GLOBAL_REFERENCE(function) "\n" \
|
---|
890 | "popq %rcx\n" \
|
---|
891 | "ret\n" \
|
---|
892 | ".previous\n" \
|
---|
893 | );\
|
---|
894 | extern "C" { \
|
---|
895 | MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
|
---|
896 | JSC_ANNOTATE_JIT_OPERATION(function##Thunk); \
|
---|
897 | } \
|
---|
898 | static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
|
---|
899 |
|
---|
900 | #elif CPU(X86) && COMPILER(GCC_COMPATIBLE) && OS(LINUX) && defined(__PIC__)
|
---|
901 | #define defineUnaryDoubleOpWrapper(function) \
|
---|
902 | asm( \
|
---|
903 | ".text\n" \
|
---|
904 | ".globl " SYMBOL_STRING(function##Thunk) "\n" \
|
---|
905 | HIDE_SYMBOL(function##Thunk) "\n" \
|
---|
906 | SYMBOL_STRING(function##Thunk) ":" "\n" \
|
---|
907 | "pushl %ebx\n" \
|
---|
908 | "subl $20, %esp\n" \
|
---|
909 | "movsd %xmm0, (%esp) \n" \
|
---|
910 | "call __x86.get_pc_thunk.bx\n" \
|
---|
911 | "addl $_GLOBAL_OFFSET_TABLE_, %ebx\n" \
|
---|
912 | "call " GLOBAL_REFERENCE(function) "\n" \
|
---|
913 | "fstpl (%esp) \n" \
|
---|
914 | "movsd (%esp), %xmm0 \n" \
|
---|
915 | "addl $20, %esp\n" \
|
---|
916 | "popl %ebx\n" \
|
---|
917 | "ret\n" \
|
---|
918 | ".previous\n" \
|
---|
919 | );\
|
---|
920 | extern "C" { \
|
---|
921 | MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
|
---|
922 | JSC_ANNOTATE_JIT_OPERATION(function##Thunk); \
|
---|
923 | } \
|
---|
924 | static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
|
---|
925 |
|
---|
926 | #elif CPU(X86) && COMPILER(GCC_COMPATIBLE) && (OS(DARWIN) || OS(LINUX))
|
---|
927 | #define defineUnaryDoubleOpWrapper(function) \
|
---|
928 | asm( \
|
---|
929 | ".text\n" \
|
---|
930 | ".globl " SYMBOL_STRING(function##Thunk) "\n" \
|
---|
931 | HIDE_SYMBOL(function##Thunk) "\n" \
|
---|
932 | SYMBOL_STRING(function##Thunk) ":" "\n" \
|
---|
933 | "subl $20, %esp\n" \
|
---|
934 | "movsd %xmm0, (%esp) \n" \
|
---|
935 | "call " GLOBAL_REFERENCE(function) "\n" \
|
---|
936 | "fstpl (%esp) \n" \
|
---|
937 | "movsd (%esp), %xmm0 \n" \
|
---|
938 | "addl $20, %esp\n" \
|
---|
939 | "ret\n" \
|
---|
940 | ".previous\n" \
|
---|
941 | );\
|
---|
942 | extern "C" { \
|
---|
943 | MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
|
---|
944 | JSC_ANNOTATE_JIT_OPERATION(function##Thunk); \
|
---|
945 | } \
|
---|
946 | static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
|
---|
947 |
|
---|
948 | #elif CPU(ARM_THUMB2) && COMPILER(GCC_COMPATIBLE) && OS(DARWIN)
|
---|
949 |
|
---|
950 | #define defineUnaryDoubleOpWrapper(function) \
|
---|
951 | asm( \
|
---|
952 | ".text\n" \
|
---|
953 | ".align 2\n" \
|
---|
954 | ".globl " SYMBOL_STRING(function##Thunk) "\n" \
|
---|
955 | HIDE_SYMBOL(function##Thunk) "\n" \
|
---|
956 | ".thumb\n" \
|
---|
957 | ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
|
---|
958 | SYMBOL_STRING(function##Thunk) ":" "\n" \
|
---|
959 | "push {lr}\n" \
|
---|
960 | "vmov r0, r1, d0\n" \
|
---|
961 | "blx " GLOBAL_REFERENCE(function) "\n" \
|
---|
962 | "vmov d0, r0, r1\n" \
|
---|
963 | "pop {lr}\n" \
|
---|
964 | "bx lr\n" \
|
---|
965 | ".previous\n" \
|
---|
966 | ); \
|
---|
967 | extern "C" { \
|
---|
968 | MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
|
---|
969 | JSC_ANNOTATE_JIT_OPERATION(function##Thunk); \
|
---|
970 | } \
|
---|
971 | static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
|
---|

#elif CPU(ARM64)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "b " GLOBAL_REFERENCE(function) "\n" \
        ".previous\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
        JSC_ANNOTATE_JIT_OPERATION(function##Thunk); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
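
// On ARM64, the C calling convention already passes and returns the double in d0, which
// matches MathThunkCallingConvention, so the thunk reduces to a bare tail branch.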

#elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS)

// MSVC does not allow floor, etc., to be called directly from inline assembly, so we need to wrap these functions.
static double (_cdecl *floorFunction)(double) = floor;
static double (_cdecl *ceilFunction)(double) = ceil;
static double (_cdecl *truncFunction)(double) = trunc;
static double (_cdecl *expFunction)(double) = exp;
static double (_cdecl *logFunction)(double) = log;
static double (_cdecl *jsRoundFunction)(double) = jsRound;

#define defineUnaryDoubleOpWrapper(function) \
    extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \
    { \
        __asm \
        { \
            __asm sub esp, 20 \
            __asm movsd mmword ptr [esp], xmm0 \
            __asm call function##Function \
            __asm fstp qword ptr [esp] \
            __asm movsd xmm0, mmword ptr [esp] \
            __asm add esp, 20 \
            __asm ret \
        } \
    } \
    JSC_ANNOTATE_JIT_OPERATION(function##Thunk); \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#else

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0
#endif

defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);
defineUnaryDoubleOpWrapper(trunc);
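
// When no assembly wrapper is available for the target, UnaryDoubleOpWrapper(function)
// is null, and each generator below bails out to the generic native call stub instead
// of emitting a specialized thunk.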

MacroAssemblerCodeRef<JITThunkPtrTag> floorThunkGenerator(VM& vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointRounding()) {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT0);
        return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "floor");
    }

    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqualAndOrdered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
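        // branchTruncateDoubleToInt32 rounds toward zero, which agrees with floor only
        // for values >= 0. Negative and NaN inputs (and -0, already handled by the zero
        // check above) take the slow call into the C implementation instead.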
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "floor");
}

MacroAssemblerCodeRef<JITThunkPtrTag> ceilThunkGenerator(VM& vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    if (jit.supportsFloatingPointRounding())
        jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    else
        jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));

    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "ceil");
}

MacroAssemblerCodeRef<JITThunkPtrTag> truncThunkGenerator(VM& vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(trunc) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    if (jit.supportsFloatingPointRounding())
        jit.roundTowardZeroDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    else
        jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(trunc));

    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "trunc");
}

MacroAssemblerCodeRef<JITThunkPtrTag> roundThunkGenerator(VM& vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointRounding()) {
        jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqualAndOrdered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));

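        // Math.round rounds halfway cases toward +Infinity. Compute ceil(x) in fpRegT1;
        // if ceil(x) - 0.5 > x, then x is closer to ceil(x) - 1, so round down by
        // subtracting 1. Otherwise the answer is ceil(x) itself.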
        jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        static constexpr double halfConstant = -0.5;
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT2);
        jit.addDouble(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::fpRegT2);
        MacroAssembler::Jump shouldRoundDown = jit.branchDouble(MacroAssembler::DoubleGreaterThanAndOrdered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT0);

        jit.moveDouble(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::fpRegT0);
        MacroAssembler::Jump continuation = jit.jump();

        shouldRoundDown.link(&jit);
        static constexpr double oneConstant = 1.0;
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), SpecializedThunkJIT::fpRegT2);
        jit.subDouble(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT0);

        continuation.link(&jit);
    } else
        jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "round");
}

MacroAssemblerCodeRef<JITThunkPtrTag> expThunkGenerator(VM& vm)
{
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "exp");
}

MacroAssemblerCodeRef<JITThunkPtrTag> logThunkGenerator(VM& vm)
{
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "log");
}

MacroAssemblerCodeRef<JITThunkPtrTag> absThunkGenerator(VM& vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm));

#if USE(JSVALUE64)
    VirtualRegister virtualRegister = CallFrameSlot::firstArgument;
    jit.load64(AssemblyHelpers::addressFor(virtualRegister), GPRInfo::regT0);
    auto notInteger = jit.branchIfNotInt32(GPRInfo::regT0);

    // Abs Int32.
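    // Branch-free abs: regT1 = x >> 31 (arithmetic) is 0 for non-negative x and -1 for
    // negative x, so (x + regT1) ^ regT1 leaves non-negative values unchanged and
    // negates negative ones. INT_MIN is the one input this cannot fix up; it is caught
    // by the sign test below.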
    jit.rshift32(GPRInfo::regT0, MacroAssembler::TrustedImm32(31), GPRInfo::regT1);
    jit.add32(GPRInfo::regT1, GPRInfo::regT0);
    jit.xor32(GPRInfo::regT1, GPRInfo::regT0);

    // IntMin cannot be inverted.
    MacroAssembler::Jump integerIsIntMin = jit.branchTest32(MacroAssembler::Signed, GPRInfo::regT0);

    // Box and finish.
    jit.or64(GPRInfo::numberTagRegister, GPRInfo::regT0);
    MacroAssembler::Jump doneWithIntegers = jit.jump();

    // Handle Doubles.
    notInteger.link(&jit);
    jit.appendFailure(jit.branchIfNotNumber(GPRInfo::regT0));
    jit.unboxDoubleWithoutAssertions(GPRInfo::regT0, GPRInfo::regT0, FPRInfo::fpRegT0);
    MacroAssembler::Label absFPR0Label = jit.label();
    jit.absDouble(FPRInfo::fpRegT0, FPRInfo::fpRegT1);
    jit.boxDouble(FPRInfo::fpRegT1, GPRInfo::regT0);

    // Tail.
    doneWithIntegers.link(&jit);
    jit.returnJSValue(GPRInfo::regT0);

    // We know the value of regT0 is IntMin. We could load that value from memory but
    // it is simpler to just convert it.
    integerIsIntMin.link(&jit);
    jit.convertInt32ToDouble(GPRInfo::regT0, FPRInfo::fpRegT0);
    jit.jump().linkTo(absFPR0Label, &jit);
#else
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Signed, SpecializedThunkJIT::regT0));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
#endif
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "abs");
}

MacroAssemblerCodeRef<JITThunkPtrTag> imulThunkGenerator(VM& vm)
{
    SpecializedThunkJIT jit(vm, 2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

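    // Non-int32 arguments: if the double truncates to int32 exactly, rejoin the fast
    // path above; otherwise fail over to the generic native call, which performs the
    // full ToInt32 conversion that Math.imul requires.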
    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "imul");
}

MacroAssemblerCodeRef<JITThunkPtrTag> randomThunkGenerator(VM& vm)
{
    SpecializedThunkJIT jit(vm, 0);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm));

#if USE(JSVALUE64)
    jit.emitRandomThunk(vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT3, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);

    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "random");
#else
    return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm));
#endif
}

MacroAssemblerCodeRef<JITThunkPtrTag> boundFunctionCallGenerator(VM& vm)
{
    CCallHelpers jit;

    jit.emitFunctionPrologue();

    // Set up our call frame.
    jit.storePtr(CCallHelpers::TrustedImmPtr(nullptr), CCallHelpers::addressFor(CallFrameSlot::codeBlock));
    jit.store32(CCallHelpers::TrustedImm32(0), CCallHelpers::tagFor(CallFrameSlot::argumentCountIncludingThis));

    constexpr unsigned stackMisalignment = sizeof(CallerFrameAndPC) % stackAlignmentBytes();
    constexpr unsigned extraStackNeeded = stackMisalignment ? stackAlignmentBytes() - stackMisalignment : 0;

    // We need to forward all of the arguments that we were passed. We aren't allowed to do a tail
    // call here as far as I can tell. At least not so long as the generic path doesn't do a tail
    // call, since that would be way too weird.

    // The formula for the number of stack bytes needed given some number of parameters (including
    // this) is:
    //
    //     stackAlign((numParams + CallFrameHeaderSize) * sizeof(Register) - sizeof(CallerFrameAndPC))
    //
    // Probably we want to write this as:
    //
    //     stackAlign((numParams + (CallFrameHeaderSize - CallerFrameAndPCSize)) * sizeof(Register))
    //
    // That's really all there is to this. We have all the registers we need to do it.

    jit.loadCell(CCallHelpers::addressFor(CallFrameSlot::callee), GPRInfo::regT0);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, JSBoundFunction::offsetOfBoundArgs()), GPRInfo::regT2);
    jit.load32(CCallHelpers::payloadFor(CallFrameSlot::argumentCountIncludingThis), GPRInfo::regT1);
    jit.move(GPRInfo::regT1, GPRInfo::regT3);
    auto noArgs = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT2);
    jit.load32(CCallHelpers::Address(GPRInfo::regT2, JSImmutableButterfly::offsetOfPublicLength()), GPRInfo::regT2);
    jit.add32(GPRInfo::regT2, GPRInfo::regT1);
    jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
    noArgs.link(&jit);
    jit.add32(CCallHelpers::TrustedImm32(CallFrame::headerSizeInRegisters - CallerFrameAndPC::sizeInRegisters), GPRInfo::regT1, GPRInfo::regT2);
    jit.lshift32(CCallHelpers::TrustedImm32(3), GPRInfo::regT2);
    jit.add32(CCallHelpers::TrustedImm32(stackAlignmentBytes() - 1), GPRInfo::regT2);
    jit.and32(CCallHelpers::TrustedImm32(-stackAlignmentBytes()), GPRInfo::regT2);

    if (extraStackNeeded)
        jit.add32(CCallHelpers::TrustedImm32(extraStackNeeded), GPRInfo::regT2);

    // At this point regT1 has the actual argument count, regT2 has the amount of stack we will need, and regT3 has the passed argument count.
    // Check to see if we have enough stack space.

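    // regT2 := stackPointer - bytesNeeded; the new stack pointer is acceptable only if
    // it stays above the VM's soft stack limit (the stack grows down).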
    jit.negPtr(GPRInfo::regT2);
    jit.addPtr(CCallHelpers::stackPointerRegister, GPRInfo::regT2);
    CCallHelpers::Jump haveStackSpace = jit.branchPtr(CCallHelpers::BelowOrEqual, CCallHelpers::AbsoluteAddress(vm.addressOfSoftStackLimit()), GPRInfo::regT2);

    // Throw Stack Overflow exception
    jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm.topEntryFrame, GPRInfo::regT3);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, JSBoundFunction::offsetOfScopeChain()), GPRInfo::regT3);
    jit.setupArguments<decltype(operationThrowStackOverflowErrorFromThunk)>(GPRInfo::regT3);
    jit.prepareCallOperation(vm);
    jit.move(CCallHelpers::TrustedImmPtr(tagCFunction<OperationPtrTag>(operationThrowStackOverflowErrorFromThunk)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0, OperationPtrTag);
    jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
    jit.jumpToExceptionHandler(vm);

    haveStackSpace.link(&jit);
    jit.move(GPRInfo::regT2, CCallHelpers::stackPointerRegister);

    // Do basic callee frame setup, including 'this'.

    jit.store32(GPRInfo::regT1, CCallHelpers::calleeFramePayloadSlot(CallFrameSlot::argumentCountIncludingThis));

    JSValueRegs valueRegs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT4, GPRInfo::regT2);
    jit.loadValue(CCallHelpers::Address(GPRInfo::regT0, JSBoundFunction::offsetOfBoundThis()), valueRegs);
    jit.storeValue(valueRegs, CCallHelpers::calleeArgumentSlot(0));

    // OK, now we can start copying. This is a simple matter of copying parameters from the caller's
    // frame to the callee's frame. Note that we know that regT3 (the argument count) must be at
    // least 1.
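    // First forward the arguments the caller passed (beyond 'this') into the highest
    // argument slots of the callee frame; the lower slots are filled from the bound-args
    // butterfly afterwards.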
    jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT3);
    jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
    CCallHelpers::Jump done = jit.branchTest32(CCallHelpers::Zero, GPRInfo::regT3);

    CCallHelpers::Label loop = jit.label();
    jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT3);
    jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
    jit.loadValue(CCallHelpers::addressFor(virtualRegisterForArgumentIncludingThis(1)).indexedBy(GPRInfo::regT3, CCallHelpers::TimesEight), valueRegs);
    jit.storeValue(valueRegs, CCallHelpers::calleeArgumentSlot(1).indexedBy(GPRInfo::regT1, CCallHelpers::TimesEight));
    jit.branchTest32(CCallHelpers::NonZero, GPRInfo::regT3).linkTo(loop, &jit);

    done.link(&jit);
    auto noArgs2 = jit.branchTest32(CCallHelpers::Zero, GPRInfo::regT1);

    jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, JSBoundFunction::offsetOfBoundArgs()), GPRInfo::regT3);

    CCallHelpers::Label loopBound = jit.label();
    jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
    jit.loadValue(CCallHelpers::BaseIndex(GPRInfo::regT3, GPRInfo::regT1, CCallHelpers::TimesEight, JSImmutableButterfly::offsetOfData() + sizeof(WriteBarrier<Unknown>)), valueRegs);
    jit.storeValue(valueRegs, CCallHelpers::calleeArgumentSlot(1).indexedBy(GPRInfo::regT1, CCallHelpers::TimesEight));
    jit.branchTest32(CCallHelpers::NonZero, GPRInfo::regT1).linkTo(loopBound, &jit);

    noArgs2.link(&jit);

    jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, JSBoundFunction::offsetOfTargetFunction()), GPRInfo::regT2);
    jit.storeCell(GPRInfo::regT2, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));

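    // executableOrRareData holds either an ExecutableBase* directly or, when the low
    // rareDataTag bit is set, a tagged FunctionRareData* from which we load the
    // executable.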
    jit.loadPtr(CCallHelpers::Address(GPRInfo::regT2, JSFunction::offsetOfExecutableOrRareData()), GPRInfo::regT0);
    auto hasExecutable = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT0, CCallHelpers::TrustedImm32(JSFunction::rareDataTag));
    jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, FunctionRareData::offsetOfExecutable() - JSFunction::rareDataTag), GPRInfo::regT0);
    hasExecutable.link(&jit);

    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT0, ExecutableBase::offsetOfJITCodeWithArityCheckFor(CodeForCall)),
        GPRInfo::regT1);
    CCallHelpers::Jump noCode = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT1);

    auto isNative = jit.branchIfNotType(GPRInfo::regT0, FunctionExecutableType);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT0, FunctionExecutable::offsetOfCodeBlockForCall()),
        GPRInfo::regT2);
    jit.storePtr(GPRInfo::regT2, CCallHelpers::calleeFrameCodeBlockBeforeCall());

    isNative.link(&jit);

    emitPointerValidation(jit, GPRInfo::regT1, JSEntryPtrTag);
    jit.call(GPRInfo::regT1, JSEntryPtrTag);

    jit.emitFunctionEpilogue();
    jit.ret();

    LinkBuffer linkBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::BoundFunctionThunk);
    linkBuffer.link(noCode, CodeLocationLabel<JITThunkPtrTag>(vm.jitStubs->ctiNativeTailCallWithoutSavedTags(vm)));
    return FINALIZE_THUNK(linkBuffer, JITThunkPtrTag, "Specialized thunk for bound function calls with no arguments");
}

MacroAssemblerCodeRef<JITThunkPtrTag> remoteFunctionCallGenerator(VM& vm)
{
    CCallHelpers jit;
    jit.emitFunctionPrologue();

    // Set up our call frame.
    jit.storePtr(CCallHelpers::TrustedImmPtr(nullptr), CCallHelpers::addressFor(CallFrameSlot::codeBlock));
    jit.store32(CCallHelpers::TrustedImm32(0), CCallHelpers::tagFor(CallFrameSlot::argumentCountIncludingThis));

    constexpr unsigned stackMisalignment = sizeof(CallerFrameAndPC) % stackAlignmentBytes();
    constexpr unsigned extraStackNeeded = stackMisalignment ? stackAlignmentBytes() - stackMisalignment : 0;

    // We need to forward all of the arguments that we were passed. We aren't allowed to do a tail
    // call here as far as I can tell. At least not so long as the generic path doesn't do a tail
    // call, since that would be way too weird.

    // The formula for the number of stack bytes needed given some number of parameters (including
    // this) is:
    //
    //     stackAlign((numParams + numFrameLocals + CallFrameHeaderSize) * sizeof(Register) - sizeof(CallerFrameAndPC))
    //
    // Probably we want to write this as:
    //
    //     stackAlign((numParams + numFrameLocals + (CallFrameHeaderSize - CallerFrameAndPCSize)) * sizeof(Register))
    static constexpr int numFrameLocals = 1;
    VirtualRegister loopIndex = virtualRegisterForLocal(0);

    jit.loadCell(CCallHelpers::addressFor(CallFrameSlot::callee), GPRInfo::regT0);
    jit.load32(CCallHelpers::payloadFor(CallFrameSlot::argumentCountIncludingThis), GPRInfo::regT1);

    jit.add32(CCallHelpers::TrustedImm32(CallFrame::headerSizeInRegisters - CallerFrameAndPC::sizeInRegisters + numFrameLocals), GPRInfo::regT1, GPRInfo::regT2);
    jit.lshift32(CCallHelpers::TrustedImm32(3), GPRInfo::regT2);
    jit.add32(CCallHelpers::TrustedImm32(stackAlignmentBytes() - 1), GPRInfo::regT2);
    jit.and32(CCallHelpers::TrustedImm32(-stackAlignmentBytes()), GPRInfo::regT2);

    if (extraStackNeeded)
        jit.add32(CCallHelpers::TrustedImm32(extraStackNeeded), GPRInfo::regT2);

    // At this point regT1 has the actual argument count, and regT2 has the amount of stack we will need.
    // Check to see if we have enough stack space.

    jit.negPtr(GPRInfo::regT2);
    jit.addPtr(CCallHelpers::stackPointerRegister, GPRInfo::regT2);
    CCallHelpers::Jump haveStackSpace = jit.branchPtr(CCallHelpers::BelowOrEqual, CCallHelpers::AbsoluteAddress(vm.addressOfSoftStackLimit()), GPRInfo::regT2);

    // Throw Stack Overflow exception
    jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm.topEntryFrame, GPRInfo::regT3);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, JSBoundFunction::offsetOfScopeChain()), GPRInfo::regT3);
    jit.setupArguments<decltype(operationThrowStackOverflowErrorFromThunk)>(GPRInfo::regT3);
    jit.prepareCallOperation(vm);
    jit.move(CCallHelpers::TrustedImmPtr(tagCFunction<OperationPtrTag>(operationThrowStackOverflowErrorFromThunk)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0, OperationPtrTag);
    jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
    jit.jumpToExceptionHandler(vm);

    haveStackSpace.link(&jit);
    jit.move(GPRInfo::regT2, CCallHelpers::stackPointerRegister);

    // Set `this` to undefined
    // NOTE: needs consensus in TC39 (https://github.com/tc39/proposal-shadowrealm/issues/328)
    jit.store32(GPRInfo::regT1, CCallHelpers::calleeFramePayloadSlot(CallFrameSlot::argumentCountIncludingThis));
    jit.storeTrustedValue(jsUndefined(), CCallHelpers::calleeArgumentSlot(0));

    JSValueRegs valueRegs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT4, GPRInfo::regT2);

    // Before processing the arguments loop, check that we have generated JIT code for calling
    // to avoid processing the loop twice in the slow case.
    CCallHelpers::Jump noCode;
    {
        jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, JSRemoteFunction::offsetOfTargetFunction()), GPRInfo::regT2);
        jit.loadPtr(CCallHelpers::Address(GPRInfo::regT2, JSFunction::offsetOfExecutableOrRareData()), GPRInfo::regT2);
        auto hasExecutable = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT2, CCallHelpers::TrustedImm32(JSFunction::rareDataTag));
        jit.loadPtr(CCallHelpers::Address(GPRInfo::regT2, FunctionRareData::offsetOfExecutable() - JSFunction::rareDataTag), GPRInfo::regT2);
        hasExecutable.link(&jit);

        jit.loadPtr(
            CCallHelpers::Address(
                GPRInfo::regT2, ExecutableBase::offsetOfJITCodeWithArityCheckFor(CodeForCall)),
            GPRInfo::regT2);
        noCode = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT2);
    }

    CCallHelpers::JumpList exceptionChecks;

    // Argument processing loop:
    // For each argument (order should not be observable):
    //     if the value is a Primitive, copy it into the new call frame arguments, otherwise
    //     perform wrapping logic. If the wrapping logic results in a new JSRemoteFunction,
    //     copy it into the new call frame's arguments, otherwise it must have thrown a TypeError.
    CCallHelpers::Jump done = jit.branchSub32(CCallHelpers::Zero, CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
    {
        CCallHelpers::Label loop = jit.label();
        jit.loadValue(CCallHelpers::addressFor(virtualRegisterForArgumentIncludingThis(0)).indexedBy(GPRInfo::regT1, CCallHelpers::TimesEight), valueRegs);

        CCallHelpers::JumpList valueIsPrimitive;
        valueIsPrimitive.append(jit.branchIfNotCell(valueRegs));
        valueIsPrimitive.append(jit.branchIfNotObject(valueRegs.payloadGPR()));

        jit.storePtr(GPRInfo::regT1, jit.addressFor(loopIndex));

        jit.setupArguments<decltype(operationGetWrappedValueForTarget)>(GPRInfo::regT0, valueRegs);
        jit.prepareCallOperation(vm);
        jit.move(CCallHelpers::TrustedImmPtr(tagCFunction<OperationPtrTag>(operationGetWrappedValueForTarget)), GPRInfo::nonArgGPR0);
        emitPointerValidation(jit, GPRInfo::nonArgGPR0, OperationPtrTag);
        jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
        exceptionChecks.append(jit.emitJumpIfException(vm));

        jit.setupResults(valueRegs);
        jit.loadCell(CCallHelpers::addressFor(CallFrameSlot::callee), GPRInfo::regT0);

        jit.loadPtr(jit.addressFor(loopIndex), GPRInfo::regT1);

        valueIsPrimitive.link(&jit);
        jit.storeValue(valueRegs, CCallHelpers::calleeArgumentSlot(0).indexedBy(GPRInfo::regT1, CCallHelpers::TimesEight));
        jit.branchSub32(CCallHelpers::NonZero, CCallHelpers::TrustedImm32(1), GPRInfo::regT1).linkTo(loop, &jit);

        done.link(&jit);
    }

    jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, JSRemoteFunction::offsetOfTargetFunction()), GPRInfo::regT2);
    jit.storeCell(GPRInfo::regT2, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));

    jit.loadPtr(CCallHelpers::Address(GPRInfo::regT2, JSFunction::offsetOfExecutableOrRareData()), GPRInfo::regT1);
    auto hasExecutable = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT1, CCallHelpers::TrustedImm32(JSFunction::rareDataTag));
    jit.loadPtr(CCallHelpers::Address(GPRInfo::regT1, FunctionRareData::offsetOfExecutable() - JSFunction::rareDataTag), GPRInfo::regT1);
    hasExecutable.link(&jit);

    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT1, ExecutableBase::offsetOfJITCodeWithArityCheckFor(CodeForCall)),
        GPRInfo::regT2);
    auto codeExists = jit.branchTestPtr(CCallHelpers::NonZero, GPRInfo::regT2);

    // The calls to operationGetWrappedValueForTarget above may GC, and any GC can potentially jettison the JIT code in the target JSFunction.
    // If we find that the JIT code is null (i.e. has been jettisoned), then we need to re-materialize it for the call below. Note that we know
    // that operationMaterializeRemoteFunctionTargetCode should be able to re-materialize the JIT code (except for any OOME) because we only
    // went down this code path after we found non-null JIT code (in the noCode check) above, i.e. it should be possible to materialize the JIT code.
    // FIXME: Windows x64 is not supported since operationMaterializeRemoteFunctionTargetCode returns SlowPathReturnType.
    jit.setupArguments<decltype(operationMaterializeRemoteFunctionTargetCode)>(GPRInfo::regT0);
    jit.prepareCallOperation(vm);
    jit.move(CCallHelpers::TrustedImmPtr(tagCFunction<OperationPtrTag>(operationMaterializeRemoteFunctionTargetCode)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0, OperationPtrTag);
    jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
    exceptionChecks.append(jit.emitJumpIfException(vm));
    jit.storePtr(GPRInfo::returnValueGPR2, CCallHelpers::calleeFrameCodeBlockBeforeCall());
    jit.move(GPRInfo::returnValueGPR, GPRInfo::regT2);
    auto materialized = jit.jump();

    codeExists.link(&jit);
    auto isNative = jit.branchIfNotType(GPRInfo::regT1, FunctionExecutableType);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT1, FunctionExecutable::offsetOfCodeBlockForCall()),
        GPRInfo::regT3);
    jit.storePtr(GPRInfo::regT3, CCallHelpers::calleeFrameCodeBlockBeforeCall());

    isNative.link(&jit);
    materialized.link(&jit);
    // Based on the check above, we should be good with this. On ARM64, emitPointerValidation will do this.
#if ASSERT_ENABLED && !CPU(ARM64E)
    {
        CCallHelpers::Jump checkNotNull = jit.branchTestPtr(CCallHelpers::NonZero, GPRInfo::regT2);
        jit.abortWithReason(TGInvalidPointer);
        checkNotNull.link(&jit);
    }
#endif

    emitPointerValidation(jit, GPRInfo::regT2, JSEntryPtrTag);
    jit.call(GPRInfo::regT2, JSEntryPtrTag);

    // Wrap return value
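    // Only primitive results flow back to the caller unchanged; an object result must be
    // wrapped for the calling realm (as with the argument wrapping above, this throws a
    // TypeError when the value cannot be wrapped).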
    constexpr JSValueRegs resultRegs = JSRInfo::returnValueJSR;

    CCallHelpers::JumpList resultIsPrimitive;
    resultIsPrimitive.append(jit.branchIfNotCell(resultRegs));
    resultIsPrimitive.append(jit.branchIfNotObject(resultRegs.payloadGPR()));

    jit.loadCell(CCallHelpers::addressFor(CallFrameSlot::callee), GPRInfo::regT2);
    jit.setupArguments<decltype(operationGetWrappedValueForCaller)>(GPRInfo::regT2, resultRegs);
    jit.prepareCallOperation(vm);
    jit.move(CCallHelpers::TrustedImmPtr(tagCFunction<OperationPtrTag>(operationGetWrappedValueForCaller)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0, OperationPtrTag);
    jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
    exceptionChecks.append(jit.emitJumpIfException(vm));

    resultIsPrimitive.link(&jit);
    jit.emitFunctionEpilogue();
    jit.ret();

    exceptionChecks.link(&jit);
    jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm.topEntryFrame, GPRInfo::argumentGPR0);
    jit.setupArguments<decltype(operationLookupExceptionHandler)>(CCallHelpers::TrustedImmPtr(&vm));
    jit.prepareCallOperation(vm);
    jit.move(CCallHelpers::TrustedImmPtr(tagCFunction<OperationPtrTag>(operationLookupExceptionHandler)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0, OperationPtrTag);
    jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);

    jit.jumpToExceptionHandler(vm);

    LinkBuffer linkBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::RemoteFunctionThunk);
    linkBuffer.link(noCode, CodeLocationLabel<JITThunkPtrTag>(vm.jitStubs->ctiNativeTailCallWithoutSavedTags(vm)));
    return FINALIZE_THUNK(linkBuffer, JITThunkPtrTag, "Specialized thunk for remote function calls");
}

} // namespace JSC

#endif // ENABLE(JIT)