source: webkit/trunk/JavaScriptCore/jit/JITInlineMethods.h@43311

Last change on this file since 43311 was 43220, checked in by [email protected], 16 years ago

Bug 25559: Improve native function call performance
<https://bugs.webkit.org/show_bug.cgi?id=25559>

Reviewed by Gavin Barraclough

In order to cache calls to native functions, we now make the standard
prototype functions use a small assembly thunk that converts the JS
calling convention into the native calling convention. As this is
only beneficial in the JIT, we use the NativeFunctionWrapper typedef
to switch between PrototypeFunction and JSFunction and keep the
code sane. This change from PrototypeFunction to NativeFunctionWrapper
is the bulk of this patch.
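
For context, the typedef the message refers to is a compile-time switch along
these lines (a hypothetical sketch; the actual declaration lives elsewhere in
JavaScriptCore, and its exact guard and surrounding declarations may differ):

    // Hypothetical sketch of the NativeFunctionWrapper typedef described above;
    // the guard shown here is an assumption, not taken from this file.
    #if ENABLE(JIT)
    typedef JSFunction NativeFunctionWrapper;        // native calls go through the assembly thunk
    #else
    typedef PrototypeFunction NativeFunctionWrapper; // interpreter keeps the lightweight wrapper
    #endif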

/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef JITInlineMethods_h
#define JITInlineMethods_h

#include <wtf/Platform.h>

#if ENABLE(JIT)

#if PLATFORM(WIN)
#undef FIELD_OFFSET // Fix conflict with winnt.h.
#endif

// FIELD_OFFSET: Like the C++ offsetof macro, but you can use it with classes.
// The magic number 0x4000 is insignificant. We use it to avoid using NULL, since
// NULL can cause compiler problems, especially in cases of multiple inheritance.
#define FIELD_OFFSET(class, field) (reinterpret_cast<ptrdiff_t>(&(reinterpret_cast<class*>(0x4000)->field)) - 0x4000)
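// For example, checkStructure() below uses FIELD_OFFSET(JSCell, m_structure)
// to compute the byte offset of a cell's Structure pointer.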

namespace JSC {

ALWAYS_INLINE void JIT::killLastResultRegister()
{
    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
}

// Loads a virtual register from the register file (the "SF" register array) into a hardware register.
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        move(ImmPtr(JSValue::encode(value)), dst);
        killLastResultRegister();
        return;
    }

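    // The previous bytecode may have left its result in cachedResultRegister.
    // That cached value is only reusable if this bytecode is not a jump target:
    // at a jump target control can arrive from elsewhere, so the register's
    // contents cannot be assumed.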
    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
        bool atJumpTarget = false;
        while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeIndex) {
            if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeIndex)
                atJumpTarget = true;
            ++m_jumpTargetsPosition;
        }

        if (!atJumpTarget) {
            // The value we want is already in the cached result register.
            if (dst != cachedResultRegister)
                move(cachedResultRegister, dst);
            killLastResultRegister();
            return;
        }
    }

    loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
    killLastResultRegister();
}

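// Loads two virtual registers, ordering the loads so that a value already
// cached in the result register is consumed before the other load clobbers it.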
ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
{
    if (src2 == m_lastResultBytecodeRegister) {
        emitGetVirtualRegister(src2, dst2);
        emitGetVirtualRegister(src1, dst1);
    } else {
        emitGetVirtualRegister(src1, dst1);
        emitGetVirtualRegister(src2, dst2);
    }
}

// Puts an arg onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID src, unsigned argumentNumber)
{
    poke(src, argumentNumber);
}

ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber)
{
    poke(Imm32(value), argumentNumber);
}

ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(void* value, unsigned argumentNumber)
{
    poke(ImmPtr(value), argumentNumber);
}

ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
    peek(dst, argumentNumber);
}

ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
{
    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
    return m_codeBlock->getConstant(src);
}

ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
{
    return getConstantOperand(src).getInt32Fast();
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32Fast();
}

// Puts an arg from the register file (the "SF" register array) onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch)
{
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        emitPutJITStubArgConstant(JSValue::encode(value), argumentNumber);
    } else {
        loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch);
        emitPutJITStubArg(scratch, argumentNumber);
    }

    killLastResultRegister();
}

ALWAYS_INLINE void JIT::emitPutCTIParam(void* value, unsigned name)
{
    poke(ImmPtr(value), name);
}

ALWAYS_INLINE void JIT::emitPutCTIParam(RegisterID from, unsigned name)
{
    poke(from, name);
}

ALWAYS_INLINE void JIT::emitGetCTIParam(unsigned name, RegisterID to)
{
    peek(to, name);
    killLastResultRegister();
}

ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(from, Address(callFrameRegister, entry * sizeof(Register)));
}

ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(ImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    loadPtr(Address(from, entry * sizeof(Register)), to);
    killLastResultRegister();
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to)
{
    load32(Address(callFrameRegister, entry * sizeof(Register)), to);
    killLastResultRegister();
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
    storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
    m_lastResultBytecodeRegister = (from == cachedResultRegister) ? dst : std::numeric_limits<int>::max();
    // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    storePtr(ImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
    // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
}

ALWAYS_INLINE JIT::Call JIT::emitNakedCall(void* function)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    Call nakedCall = nearCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, function));
    return nakedCall;
}

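// Stub arguments are passed differently per configuration: in a register
// (JIT_STUB_ARGUMENT_REGISTER), in a stack slot (JIT_STUB_ARGUMENT_STACK), or
// via va_list (JIT_STUB_ARGUMENT_VA_LIST); each scheme needs its own
// restoreArgumentReference().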
#if USE(JIT_STUB_ARGUMENT_REGISTER)
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    move(stackPointerRegister, firstArgumentRegister);
    emitPutCTIParam(callFrameRegister, STUB_ARGS_callFrame);
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
    // In the trampoline on x86-64, the first argument register is not overwritten.
#if !PLATFORM(X86_64)
    move(stackPointerRegister, firstArgumentRegister);
    addPtr(Imm32(sizeof(void*)), firstArgumentRegister);
#endif
}
#elif USE(JIT_STUB_ARGUMENT_STACK)
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    poke(stackPointerRegister);
    emitPutCTIParam(callFrameRegister, STUB_ARGS_callFrame);
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
#else // JIT_STUB_ARGUMENT_VA_LIST
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    emitPutCTIParam(callFrameRegister, STUB_ARGS_callFrame);
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
#endif

ALWAYS_INLINE JIT::Call JIT::emitCTICall_internal(void* helper)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin() + m_bytecodeIndex, true);
#endif
    restoreArgumentReference();
    Call ctiCall = call();
    m_calls.append(CallRecord(ctiCall, m_bytecodeIndex, helper));
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin() + m_bytecodeIndex, false);
#endif
    killLastResultRegister();

    return ctiCall;
}

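// Jumps if the cell in 'reg' does not have the expected Structure.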
ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
    return branchPtr(NotEqual, Address(reg, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(structure));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    return branchTestPtr(Zero, reg, tagMaskRegister);
#else
    return branchTest32(Zero, reg, Imm32(JSImmediate::TagMask));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    orPtr(reg2, scratch);
    return emitJumpIfJSCell(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfJSCell(reg));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    return branchTestPtr(NonZero, reg, tagMaskRegister);
#else
    return branchTest32(NonZero, reg, Imm32(JSImmediate::TagMask));
#endif
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfNotJSCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        emitJumpSlowCaseIfNotJSCell(reg);
}

ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        linkSlowCase(iter);
}

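// Under the 64-bit (ALTERNATE_JSIMMEDIATE) encoding numbers are tagged in the
// high bits, so any value overlapping tagTypeNumberRegister is a number.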
#if USE(ALTERNATE_JSIMMEDIATE)
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateNumber(RegisterID reg)
{
    return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateNumber(RegisterID reg)
{
    return branchTestPtr(Zero, reg, tagTypeNumberRegister);
}
#endif

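// Integer immediates have all of the TagTypeNumber bits set, so on 64-bit an
// unsigned compare against tagTypeNumberRegister identifies int32 immediates;
// on 32-bit the low tag bit is tested instead.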
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
#else
    return branchTest32(NonZero, reg, Imm32(JSImmediate::TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    return branchPtr(Below, reg, tagTypeNumberRegister);
#else
    return branchTest32(Zero, reg, Imm32(JSImmediate::TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    andPtr(reg2, scratch);
    return emitJumpIfNotImmediateInteger(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateInteger(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}

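// With the original 32-bit immediate encoding an integer is (value << 1) | 1;
// subtracting the tag leaves value << 1, so a zero result means the payload was zero.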
#if !USE(ALTERNATE_JSIMMEDIATE)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
    subPtr(Imm32(JSImmediate::TagTypeNumber), reg);
}

ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
{
    return branchSubPtr(Zero, Imm32(JSImmediate::TagTypeNumber), reg);
}
#endif

ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    emitFastArithIntToImmNoCheck(src, dest);
#else
    if (src != dest)
        move(src, dest);
    addPtr(Imm32(JSImmediate::TagTypeNumber), dest);
#endif
}

ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    UNUSED_PARAM(reg);
#else
    rshiftPtr(Imm32(JSImmediate::IntegerPayloadShift), reg);
#endif
}

// operand is int32_t, must have been zero-extended if register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    if (src != dest)
        move(src, dest);
    orPtr(tagTypeNumberRegister, dest);
#else
    signExtend32ToPtr(src, dest);
    addPtr(dest, dest);
    emitFastArithReTagImmediate(dest, dest);
#endif
}

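// Turns a boolean payload (0 or 1) in 'reg' into a tagged boolean immediate by
// shifting it into the payload position and or-ing in the bool tag bits.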
ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
    lshift32(Imm32(JSImmediate::ExtendedPayloadShift), reg);
    or32(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), reg);
}

ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    m_jmpTable.append(JumpTable(jump, m_bytecodeIndex + relativeOffset));
}

ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this);
}

}

#endif // ENABLE(JIT)

#endif