source: webkit/trunk/JavaScriptCore/jit/JITInlineMethods.h@44886

Last change on this file since 44886 was 44886, checked in by [email protected], 16 years ago

2009-06-19 Gabor Loki <[email protected]>

Reviewed by Gavin Barraclough.

Reorganize ARM architecture specific macros.
Use PLATFORM_ARM_ARCH(7) instead of PLATFORM(ARM_V7).

Bug 24986: ARM JIT port
<https://bugs.webkit.org/show_bug.cgi?id=24986>

  • assembler/ARMv7Assembler.h:
  • assembler/AbstractMacroAssembler.h: (JSC::AbstractMacroAssembler::Imm32::Imm32):
  • assembler/MacroAssembler.h:
  • assembler/MacroAssemblerCodeRef.h: (JSC::MacroAssemblerCodePtr::MacroAssemblerCodePtr):
  • jit/ExecutableAllocator.h: (JSC::ExecutableAllocator::cacheFlush):
  • jit/JIT.h:
  • jit/JITInlineMethods.h: (JSC::JIT::restoreArgumentReferenceForTrampoline):
  • jit/JITStubs.cpp:
  • jit/JITStubs.h:
  • wtf/Platform.h:
  • yarr/RegexJIT.cpp: (JSC::Yarr::RegexGenerator::generateEnter): (JSC::Yarr::RegexGenerator::generateReturn):
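
The rename is mechanical at call sites. A minimal sketch of the before/after (the guarded body is borrowed from this file; the pairing is illustrative, not part of the change itself):

    // Before: the ARMv7 check was spelled as a platform name.
    #if PLATFORM(ARM_V7)
        move(linkRegister, reg);
    #endif

    // After: the architecture revision is a macro argument, so different
    // ARM architecture revisions can share one macro family.
    #if PLATFORM_ARM_ARCH(7)
        move(linkRegister, reg);
    #endif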
File size: 15.1 KB
/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef JITInlineMethods_h
#define JITInlineMethods_h

#include <wtf/Platform.h>

#if ENABLE(JIT)

#if PLATFORM(WIN)
#undef FIELD_OFFSET // Fix conflict with winnt.h.
#endif

namespace JSC {

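// The JIT keeps the result of the most recent bytecode operation in
// cachedResultRegister; m_lastResultBytecodeRegister records which virtual
// register that cached value mirrors, and INT_MAX marks the cache as empty.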
ALWAYS_INLINE void JIT::killLastResultRegister()
{
    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
}

// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        move(ImmPtr(JSValue::encode(value)), dst);
        killLastResultRegister();
        return;
    }

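    // Reuse the cached result only when this bytecode offset is not a jump
    // target: a jump target can be reached from paths that left a different
    // value in the result register.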
    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
        bool atJumpTarget = false;
        while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeIndex) {
            if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeIndex)
                atJumpTarget = true;
            ++m_jumpTargetsPosition;
        }

        if (!atJumpTarget) {
            // The argument we want is already stored in eax
            if (dst != cachedResultRegister)
                move(cachedResultRegister, dst);
            killLastResultRegister();
            return;
        }
    }

    loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
    killLastResultRegister();
}

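// Fetch the cached operand first: emitGetVirtualRegister kills the
// last-result cache on every path, so a cached src2 must be read before
// loading src1 invalidates it.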
ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
{
    if (src2 == m_lastResultBytecodeRegister) {
        emitGetVirtualRegister(src2, dst2);
        emitGetVirtualRegister(src1, dst1);
    } else {
        emitGetVirtualRegister(src1, dst1);
        emitGetVirtualRegister(src2, dst2);
    }
}

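// JIT stub arguments live at fixed offsets from the stack pointer at the
// point of the stub call; poke writes an argument slot and peek reads one
// back.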
// puts an arg onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID src, unsigned argumentNumber)
{
    poke(src, argumentNumber);
}

ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber)
{
    poke(Imm32(value), argumentNumber);
}

ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(void* value, unsigned argumentNumber)
{
    poke(ImmPtr(value), argumentNumber);
}

ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
    peek(dst, argumentNumber);
}

ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
{
    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
    return m_codeBlock->getConstant(src);
}

ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
{
    return getConstantOperand(src).getInt32Fast();
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32Fast();
}

// get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch)
{
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        emitPutJITStubArgConstant(JSValue::encode(value), argumentNumber);
    } else {
        loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch);
        emitPutJITStubArg(scratch, argumentNumber);
    }

    killLastResultRegister();
}

ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(from, Address(callFrameRegister, entry * sizeof(Register)));
}

ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(ImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    loadPtr(Address(from, entry * sizeof(Register)), to);
    killLastResultRegister();
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    load32(Address(from, entry * sizeof(Register)), to);
    killLastResultRegister();
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
    storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
    m_lastResultBytecodeRegister = (from == cachedResultRegister) ? dst : std::numeric_limits<int>::max();
    // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    storePtr(ImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
    // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
}

ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    Call nakedCall = nearCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, function.executableAddress()));
    return nakedCall;
}

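// Stubs need the return address repositioned around calls: on x86 the return
// address lives on the machine stack (hence pop/push), while on ARMv7 it
// lives in the link register.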
#if PLATFORM(X86) || PLATFORM(X86_64)

ALWAYS_INLINE void JIT::preverveReturnAddressAfterCall(RegisterID reg)
{
    pop(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    push(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    push(address);
}

#elif PLATFORM_ARM_ARCH(7)

ALWAYS_INLINE void JIT::preverveReturnAddressAfterCall(RegisterID reg)
{
    move(linkRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, linkRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, linkRegister);
}

#endif

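// JIT stubs receive their arguments either through a va_list built on the
// stack (JIT_STUB_ARGUMENT_VA_LIST) or through a pointer to the stack frame
// passed in the first argument register; in both cases the current call
// frame is first poked into the JITStackFrame.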
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    poke(callFrameRegister, FIELD_OFFSET(struct JITStackFrame, callFrame) / sizeof (void*));
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
#else
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    move(stackPointerRegister, firstArgumentRegister);
    poke(callFrameRegister, FIELD_OFFSET(struct JITStackFrame, callFrame) / sizeof (void*));
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if PLATFORM(X86)
    // Within a trampoline the return address will be on the stack at this point.
    addPtr(Imm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
#elif PLATFORM_ARM_ARCH(7)
    move(stackPointerRegister, firstArgumentRegister);
#endif
    // In the trampoline on x86-64, the first argument register is not overwritten.
}
#endif

ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
    return branchPtr(NotEqual, Address(reg, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(structure));
}

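// With ALTERNATE_JSIMMEDIATE (the 64-bit value encoding) the cell-tag mask
// is too wide for an instruction immediate, so it is kept in a reserved
// register; the 32-bit encoding tests an immediate mask instead.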
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    return branchTestPtr(Zero, reg, tagMaskRegister);
#else
    return branchTest32(Zero, reg, Imm32(JSImmediate::TagMask));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    orPtr(reg2, scratch);
    return emitJumpIfJSCell(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfJSCell(reg));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    return branchTestPtr(NonZero, reg, tagMaskRegister);
#else
    return branchTest32(NonZero, reg, Imm32(JSImmediate::TagMask));
#endif
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfNotJSCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        emitJumpSlowCaseIfNotJSCell(reg);
}

ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        linkSlowCase(iter);
}

#if USE(ALTERNATE_JSIMMEDIATE)
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateNumber(RegisterID reg)
{
    return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateNumber(RegisterID reg)
{
    return branchTestPtr(Zero, reg, tagTypeNumberRegister);
}
#endif

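// Under ALTERNATE_JSIMMEDIATE, immediate integers carry the full
// TagTypeNumber pattern in their upper bits, so an unsigned compare against
// tagTypeNumberRegister identifies them; the 32-bit encoding tests the low
// tag bit instead.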
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
#else
    return branchTest32(NonZero, reg, Imm32(JSImmediate::TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    return branchPtr(Below, reg, tagTypeNumberRegister);
#else
    return branchTest32(Zero, reg, Imm32(JSImmediate::TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    andPtr(reg2, scratch);
    return emitJumpIfNotImmediateInteger(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateInteger(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}

#if !USE(ALTERNATE_JSIMMEDIATE)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
    subPtr(Imm32(JSImmediate::TagTypeNumber), reg);
}

ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
{
    return branchSubPtr(Zero, Imm32(JSImmediate::TagTypeNumber), reg);
}
#endif

ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    emitFastArithIntToImmNoCheck(src, dest);
#else
    if (src != dest)
        move(src, dest);
    addPtr(Imm32(JSImmediate::TagTypeNumber), dest);
#endif
}

ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    UNUSED_PARAM(reg);
#else
    rshiftPtr(Imm32(JSImmediate::IntegerPayloadShift), reg);
#endif
}

// operand is int32_t, must have been zero-extended if register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    if (src != dest)
        move(src, dest);
    orPtr(tagTypeNumberRegister, dest);
#else
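    // 32-bit encoding: shift the payload up one bit (the add of dest to
    // itself) and then set the low tag bit via emitFastArithReTagImmediate.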
    signExtend32ToPtr(src, dest);
    addPtr(dest, dest);
    emitFastArithReTagImmediate(dest, dest);
#endif
}

ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
    lshift32(Imm32(JSImmediate::ExtendedPayloadShift), reg);
    or32(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), reg);
}

ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    m_jmpTable.append(JumpTable(jump, m_bytecodeIndex + relativeOffset));
}

ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this);
}

#if ENABLE(SAMPLING_FLAGS)
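// Sampling flags are numbered 1..32 and map to bits 0..31 of the global
// SamplingFlags word; each helper is a single read-modify-write on an
// absolute address.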
ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    or32(Imm32(1u << (flag - 1)), AbsoluteAddress(&SamplingFlags::s_flags));
}

ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    and32(Imm32(~(1u << (flag - 1))), AbsoluteAddress(&SamplingFlags::s_flags));
}
#endif

#if ENABLE(SAMPLING_COUNTERS)
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
{
#if PLATFORM(X86_64) // Or any other 64-bit platform.
    addPtr(Imm32(count), AbsoluteAddress(&counter.m_counter));
#elif PLATFORM(X86) // Or any other little-endian 32-bit platform.
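    // The counter is 64 bits wide: add into the low word, then propagate the
    // carry into the high word, which on a little-endian target sits one
    // int32 above it.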
    intptr_t hiWord = reinterpret_cast<intptr_t>(&counter.m_counter) + sizeof(int32_t);
    add32(Imm32(count), AbsoluteAddress(&counter.m_counter));
    addWithCarry32(Imm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
#else
#error "SAMPLING_COUNTERS not implemented on this platform."
#endif
}
#endif

#if ENABLE(OPCODE_SAMPLING)
#if PLATFORM(X86_64)
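// On x86-64 a 64-bit pointer cannot be stored to an absolute address as an
// immediate, so the slot address is staged through a register first.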
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86::ecx);
    storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
}
#endif
#endif

#if ENABLE(CODEBLOCK_SAMPLING)
#if PLATFORM(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86::ecx);
    storePtr(ImmPtr(codeBlock), X86::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    storePtr(ImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
}
#endif
#endif

}

#endif // ENABLE(JIT)

#endif