source: webkit/trunk/JavaScriptCore/VM/CTI.cpp@ 37433

Last change on this file since 37433 was 37433, checked in by Darin Adler, 17 years ago

JavaScriptCore:

2008-10-08 Darin Adler <Darin Adler>

Reviewed by Cameron Zwarich.

Add CallFrame as a synonym for ExecState. Arguably, some day we should switch every
client over to the new name.

Use CallFrame* consistently rather than Register* or ExecState* in low-level code such
as Machine.cpp and CTI.cpp. Similarly, use callFrame rather than r as its name and use
accessor functions to get at things in the frame.

Eliminate other uses of ExecState* that aren't needed, replacing in some cases with
JSGlobalData* and in other cases eliminating them entirely.
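
In practice the synonym is just a type alias, and "accessor functions" means going through CallFrame's
member functions rather than indexing the Register* array directly. A minimal illustrative sketch of
the idea (not the actual ExecState.h contents):

    class ExecState;
    typedef ExecState CallFrame;            // new name, same type

    // Low-level code now takes CallFrame* ("callFrame") instead of Register* ("r") and reads
    // frame slots through accessors such as CallFrame::thisValue() (see the kjs/ExecState.cpp
    // entry in the list below):
    static JSValue* exampleThisValue(CallFrame* callFrame)
    {
        return callFrame->thisValue();
    }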

  • API/JSObjectRef.cpp: (JSObjectMakeFunctionWithCallback): (JSObjectMakeFunction): (JSObjectHasProperty): (JSObjectGetProperty): (JSObjectSetProperty): (JSObjectDeleteProperty):
  • API/OpaqueJSString.cpp:
  • API/OpaqueJSString.h:
  • VM/CTI.cpp: (JSC::CTI::getConstant): (JSC::CTI::emitGetArg): (JSC::CTI::emitGetPutArg): (JSC::CTI::getConstantImmediateNumericArg): (JSC::CTI::printOpcodeOperandTypes): (JSC::CTI::CTI): (JSC::CTI::compileOpCall): (JSC::CTI::compileBinaryArithOp): (JSC::CTI::privateCompileMainPass): (JSC::CTI::privateCompile): (JSC::CTI::privateCompileGetByIdProto): (JSC::CTI::privateCompileGetByIdChain): (JSC::CTI::compileRegExp):
  • VM/CTI.h:
  • VM/CodeBlock.h:
  • VM/CodeGenerator.cpp: (JSC::CodeGenerator::emitEqualityOp): (JSC::CodeGenerator::emitLoad): (JSC::CodeGenerator::emitUnexpectedLoad): (JSC::CodeGenerator::emitConstruct):
  • VM/CodeGenerator.h:
  • VM/Machine.cpp: (JSC::jsLess): (JSC::jsLessEq): (JSC::jsAddSlowCase): (JSC::jsAdd): (JSC::jsTypeStringForValue): (JSC::Machine::resolve): (JSC::Machine::resolveSkip): (JSC::Machine::resolveGlobal): (JSC::inlineResolveBase): (JSC::Machine::resolveBase): (JSC::Machine::resolveBaseAndProperty): (JSC::Machine::resolveBaseAndFunc): (JSC::Machine::slideRegisterWindowForCall): (JSC::isNotObject): (JSC::Machine::callEval): (JSC::Machine::dumpCallFrame): (JSC::Machine::dumpRegisters): (JSC::Machine::unwindCallFrame): (JSC::Machine::throwException): (JSC::DynamicGlobalObjectScope::DynamicGlobalObjectScope): (JSC::DynamicGlobalObjectScope::~DynamicGlobalObjectScope): (JSC::Machine::execute): (JSC::Machine::debug): (JSC::Machine::createExceptionScope): (JSC::cachePrototypeChain): (JSC::Machine::tryCachePutByID): (JSC::Machine::tryCacheGetByID): (JSC::Machine::privateExecute): (JSC::Machine::retrieveArguments): (JSC::Machine::retrieveCaller): (JSC::Machine::retrieveLastCaller): (JSC::Machine::findFunctionCallFrame): (JSC::Machine::getArgumentsData): (JSC::Machine::tryCTICachePutByID): (JSC::Machine::getCTIArrayLengthTrampoline): (JSC::Machine::getCTIStringLengthTrampoline): (JSC::Machine::tryCTICacheGetByID): (JSC::Machine::cti_op_convert_this): (JSC::Machine::cti_op_end): (JSC::Machine::cti_op_add): (JSC::Machine::cti_op_pre_inc): (JSC::Machine::cti_timeout_check): (JSC::Machine::cti_op_loop_if_less): (JSC::Machine::cti_op_loop_if_lesseq): (JSC::Machine::cti_op_new_object): (JSC::Machine::cti_op_put_by_id): (JSC::Machine::cti_op_put_by_id_second): (JSC::Machine::cti_op_put_by_id_generic): (JSC::Machine::cti_op_put_by_id_fail): (JSC::Machine::cti_op_get_by_id): (JSC::Machine::cti_op_get_by_id_second): (JSC::Machine::cti_op_get_by_id_generic): (JSC::Machine::cti_op_get_by_id_fail): (JSC::Machine::cti_op_instanceof): (JSC::Machine::cti_op_del_by_id): (JSC::Machine::cti_op_mul): (JSC::Machine::cti_op_new_func): (JSC::Machine::cti_op_call_JSFunction): (JSC::Machine::cti_vm_compile): (JSC::Machine::cti_op_push_activation): (JSC::Machine::cti_op_call_NotJSFunction): (JSC::Machine::cti_op_create_arguments): (JSC::Machine::cti_op_tear_off_activation): (JSC::Machine::cti_op_tear_off_arguments): (JSC::Machine::cti_op_ret_profiler): (JSC::Machine::cti_op_ret_scopeChain): (JSC::Machine::cti_op_new_array): (JSC::Machine::cti_op_resolve): (JSC::Machine::cti_op_construct_JSConstruct): (JSC::Machine::cti_op_construct_NotJSConstruct): (JSC::Machine::cti_op_get_by_val): (JSC::Machine::cti_op_resolve_func): (JSC::Machine::cti_op_sub): (JSC::Machine::cti_op_put_by_val): (JSC::Machine::cti_op_put_by_val_array): (JSC::Machine::cti_op_lesseq): (JSC::Machine::cti_op_loop_if_true): (JSC::Machine::cti_op_negate): (JSC::Machine::cti_op_resolve_base): (JSC::Machine::cti_op_resolve_skip): (JSC::Machine::cti_op_resolve_global): (JSC::Machine::cti_op_div): (JSC::Machine::cti_op_pre_dec): (JSC::Machine::cti_op_jless): (JSC::Machine::cti_op_not): (JSC::Machine::cti_op_jtrue): (JSC::Machine::cti_op_post_inc): (JSC::Machine::cti_op_eq): (JSC::Machine::cti_op_lshift): (JSC::Machine::cti_op_bitand): (JSC::Machine::cti_op_rshift): (JSC::Machine::cti_op_bitnot): (JSC::Machine::cti_op_resolve_with_base): (JSC::Machine::cti_op_new_func_exp): (JSC::Machine::cti_op_mod): (JSC::Machine::cti_op_less): (JSC::Machine::cti_op_neq): (JSC::Machine::cti_op_post_dec): (JSC::Machine::cti_op_urshift): (JSC::Machine::cti_op_bitxor): (JSC::Machine::cti_op_new_regexp): (JSC::Machine::cti_op_bitor): (JSC::Machine::cti_op_call_eval): 
(JSC::Machine::cti_op_throw): (JSC::Machine::cti_op_get_pnames): (JSC::Machine::cti_op_next_pname): (JSC::Machine::cti_op_push_scope): (JSC::Machine::cti_op_pop_scope): (JSC::Machine::cti_op_typeof): (JSC::Machine::cti_op_to_jsnumber): (JSC::Machine::cti_op_in): (JSC::Machine::cti_op_push_new_scope): (JSC::Machine::cti_op_jmp_scopes): (JSC::Machine::cti_op_put_by_index): (JSC::Machine::cti_op_switch_imm): (JSC::Machine::cti_op_switch_char): (JSC::Machine::cti_op_switch_string): (JSC::Machine::cti_op_del_by_val): (JSC::Machine::cti_op_put_getter): (JSC::Machine::cti_op_put_setter): (JSC::Machine::cti_op_new_error): (JSC::Machine::cti_op_debug): (JSC::Machine::cti_vm_throw):
  • VM/Machine.h:
  • VM/Register.h:
  • VM/RegisterFile.h:
  • kjs/Arguments.h:
  • kjs/DebuggerCallFrame.cpp: (JSC::DebuggerCallFrame::functionName): (JSC::DebuggerCallFrame::type): (JSC::DebuggerCallFrame::thisObject): (JSC::DebuggerCallFrame::evaluate):
  • kjs/DebuggerCallFrame.h:
  • kjs/ExecState.cpp: (JSC::CallFrame::thisValue):
  • kjs/ExecState.h:
  • kjs/FunctionConstructor.cpp: (JSC::constructFunction):
  • kjs/JSActivation.cpp: (JSC::JSActivation::JSActivation): (JSC::JSActivation::argumentsGetter):
  • kjs/JSActivation.h:
  • kjs/JSGlobalObject.cpp: (JSC::JSGlobalObject::init):
  • kjs/JSGlobalObjectFunctions.cpp: (JSC::globalFuncEval):
  • kjs/JSVariableObject.h:
  • kjs/Parser.cpp: (JSC::Parser::parse):
  • kjs/RegExpConstructor.cpp: (JSC::constructRegExp):
  • kjs/RegExpPrototype.cpp: (JSC::regExpProtoFuncCompile):
  • kjs/Shell.cpp: (prettyPrintScript):
  • kjs/StringPrototype.cpp: (JSC::stringProtoFuncMatch): (JSC::stringProtoFuncSearch):
  • kjs/identifier.cpp: (JSC::Identifier::checkSameIdentifierTable):
  • kjs/interpreter.cpp: (JSC::Interpreter::checkSyntax): (JSC::Interpreter::evaluate):
  • kjs/nodes.cpp: (JSC::ThrowableExpressionData::emitThrowError): (JSC::RegExpNode::emitCode): (JSC::ArrayNode::emitCode): (JSC::InstanceOfNode::emitCode):
  • kjs/nodes.h:
  • kjs/regexp.cpp: (JSC::RegExp::RegExp): (JSC::RegExp::create):
  • kjs/regexp.h:
  • profiler/HeavyProfile.h:
  • profiler/Profile.h:
  • wrec/WREC.cpp:
  • wrec/WREC.h:

WebKit/mac:

2008-10-08 Darin Adler <Darin Adler>

Reviewed by Cameron Zwarich.

  • WebView/WebScriptDebugger.mm: (WebScriptDebugger::WebScriptDebugger): Update since DebuggerCallFrame is simpler now.
File size: 131.7 KB
1/*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "CTI.h"
28
29#if ENABLE(CTI)
30
31#include "CodeBlock.h"
32#include "JSArray.h"
33#include "JSFunction.h"
34#include "Machine.h"
35#include "wrec/WREC.h"
36#include "ResultType.h"
37
38#if PLATFORM(MAC)
39#include <sys/sysctl.h>
40#endif
41
42using namespace std;
43
44namespace JSC {
45
46#if PLATFORM(MAC)
47
48static inline bool isSSE2Present()
49{
50 return true; // All X86 Macs are guaranteed to support at least SSE2
51}
52
53#else
54
55static bool isSSE2Present()
56{
57 static const int SSE2FeatureBit = 1 << 26;
58 struct SSE2Check {
59 SSE2Check()
60 {
61 int flags;
62#if COMPILER(MSVC)
63 _asm {
64 mov eax, 1 // cpuid function 1 gives us the standard feature set
65 cpuid;
66 mov flags, edx;
67 }
68#else
69 flags = 0;
70 // FIXME: Add GCC code to do above asm
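 // A GCC equivalent of the MSVC block above would look roughly like the following (untested sketch;
 // ebx is saved and restored because it can be reserved for PIC on x86):
 //
 //     asm (
 //         "movl $0x1, %%eax;"
 //         "pushl %%ebx;"
 //         "cpuid;"
 //         "popl %%ebx;"
 //         "movl %%edx, %0;"
 //         : "=g" (flags)
 //         :
 //         : "%eax", "%ecx", "%edx"
 //     );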
71#endif
72 present = (flags & SSE2FeatureBit) != 0;
73 }
74 bool present;
75 };
76 static SSE2Check check;
77 return check.present;
78}
79
80#endif
81
82COMPILE_ASSERT(CTI_ARGS_code == 0xC, CTI_ARGS_code_is_C);
83COMPILE_ASSERT(CTI_ARGS_callFrame == 0xE, CTI_ARGS_callFrame_is_E);
84
85#if COMPILER(GCC) && PLATFORM(X86)
86
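// ctiTrampoline (below) is the entry from C++ into CTI-generated code. Roughly: it saves the
// callee-saved registers the generated code uses (esi, edi), reserves 0x24 bytes of stack that the
// generated code reuses as outgoing-argument space for cti_* helper calls, seeds esi with the
// slow-script timeout countdown (see emitSlowScriptCheck), loads the CallFrame* argument into edi,
// and calls the 'code' argument. The 0x30/0x38 offsets are tied to the assertions above.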
87asm(
88".globl _ctiTrampoline" "\n"
89"_ctiTrampoline:" "\n"
90 "pushl %esi" "\n"
91 "pushl %edi" "\n"
92 "subl $0x24, %esp" "\n"
93 "movl $512, %esi" "\n"
94 "movl 0x38(%esp), %edi" "\n" // Ox38 = 0x0E * 4, 0x0E = CTI_ARGS_callFrame (see assertion above)
95 "call *0x30(%esp)" "\n" // Ox30 = 0x0C * 4, 0x0C = CTI_ARGS_code (see assertion above)
96 "addl $0x24, %esp" "\n"
97 "popl %edi" "\n"
98 "popl %esi" "\n"
99 "ret" "\n"
100);
101
102asm(
103".globl _ctiVMThrowTrampoline" "\n"
104"_ctiVMThrowTrampoline:" "\n"
105 "call __ZN3JSC7Machine12cti_vm_throwEPv" "\n"
106 "addl $0x24, %esp" "\n"
107 "popl %edi" "\n"
108 "popl %esi" "\n"
109 "ret" "\n"
110);
111
112#elif COMPILER(MSVC)
113
114extern "C" {
115
116 __declspec(naked) JSValue* ctiTrampoline(void* code, RegisterFile*, Register*, JSValue** exception, Profiler**, JSGlobalData*)
117 {
118 __asm {
119 push esi;
120 push edi;
121 sub esp, 0x24;
122 mov esi, 512;
123 mov ecx, esp;
124 mov edi, [esp + 0x38]; // 0x38 = 0x0E * 4, 0x0E = CTI_ARGS_callFrame (see assertion above)
125 call [esp + 0x30]; // 0x30 = 0x0C * 4, 0x0C = CTI_ARGS_code (see assertion above)
126 add esp, 0x24;
127 pop edi;
128 pop esi;
129 ret;
130 }
131 }
132
133 __declspec(naked) void ctiVMThrowTrampoline()
134 {
135 __asm {
136 mov ecx, esp;
137 call JSC::Machine::cti_vm_throw;
138 add esp, 0x24;
139 pop edi;
140 pop esi;
141 ret;
142 }
143 }
144
145}
146
147#endif
148
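// A CodeBlock numbers its constant registers immediately after its variables, so indices in
// [numVars, numVars + numConstants) name constants; isConstant() and getConstant() below rely on
// that layout to map a virtual register index back to its constant pool entry.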
149ALWAYS_INLINE bool CTI::isConstant(int src)
150{
151 return src >= m_codeBlock->numVars && src < m_codeBlock->numVars + m_codeBlock->numConstants;
152}
153
154ALWAYS_INLINE JSValue* CTI::getConstant(CallFrame* callFrame, int src)
155{
156 return m_codeBlock->constantRegisters[src - m_codeBlock->numVars].jsValue(callFrame);
157}
158
159// get arg puts an arg from the SF register array into a h/w register
160ALWAYS_INLINE void CTI::emitGetArg(int src, X86Assembler::RegisterID dst)
161{
162 // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
163 if (isConstant(src)) {
164 JSValue* js = getConstant(m_callFrame, src);
165 m_jit.movl_i32r(reinterpret_cast<unsigned>(js), dst);
166 } else
167 m_jit.movl_mr(src * sizeof(Register), X86::edi, dst);
168}
169
170// get put arg loads an arg from the SF register array and puts it onto the stack, as an arg to a context threaded function.
171ALWAYS_INLINE void CTI::emitGetPutArg(unsigned src, unsigned offset, X86Assembler::RegisterID scratch)
172{
173 if (isConstant(src)) {
174 JSValue* js = getConstant(m_callFrame, src);
175 m_jit.movl_i32m(reinterpret_cast<unsigned>(js), offset + sizeof(void*), X86::esp);
176 } else {
177 m_jit.movl_mr(src * sizeof(Register), X86::edi, scratch);
178 m_jit.movl_rm(scratch, offset + sizeof(void*), X86::esp);
179 }
180}
181
182// puts an arg onto the stack, as an arg to a context threaded function.
183ALWAYS_INLINE void CTI::emitPutArg(X86Assembler::RegisterID src, unsigned offset)
184{
185 m_jit.movl_rm(src, offset + sizeof(void*), X86::esp);
186}
187
188ALWAYS_INLINE void CTI::emitPutArgConstant(unsigned value, unsigned offset)
189{
190 m_jit.movl_i32m(value, offset + sizeof(void*), X86::esp);
191}
192
193ALWAYS_INLINE JSValue* CTI::getConstantImmediateNumericArg(unsigned src)
194{
195 if (isConstant(src)) {
196 JSValue* js = getConstant(m_callFrame, src);
197 return JSImmediate::isNumber(js) ? js : 0;
198 }
199 return 0;
200}
201
202ALWAYS_INLINE void CTI::emitPutCTIParam(void* value, unsigned name)
203{
204 m_jit.movl_i32m(reinterpret_cast<intptr_t>(value), name * sizeof(void*), X86::esp);
205}
206
207ALWAYS_INLINE void CTI::emitPutCTIParam(X86Assembler::RegisterID from, unsigned name)
208{
209 m_jit.movl_rm(from, name * sizeof(void*), X86::esp);
210}
211
212ALWAYS_INLINE void CTI::emitGetCTIParam(unsigned name, X86Assembler::RegisterID to)
213{
214 m_jit.movl_mr(name * sizeof(void*), X86::esp, to);
215}
216
217ALWAYS_INLINE void CTI::emitPutToCallFrameHeader(X86Assembler::RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
218{
219 m_jit.movl_rm(from, entry * sizeof(Register), X86::edi);
220}
221
222ALWAYS_INLINE void CTI::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, X86Assembler::RegisterID to)
223{
224 m_jit.movl_mr(entry * sizeof(Register), X86::edi, to);
225}
226
227ALWAYS_INLINE void CTI::emitPutResult(unsigned dst, X86Assembler::RegisterID from)
228{
229 m_jit.movl_rm(from, dst * sizeof(Register), X86::edi);
230 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
231}
232
233ALWAYS_INLINE void CTI::emitInitRegister(unsigned dst)
234{
235 m_jit.movl_i32m(reinterpret_cast<unsigned>(jsUndefined()), dst * sizeof(Register), X86::edi);
236 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
237}
238
239#if ENABLE(SAMPLING_TOOL)
240unsigned inCalledCode = 0;
241#endif
242
243void ctiSetReturnAddress(void** where, void* what)
244{
245 *where = what;
246}
247
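// Given the return address of a near call, rewrite the call instruction's 32-bit relative
// displacement (the four bytes immediately preceding the return address) so that the call now
// targets 'what'.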
248void ctiRepatchCallByReturnAddress(void* where, void* what)
249{
250 (static_cast<void**>(where))[-1] = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(what) - reinterpret_cast<uintptr_t>(where));
251}
252
253#ifndef NDEBUG
254
255void CTI::printOpcodeOperandTypes(unsigned src1, unsigned src2)
256{
257 char which1 = '*';
258 if (isConstant(src1)) {
259 JSValue* js = getConstant(m_callFrame, src1);
260 which1 =
261 JSImmediate::isImmediate(js) ?
262 (JSImmediate::isNumber(js) ? 'i' :
263 JSImmediate::isBoolean(js) ? 'b' :
264 js->isUndefined() ? 'u' :
265 js->isNull() ? 'n' : '?')
266 :
267 (js->isString() ? 's' :
268 js->isObject() ? 'o' :
269 'k');
270 }
271 char which2 = '*';
272 if (isConstant(src2)) {
273 JSValue* js = getConstant(m_callFrame, src2);
274 which2 =
275 JSImmediate::isImmediate(js) ?
276 (JSImmediate::isNumber(js) ? 'i' :
277 JSImmediate::isBoolean(js) ? 'b' :
278 js->isUndefined() ? 'u' :
279 js->isNull() ? 'n' : '?')
280 :
281 (js->isString() ? 's' :
282 js->isObject() ? 'o' :
283 'k');
284 }
285 if ((which1 != '*') | (which2 != '*'))
286 fprintf(stderr, "Types %c %c\n", which1, which2);
287}
288
289#endif
290
291ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, X86::RegisterID r)
292{
293 m_jit.emitRestoreArgumentReference();
294 X86Assembler::JmpSrc call = m_jit.emitCall(r);
295 m_calls.append(CallRecord(call, opcodeIndex));
296
297 return call;
298}
299
300ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
301{
302#if ENABLE(SAMPLING_TOOL)
303 m_jit.movl_i32m(1, &inCalledCode);
304#endif
305 m_jit.emitRestoreArgumentReference();
306 X86Assembler::JmpSrc call = m_jit.emitCall();
307 m_calls.append(CallRecord(call, helper, opcodeIndex));
308#if ENABLE(SAMPLING_TOOL)
309 m_jit.movl_i32m(0, &inCalledCode);
310#endif
311
312 return call;
313}
314
315ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_p helper)
316{
317#if ENABLE(SAMPLING_TOOL)
318 m_jit.movl_i32m(1, &inCalledCode);
319#endif
320 m_jit.emitRestoreArgumentReference();
321 X86Assembler::JmpSrc call = m_jit.emitCall();
322 m_calls.append(CallRecord(call, helper, opcodeIndex));
323#if ENABLE(SAMPLING_TOOL)
324 m_jit.movl_i32m(0, &inCalledCode);
325#endif
326
327 return call;
328}
329
330ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_b helper)
331{
332#if ENABLE(SAMPLING_TOOL)
333 m_jit.movl_i32m(1, &inCalledCode);
334#endif
335 m_jit.emitRestoreArgumentReference();
336 X86Assembler::JmpSrc call = m_jit.emitCall();
337 m_calls.append(CallRecord(call, helper, opcodeIndex));
338#if ENABLE(SAMPLING_TOOL)
339 m_jit.movl_i32m(0, &inCalledCode);
340#endif
341
342 return call;
343}
344
345ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_v helper)
346{
347#if ENABLE(SAMPLING_TOOL)
348 m_jit.movl_i32m(1, &inCalledCode);
349#endif
350 m_jit.emitRestoreArgumentReference();
351 X86Assembler::JmpSrc call = m_jit.emitCall();
352 m_calls.append(CallRecord(call, helper, opcodeIndex));
353#if ENABLE(SAMPLING_TOOL)
354 m_jit.movl_i32m(0, &inCalledCode);
355#endif
356
357 return call;
358}
359
360ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_s helper)
361{
362#if ENABLE(SAMPLING_TOOL)
363 m_jit.movl_i32m(1, &inCalledCode);
364#endif
365 m_jit.emitRestoreArgumentReference();
366 X86Assembler::JmpSrc call = m_jit.emitCall();
367 m_calls.append(CallRecord(call, helper, opcodeIndex));
368#if ENABLE(SAMPLING_TOOL)
369 m_jit.movl_i32m(0, &inCalledCode);
370#endif
371
372 return call;
373}
374
375ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_2 helper)
376{
377#if ENABLE(SAMPLING_TOOL)
378 m_jit.movl_i32m(1, &inCalledCode);
379#endif
380 m_jit.emitRestoreArgumentReference();
381 X86Assembler::JmpSrc call = m_jit.emitCall();
382 m_calls.append(CallRecord(call, helper, opcodeIndex));
383#if ENABLE(SAMPLING_TOOL)
384 m_jit.movl_i32m(0, &inCalledCode);
385#endif
386
387 return call;
388}
389
390ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex)
391{
392 m_jit.testl_i32r(JSImmediate::TagMask, reg);
393 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), opcodeIndex));
394}
395
396ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNum(X86Assembler::RegisterID reg, unsigned opcodeIndex)
397{
398 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, reg);
399 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), opcodeIndex));
400}
401
402ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNums(X86Assembler::RegisterID reg1, X86Assembler::RegisterID reg2, unsigned opcodeIndex)
403{
404 m_jit.movl_rr(reg1, X86::ecx);
405 m_jit.andl_rr(reg2, X86::ecx);
406 emitJumpSlowCaseIfNotImmNum(X86::ecx, opcodeIndex);
407}
408
409ALWAYS_INLINE unsigned CTI::getDeTaggedConstantImmediate(JSValue* imm)
410{
411 ASSERT(JSImmediate::isNumber(imm));
412 return reinterpret_cast<unsigned>(imm) & ~JSImmediate::TagBitTypeInteger;
413}
414
415ALWAYS_INLINE void CTI::emitFastArithDeTagImmediate(X86Assembler::RegisterID reg)
416{
417 // op_mod relies on this being a sub - setting zf if result is 0.
418 m_jit.subl_i8r(JSImmediate::TagBitTypeInteger, reg);
419}
420
421ALWAYS_INLINE void CTI::emitFastArithReTagImmediate(X86Assembler::RegisterID reg)
422{
423 m_jit.addl_i8r(JSImmediate::TagBitTypeInteger, reg);
424}
425
426ALWAYS_INLINE void CTI::emitFastArithPotentiallyReTagImmediate(X86Assembler::RegisterID reg)
427{
428 m_jit.orl_i32r(JSImmediate::TagBitTypeInteger, reg);
429}
430
431ALWAYS_INLINE void CTI::emitFastArithImmToInt(X86Assembler::RegisterID reg)
432{
433 m_jit.sarl_i8r(1, reg);
434}
435
436ALWAYS_INLINE void CTI::emitFastArithIntToImmOrSlowCase(X86Assembler::RegisterID reg, unsigned opcodeIndex)
437{
438 m_jit.addl_rr(reg, reg);
439 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), opcodeIndex));
440 emitFastArithReTagImmediate(reg);
441}
442
443ALWAYS_INLINE void CTI::emitFastArithIntToImmNoCheck(X86Assembler::RegisterID reg)
444{
445 m_jit.addl_rr(reg, reg);
446 emitFastArithReTagImmediate(reg);
447}
448
449ALWAYS_INLINE void CTI::emitTagAsBoolImmediate(X86Assembler::RegisterID reg)
450{
451 m_jit.shl_i8r(JSImmediate::ExtendedPayloadShift, reg);
452 m_jit.orl_i32r(JSImmediate::FullTagTypeBool, reg);
453}
454
455CTI::CTI(Machine* machine, CallFrame* callFrame, CodeBlock* codeBlock)
456 : m_jit(machine->jitCodeBuffer())
457 , m_machine(machine)
458 , m_callFrame(callFrame)
459 , m_codeBlock(codeBlock)
460 , m_labels(codeBlock ? codeBlock->instructions.size() : 0)
461 , m_structureStubCompilationInfo(codeBlock ? codeBlock->structureIDInstructions.size() : 0)
462{
463}
464
465#define CTI_COMPILE_BINARY_OP(name) \
466 case name: { \
467 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
468 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
469 emitCall(i, Machine::cti_##name); \
470 emitPutResult(instruction[i + 1].u.operand); \
471 i += 4; \
472 break; \
473 }
474
475#define CTI_COMPILE_UNARY_OP(name) \
476 case name: { \
477 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
478 emitCall(i, Machine::cti_##name); \
479 emitPutResult(instruction[i + 1].u.operand); \
480 i += 3; \
481 break; \
482 }
483
484#if ENABLE(SAMPLING_TOOL)
485OpcodeID currentOpcodeID = static_cast<OpcodeID>(-1);
486#endif
487
488void CTI::compileOpCallInitializeCallFrame(unsigned callee, unsigned argCount)
489{
490 emitGetArg(callee, X86::ecx); // Load callee JSFunction into ecx
491 m_jit.movl_rm(X86::eax, RegisterFile::CodeBlock * static_cast<int>(sizeof(Register)), X86::edx); // callee CodeBlock was returned in eax
492 m_jit.movl_i32m(reinterpret_cast<unsigned>(nullJSValue), RegisterFile::OptionalCalleeArguments * static_cast<int>(sizeof(Register)), X86::edx);
493 m_jit.movl_rm(X86::ecx, RegisterFile::Callee * static_cast<int>(sizeof(Register)), X86::edx);
494
495 m_jit.movl_mr(OBJECT_OFFSET(JSFunction, m_scopeChain) + OBJECT_OFFSET(ScopeChain, m_node), X86::ecx, X86::ecx); // newScopeChain
496 m_jit.movl_i32m(argCount, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register)), X86::edx);
497 m_jit.movl_rm(X86::edi, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register)), X86::edx);
498 m_jit.movl_rm(X86::ecx, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register)), X86::edx);
499}
500
501void CTI::compileOpCall(Instruction* instruction, unsigned i, CompileOpCallType type)
502{
503 int dst = instruction[i + 1].u.operand;
504 int callee = instruction[i + 2].u.operand;
505 int firstArg = instruction[i + 4].u.operand;
506 int argCount = instruction[i + 5].u.operand;
507 int registerOffset = instruction[i + 6].u.operand;
508
509 if (type == OpCallEval)
510 emitGetPutArg(instruction[i + 3].u.operand, 16, X86::ecx);
511
512 if (type == OpConstruct) {
513 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 20);
514 emitPutArgConstant(argCount, 16);
515 emitPutArgConstant(registerOffset, 12);
516 emitPutArgConstant(firstArg, 8);
517 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
518 } else {
519 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 12);
520 emitPutArgConstant(argCount, 8);
521 emitPutArgConstant(registerOffset, 4);
522
523 int thisVal = instruction[i + 3].u.operand;
524 if (thisVal == missingThisObjectMarker()) {
525 // FIXME: should this be loaded dynamically off m_callFrame?
526 m_jit.movl_i32m(reinterpret_cast<unsigned>(m_callFrame->globalThisValue()), firstArg * sizeof(Register), X86::edi);
527 } else {
528 emitGetArg(thisVal, X86::ecx);
529 emitPutResult(firstArg, X86::ecx);
530 }
531 }
532
533 X86Assembler::JmpSrc wasEval;
534 if (type == OpCallEval) {
535 emitGetPutArg(callee, 0, X86::ecx);
536 emitCall(i, Machine::cti_op_call_eval);
537
538 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(JSImmediate::impossibleValue()), X86::eax);
539 wasEval = m_jit.emitUnlinkedJne();
540
541 // this sets up the first arg to op_cti_call (func), and explicitly leaves the value in ecx (checked just below).
542 emitGetArg(callee, X86::ecx);
543 } else {
544 // this sets up the first arg to op_cti_call (func), and explicitly leaves the value in ecx (checked just below).
545 emitGetPutArg(callee, 0, X86::ecx);
546 }
547
548 // Fast check for JS function.
549 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
550 X86Assembler::JmpSrc isNotObject = m_jit.emitUnlinkedJne();
551 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsFunctionVptr), X86::ecx);
552 X86Assembler::JmpSrc isJSFunction = m_jit.emitUnlinkedJe();
553 m_jit.link(isNotObject, m_jit.label());
554
555 // This handles host functions
556 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_NotJSConstruct : Machine::cti_op_call_NotJSFunction));
557
558 X86Assembler::JmpSrc wasNotJSFunction = m_jit.emitUnlinkedJmp();
559 m_jit.link(isJSFunction, m_jit.label());
560
561 // This handles JSFunctions
562 emitCall(i, (type == OpConstruct) ? Machine::cti_op_construct_JSConstruct : Machine::cti_op_call_JSFunction);
563
564 compileOpCallInitializeCallFrame(callee, argCount);
565
566 // load ctiCode from the new codeBlock.
567 m_jit.movl_mr(OBJECT_OFFSET(CodeBlock, ctiCode), X86::eax, X86::eax);
568
569 // Put the new value of 'callFrame' into edi and onto the stack, too.
570 emitPutCTIParam(X86::edx, CTI_ARGS_callFrame);
571 m_jit.movl_rr(X86::edx, X86::edi);
572
573 // Check the ctiCode has been generated - if not, this is handled in a slow case.
574 m_jit.testl_rr(X86::eax, X86::eax);
575 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
576 emitCall(i, X86::eax);
577
578 X86Assembler::JmpDst end = m_jit.label();
579 m_jit.link(wasNotJSFunction, end);
580 if (type == OpCallEval)
581 m_jit.link(wasEval, end);
582
583 // Put the return value in dst. In the interpreter, op_ret does this.
584 emitPutResult(dst);
585}
586
587void CTI::compileOpStrictEq(Instruction* instruction, unsigned i, CompileOpStrictEqType type)
588{
589 bool negated = (type == OpNStrictEq);
590
591 unsigned dst = instruction[i + 1].u.operand;
592 unsigned src1 = instruction[i + 2].u.operand;
593 unsigned src2 = instruction[i + 3].u.operand;
594
595 emitGetArg(src1, X86::eax);
596 emitGetArg(src2, X86::edx);
597
598 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
599 X86Assembler::JmpSrc firstNotImmediate = m_jit.emitUnlinkedJe();
600 m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
601 X86Assembler::JmpSrc secondNotImmediate = m_jit.emitUnlinkedJe();
602
603 m_jit.cmpl_rr(X86::edx, X86::eax);
604 if (negated)
605 m_jit.setne_r(X86::eax);
606 else
607 m_jit.sete_r(X86::eax);
608 m_jit.movzbl_rr(X86::eax, X86::eax);
609 emitTagAsBoolImmediate(X86::eax);
610
611 X86Assembler::JmpSrc bothWereImmediates = m_jit.emitUnlinkedJmp();
612
613 m_jit.link(firstNotImmediate, m_jit.label());
614
615 // check that edx is immediate but not the zero immediate
616 m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
617 m_jit.setz_r(X86::ecx);
618 m_jit.movzbl_rr(X86::ecx, X86::ecx); // ecx is now 1 if edx was nonimmediate
619 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::edx);
620 m_jit.sete_r(X86::edx);
621 m_jit.movzbl_rr(X86::edx, X86::edx); // edx is now 1 if edx was the 0 immediate
622 m_jit.orl_rr(X86::ecx, X86::edx);
623
624 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
625
626 m_jit.movl_i32r(reinterpret_cast<uint32_t>(jsBoolean(negated)), X86::eax);
627
628 X86Assembler::JmpSrc firstWasNotImmediate = m_jit.emitUnlinkedJmp();
629
630 m_jit.link(secondNotImmediate, m_jit.label());
631 // check that eax is not the zero immediate (we know it must be immediate)
632 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
633 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
634
635 m_jit.movl_i32r(reinterpret_cast<uint32_t>(jsBoolean(negated)), X86::eax);
636
637 m_jit.link(bothWereImmediates, m_jit.label());
638 m_jit.link(firstWasNotImmediate, m_jit.label());
639
640 emitPutResult(dst);
641}
642
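// esi holds the ticks-until-timeout countdown (seeded to 512 in ctiTrampoline). Decrement it, and
// once it hits zero call cti_timeout_check and refill the countdown from the Machine's
// m_ticksUntilNextTimeoutCheck.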
643void CTI::emitSlowScriptCheck(unsigned opcodeIndex)
644{
645 m_jit.subl_i8r(1, X86::esi);
646 X86Assembler::JmpSrc skipTimeout = m_jit.emitUnlinkedJne();
647 emitCall(opcodeIndex, Machine::cti_timeout_check);
648
649 emitGetCTIParam(CTI_ARGS_globalData, X86::ecx);
650 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalData, machine), X86::ecx, X86::ecx);
651 m_jit.movl_mr(OBJECT_OFFSET(Machine, m_ticksUntilNextTimeoutCheck), X86::ecx, X86::esi);
652 m_jit.link(skipTimeout, m_jit.label());
653}
654
655/*
656 This is required since number representation is canonical - values representable as a JSImmediate should not be stored in a JSNumberCell.
657
658 In the common case, the double value from 'xmmSource' is written to the reusable JSNumberCell pointed to by 'jsNumberCell', then 'jsNumberCell'
659 is written to the output SF Register 'dst', and then a jump is planted (stored into *wroteJSNumberCell).
660
661 However if the value from xmmSource is representable as a JSImmediate, then the JSImmediate value will be written to the output, and flow
662 control will fall through from the code planted.
663*/
664void CTI::putDoubleResultToJSNumberCellOrJSImmediate(X86::XMMRegisterID xmmSource, X86::RegisterID jsNumberCell, unsigned dst, X86Assembler::JmpSrc* wroteJSNumberCell, X86::XMMRegisterID tempXmm, X86::RegisterID tempReg1, X86::RegisterID tempReg2)
665{
666 // convert (double -> JSImmediate -> double), and check if the value is unchanged - in which case the value is representable as a JSImmediate.
667 m_jit.cvttsd2si_rr(xmmSource, tempReg1);
668 m_jit.addl_rr(tempReg1, tempReg1);
669 m_jit.sarl_i8r(1, tempReg1);
670 m_jit.cvtsi2sd_rr(tempReg1, tempXmm);
671 // Compare & branch if immediate.
672 m_jit.ucomis_rr(tempXmm, xmmSource);
673 X86Assembler::JmpSrc resultIsImm = m_jit.emitUnlinkedJe();
674 X86Assembler::JmpDst resultLookedLikeImmButActuallyIsnt = m_jit.label();
675
676 // Store the result to the JSNumberCell and jump.
677 m_jit.movsd_rm(xmmSource, OBJECT_OFFSET(JSNumberCell, m_value), jsNumberCell);
678 emitPutResult(dst, jsNumberCell);
679 *wroteJSNumberCell = m_jit.emitUnlinkedJmp();
680
681 m_jit.link(resultIsImm, m_jit.label());
682 // value == (double)(JSImmediate)value... or at least, it looks that way...
683 // ucomi will report that (0 == -0), and will report true if either input is NaN (result is unordered).
684 m_jit.link(m_jit.emitUnlinkedJp(), resultLookedLikeImmButActuallyIsnt); // Actually was a NaN
685 m_jit.pextrw_irr(3, xmmSource, tempReg2);
686 m_jit.cmpl_i32r(0x8000, tempReg2);
687 m_jit.link(m_jit.emitUnlinkedJe(), resultLookedLikeImmButActuallyIsnt); // Actually was -0
688 // Yes it really really really is representable as a JSImmediate.
689 emitFastArithIntToImmNoCheck(tempReg1);
690 emitPutResult(dst, X86::ecx);
691}
692
693void CTI::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types, unsigned i)
694{
695 StructureID* numberStructureID = m_callFrame->globalData().numberStructureID.get();
696 X86Assembler::JmpSrc wasJSNumberCell1, wasJSNumberCell1b, wasJSNumberCell2, wasJSNumberCell2b;
697
698 emitGetArg(src1, X86::eax);
699 emitGetArg(src2, X86::edx);
700
701 if (types.second().isReusable() && isSSE2Present()) {
702 ASSERT(types.second().mightBeNumber());
703
704 // Check op2 is a number
705 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
706 X86Assembler::JmpSrc op2imm = m_jit.emitUnlinkedJne();
707 if (!types.second().definitelyIsNumber()) {
708 emitJumpSlowCaseIfNotJSCell(X86::edx, i);
709 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::edx);
710 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
711 }
712
713 // (1) In this case src2 is a reusable number cell.
714 // Slow case if src1 is not a number type.
715 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
716 X86Assembler::JmpSrc op1imm = m_jit.emitUnlinkedJne();
717 if (!types.first().definitelyIsNumber()) {
718 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
719 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
720 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
721 }
722
723 // (1a) if we get here, src1 is also a number cell
724 m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
725 X86Assembler::JmpSrc loadedDouble = m_jit.emitUnlinkedJmp();
726 // (1b) if we get here, src1 is an immediate
727 m_jit.link(op1imm, m_jit.label());
728 emitFastArithImmToInt(X86::eax);
729 m_jit.cvtsi2sd_rr(X86::eax, X86::xmm0);
730 // (1c)
731 m_jit.link(loadedDouble, m_jit.label());
732 if (opcodeID == op_add)
733 m_jit.addsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
734 else if (opcodeID == op_sub)
735 m_jit.subsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
736 else {
737 ASSERT(opcodeID == op_mul);
738 m_jit.mulsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
739 }
740
741 putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::edx, dst, &wasJSNumberCell2, X86::xmm1, X86::ecx, X86::eax);
742 wasJSNumberCell2b = m_jit.emitUnlinkedJmp();
743
744 // (2) This handles cases where src2 is an immediate number.
745 // Two slow cases - either src1 isn't an immediate, or the arithmetic overflows.
746 m_jit.link(op2imm, m_jit.label());
747 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
748 } else if (types.first().isReusable() && isSSE2Present()) {
749 ASSERT(types.first().mightBeNumber());
750
751 // Check op1 is a number
752 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
753 X86Assembler::JmpSrc op1imm = m_jit.emitUnlinkedJne();
754 if (!types.first().definitelyIsNumber()) {
755 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
756 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
757 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
758 }
759
760 // (1) In this case src1 is a reusable number cell.
761 // Slow case if src2 is not a number type.
762 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
763 X86Assembler::JmpSrc op2imm = m_jit.emitUnlinkedJne();
764 if (!types.second().definitelyIsNumber()) {
765 emitJumpSlowCaseIfNotJSCell(X86::edx, i);
766 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::edx);
767 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
768 }
769
770 // (1a) if we get here, src2 is also a number cell
771 m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1);
772 X86Assembler::JmpSrc loadedDouble = m_jit.emitUnlinkedJmp();
773 // (1b) if we get here, src2 is an immediate
774 m_jit.link(op2imm, m_jit.label());
775 emitFastArithImmToInt(X86::edx);
776 m_jit.cvtsi2sd_rr(X86::edx, X86::xmm1);
777 // (1c)
778 m_jit.link(loadedDouble, m_jit.label());
779 m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
780 if (opcodeID == op_add)
781 m_jit.addsd_rr(X86::xmm1, X86::xmm0);
782 else if (opcodeID == op_sub)
783 m_jit.subsd_rr(X86::xmm1, X86::xmm0);
784 else {
785 ASSERT(opcodeID == op_mul);
786 m_jit.mulsd_rr(X86::xmm1, X86::xmm0);
787 }
788 m_jit.movsd_rm(X86::xmm0, OBJECT_OFFSET(JSNumberCell, m_value), X86::eax);
789 emitPutResult(dst);
790
791 putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::eax, dst, &wasJSNumberCell1, X86::xmm1, X86::ecx, X86::edx);
792 wasJSNumberCell1b = m_jit.emitUnlinkedJmp();
793
794 // (2) This handles cases where src1 is an immediate number.
795 // Two slow cases - either src2 isn't an immediate, or the arithmetic overflows.
796 m_jit.link(op1imm, m_jit.label());
797 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
798 } else
799 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
800
801 if (opcodeID == op_add) {
802 emitFastArithDeTagImmediate(X86::eax);
803 m_jit.addl_rr(X86::edx, X86::eax);
804 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
805 } else if (opcodeID == op_sub) {
806 m_jit.subl_rr(X86::edx, X86::eax);
807 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
808 emitFastArithReTagImmediate(X86::eax);
809 } else {
810 ASSERT(opcodeID == op_mul);
811 emitFastArithDeTagImmediate(X86::eax);
812 emitFastArithImmToInt(X86::edx);
813 m_jit.imull_rr(X86::edx, X86::eax);
814 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
815 emitFastArithReTagImmediate(X86::eax);
816 }
817 emitPutResult(dst);
818
819 if (types.second().isReusable() && isSSE2Present()) {
820 m_jit.link(wasJSNumberCell2, m_jit.label());
821 m_jit.link(wasJSNumberCell2b, m_jit.label());
822 }
823 else if (types.first().isReusable() && isSSE2Present()) {
824 m_jit.link(wasJSNumberCell1, m_jit.label());
825 m_jit.link(wasJSNumberCell1b, m_jit.label());
826 }
827}
828
829void CTI::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types, unsigned i)
830{
831 X86Assembler::JmpDst here = m_jit.label();
832 m_jit.link(iter->from, here);
833 if (types.second().isReusable() && isSSE2Present()) {
834 if (!types.first().definitelyIsNumber()) {
835 m_jit.link((++iter)->from, here);
836 m_jit.link((++iter)->from, here);
837 }
838 if (!types.second().definitelyIsNumber()) {
839 m_jit.link((++iter)->from, here);
840 m_jit.link((++iter)->from, here);
841 }
842 m_jit.link((++iter)->from, here);
843 } else if (types.first().isReusable() && isSSE2Present()) {
844 if (!types.first().definitelyIsNumber()) {
845 m_jit.link((++iter)->from, here);
846 m_jit.link((++iter)->from, here);
847 }
848 if (!types.second().definitelyIsNumber()) {
849 m_jit.link((++iter)->from, here);
850 m_jit.link((++iter)->from, here);
851 }
852 m_jit.link((++iter)->from, here);
853 } else
854 m_jit.link((++iter)->from, here);
855
856 emitGetPutArg(src1, 0, X86::ecx);
857 emitGetPutArg(src2, 4, X86::ecx);
858 if (opcodeID == op_add)
859 emitCall(i, Machine::cti_op_add);
860 else if (opcodeID == op_sub)
861 emitCall(i, Machine::cti_op_sub);
862 else {
863 ASSERT(opcodeID == op_mul);
864 emitCall(i, Machine::cti_op_mul);
865 }
866 emitPutResult(dst);
867}
868
869void CTI::privateCompileMainPass()
870{
871 Instruction* instruction = m_codeBlock->instructions.begin();
872 unsigned instructionCount = m_codeBlock->instructions.size();
873
874 unsigned structureIDInstructionIndex = 0;
875
876 for (unsigned i = 0; i < instructionCount; ) {
877 m_labels[i] = m_jit.label();
878
879#if ENABLE(SAMPLING_TOOL)
880 m_jit.movl_i32m(m_machine->getOpcodeID(instruction[i].u.opcode), &currentOpcodeID);
881#endif
882
883 ASSERT_WITH_MESSAGE(m_machine->isOpcode(instruction[i].u.opcode), "privateCompileMainPass gone bad @ %d", i);
884 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
885 case op_mov: {
886 unsigned src = instruction[i + 2].u.operand;
887 if (isConstant(src))
888 m_jit.movl_i32r(reinterpret_cast<unsigned>(getConstant(m_callFrame, src)), X86::edx);
889 else
890 emitGetArg(src, X86::edx);
891 emitPutResult(instruction[i + 1].u.operand, X86::edx);
892 i += 3;
893 break;
894 }
895 case op_add: {
896 unsigned dst = instruction[i + 1].u.operand;
897 unsigned src1 = instruction[i + 2].u.operand;
898 unsigned src2 = instruction[i + 3].u.operand;
899
900 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
901 emitGetArg(src2, X86::edx);
902 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
903 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::edx);
904 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
905 emitPutResult(dst, X86::edx);
906 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
907 emitGetArg(src1, X86::eax);
908 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
909 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
910 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
911 emitPutResult(dst);
912 } else {
913 OperandTypes types = OperandTypes::fromInt(instruction[i + 4].u.operand);
914 if (types.first().mightBeNumber() && types.second().mightBeNumber())
915 compileBinaryArithOp(op_add, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
916 else {
917 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
918 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
919 emitCall(i, Machine::cti_op_add);
920 emitPutResult(instruction[i + 1].u.operand);
921 }
922 }
923
924 i += 5;
925 break;
926 }
927 case op_end: {
928 if (m_codeBlock->needsFullScopeChain)
929 emitCall(i, Machine::cti_op_end);
930 emitGetArg(instruction[i + 1].u.operand, X86::eax);
931#if ENABLE(SAMPLING_TOOL)
932 m_jit.movl_i32m(-1, &currentOpcodeID);
933#endif
934 m_jit.pushl_m(RegisterFile::ReturnPC * static_cast<int>(sizeof(Register)), X86::edi);
935 m_jit.ret();
936 i += 2;
937 break;
938 }
939 case op_jmp: {
940 unsigned target = instruction[i + 1].u.operand;
941 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
942 i += 2;
943 break;
944 }
945 case op_pre_inc: {
946 int srcDst = instruction[i + 1].u.operand;
947 emitGetArg(srcDst, X86::eax);
948 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
949 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
950 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
951 emitPutResult(srcDst, X86::eax);
952 i += 2;
953 break;
954 }
955 case op_loop: {
956 emitSlowScriptCheck(i);
957
958 unsigned target = instruction[i + 1].u.operand;
959 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
960 i += 2;
961 break;
962 }
963 case op_loop_if_less: {
964 emitSlowScriptCheck(i);
965
966 unsigned target = instruction[i + 3].u.operand;
967 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
968 if (src2imm) {
969 emitGetArg(instruction[i + 1].u.operand, X86::edx);
970 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
971 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
972 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
973 } else {
974 emitGetArg(instruction[i + 1].u.operand, X86::eax);
975 emitGetArg(instruction[i + 2].u.operand, X86::edx);
976 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
977 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
978 m_jit.cmpl_rr(X86::edx, X86::eax);
979 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
980 }
981 i += 4;
982 break;
983 }
984 case op_loop_if_lesseq: {
985 emitSlowScriptCheck(i);
986
987 unsigned target = instruction[i + 3].u.operand;
988 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
989 if (src2imm) {
990 emitGetArg(instruction[i + 1].u.operand, X86::edx);
991 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
992 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
993 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
994 } else {
995 emitGetArg(instruction[i + 1].u.operand, X86::eax);
996 emitGetArg(instruction[i + 2].u.operand, X86::edx);
997 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
998 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
999 m_jit.cmpl_rr(X86::edx, X86::eax);
1000 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
1001 }
1002 i += 4;
1003 break;
1004 }
1005 case op_new_object: {
1006 emitCall(i, Machine::cti_op_new_object);
1007 emitPutResult(instruction[i + 1].u.operand);
1008 i += 2;
1009 break;
1010 }
1011 case op_put_by_id: {
1012 // In order to be able to repatch both the StructureID and the object offset, we store a single pointer
1013 // to just after the point where the arguments have been loaded into registers ('hotPathBegin'), and we
1014 // generate code such that the StructureID and offset are always at the same distance from it.
1015
1016 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1017 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1018
1019 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1020 X86Assembler::JmpDst hotPathBegin = m_jit.label();
1021 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
1022 ++structureIDInstructionIndex;
1023
1024 // Jump to a slow case if either the base object is an immediate, or if the StructureID does not match.
1025 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
1026 // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
1027 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1028 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdStructureID);
1029 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1030
1031 // Plant a store to a bogus offset in the object's property map; we will patch this later, if it is to be used.
1032 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
1033 m_jit.movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
1034 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdPropertyMapOffset);
1035
1036 i += 8;
1037 break;
1038 }
1039 case op_get_by_id: {
1040 // As for put_by_id, get_by_id requires the offset of the StructureID and the offset of the access to be repatched.
1041 // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
1042 // to array-length / prototype access trampolines), and finally we also use the property-map access offset as a label
1043 // to jump back to if one of these trampolines finds a match.
1044
1045 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1046
1047 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1048
1049 X86Assembler::JmpDst hotPathBegin = m_jit.label();
1050 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
1051 ++structureIDInstructionIndex;
1052
1053 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
1054 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1055 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdStructureID);
1056 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1057 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdBranchToSlowCase);
1058
1059 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
1060 m_jit.movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::ecx);
1061 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdPropertyMapOffset);
1062 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1063
1064 i += 8;
1065 break;
1066 }
1067 case op_instanceof: {
1068 emitGetArg(instruction[i + 2].u.operand, X86::eax); // value
1069 emitGetArg(instruction[i + 3].u.operand, X86::ecx); // baseVal
1070 emitGetArg(instruction[i + 4].u.operand, X86::edx); // proto
1071
1072 // check if any are immediates
1073 m_jit.orl_rr(X86::eax, X86::ecx);
1074 m_jit.orl_rr(X86::edx, X86::ecx);
1075 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
1076
1077 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
1078
1079 // check that all are object type - this is a bit of a bithack to avoid excess branching;
1080 // we check that the sum of the three type codes from StructureIDs is exactly 3 * ObjectType;
1081 // this works because NumberType and StringType are smaller
1082 m_jit.movl_i32r(3 * ObjectType, X86::ecx);
1083 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::eax);
1084 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::edx, X86::edx);
1085 m_jit.subl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::eax, X86::ecx);
1086 m_jit.subl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::edx, X86::ecx);
1087 emitGetArg(instruction[i + 3].u.operand, X86::edx); // reload baseVal
1088 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::edx, X86::edx);
1089 m_jit.cmpl_rm(X86::ecx, OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::edx);
1090
1091 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1092
1093 // check that baseVal's flags include ImplementsHasInstance but not OverridesHasInstance
1094 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::edx, X86::ecx);
1095 m_jit.andl_i32r(ImplementsHasInstance | OverridesHasInstance, X86::ecx);
1096 m_jit.cmpl_i32r(ImplementsHasInstance, X86::ecx);
1097
1098 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1099
1100 emitGetArg(instruction[i + 2].u.operand, X86::ecx); // reload value
1101 emitGetArg(instruction[i + 4].u.operand, X86::edx); // reload proto
1102
1103 // optimistically load true result
1104 m_jit.movl_i32r(reinterpret_cast<int32_t>(jsBoolean(true)), X86::eax);
1105
1106 X86Assembler::JmpDst loop = m_jit.label();
1107
1108 // load value's prototype
1109 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
1110 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
1111
1112 m_jit.cmpl_rr(X86::ecx, X86::edx);
1113 X86Assembler::JmpSrc exit = m_jit.emitUnlinkedJe();
1114
1115 m_jit.cmpl_i32r(reinterpret_cast<int32_t>(jsNull()), X86::ecx);
1116 X86Assembler::JmpSrc goToLoop = m_jit.emitUnlinkedJne();
1117 m_jit.link(goToLoop, loop);
1118
1119 m_jit.movl_i32r(reinterpret_cast<int32_t>(jsBoolean(false)), X86::eax);
1120
1121 m_jit.link(exit, m_jit.label());
1122
1123 emitPutResult(instruction[i + 1].u.operand);
1124
1125 i += 5;
1126 break;
1127 }
1128 case op_del_by_id: {
1129 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1130 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1131 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1132 emitCall(i, Machine::cti_op_del_by_id);
1133 emitPutResult(instruction[i + 1].u.operand);
1134 i += 4;
1135 break;
1136 }
1137 case op_mul: {
1138 unsigned dst = instruction[i + 1].u.operand;
1139 unsigned src1 = instruction[i + 2].u.operand;
1140 unsigned src2 = instruction[i + 3].u.operand;
1141
1142 if (JSValue* src1Value = getConstantImmediateNumericArg(src1)) {
1143 emitGetArg(src2, X86::eax);
1144 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1145 emitFastArithImmToInt(X86::eax);
1146 m_jit.imull_i32r(X86::eax, getDeTaggedConstantImmediate(src1Value), X86::eax);
1147 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1148 emitFastArithReTagImmediate(X86::eax);
1149 emitPutResult(dst);
1150 } else if (JSValue* src2Value = getConstantImmediateNumericArg(src2)) {
1151 emitGetArg(src1, X86::eax);
1152 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1153 emitFastArithImmToInt(X86::eax);
1154 m_jit.imull_i32r(X86::eax, getDeTaggedConstantImmediate(src2Value), X86::eax);
1155 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1156 emitFastArithReTagImmediate(X86::eax);
1157 emitPutResult(dst);
1158 } else
1159 compileBinaryArithOp(op_mul, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
1160
1161 i += 5;
1162 break;
1163 }
1164 case op_new_func: {
1165 FuncDeclNode* func = (m_codeBlock->functions[instruction[i + 2].u.operand]).get();
1166 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1167 emitCall(i, Machine::cti_op_new_func);
1168 emitPutResult(instruction[i + 1].u.operand);
1169 i += 3;
1170 break;
1171 }
1172 case op_call: {
1173 compileOpCall(instruction, i);
1174 i += 7;
1175 break;
1176 }
1177 case op_get_global_var: {
1178 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 2].u.jsCell);
1179 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
1180 emitGetVariableObjectRegister(X86::eax, instruction[i + 3].u.operand, X86::eax);
1181 emitPutResult(instruction[i + 1].u.operand, X86::eax);
1182 i += 4;
1183 break;
1184 }
1185 case op_put_global_var: {
1186 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 1].u.jsCell);
1187 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
1188 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1189 emitPutVariableObjectRegister(X86::edx, X86::eax, instruction[i + 2].u.operand);
1190 i += 4;
1191 break;
1192 }
1193 case op_get_scoped_var: {
1194 int skip = instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain;
1195
1196 emitGetArg(RegisterFile::ScopeChain, X86::eax);
1197 while (skip--)
1198 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::eax, X86::eax);
1199
1200 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::eax, X86::eax);
1201 emitGetVariableObjectRegister(X86::eax, instruction[i + 2].u.operand, X86::eax);
1202 emitPutResult(instruction[i + 1].u.operand);
1203 i += 4;
1204 break;
1205 }
1206 case op_put_scoped_var: {
1207 int skip = instruction[i + 2].u.operand + m_codeBlock->needsFullScopeChain;
1208
1209 emitGetArg(RegisterFile::ScopeChain, X86::edx);
1210 emitGetArg(instruction[i + 3].u.operand, X86::eax);
1211 while (skip--)
1212 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::edx, X86::edx);
1213
1214 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::edx, X86::edx);
1215 emitPutVariableObjectRegister(X86::eax, X86::edx, instruction[i + 1].u.operand);
1216 i += 4;
1217 break;
1218 }
1219 case op_tear_off_activation: {
1220 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1221 emitCall(i, Machine::cti_op_tear_off_activation);
1222 i += 2;
1223 break;
1224 }
1225 case op_tear_off_arguments: {
1226 emitCall(i, Machine::cti_op_tear_off_arguments);
1227 i += 1;
1228 break;
1229 }
1230 case op_ret: {
1231 // Check for a profiler - if there is one, jump to the hook below.
1232 emitGetCTIParam(CTI_ARGS_profilerReference, X86::eax);
1233 m_jit.cmpl_i32m(0, X86::eax);
1234 X86Assembler::JmpSrc profile = m_jit.emitUnlinkedJne();
1235 X86Assembler::JmpDst profiled = m_jit.label();
1236
1237 // We could JIT generate the deref, only calling out to C when the refcount hits zero.
1238 if (m_codeBlock->needsFullScopeChain)
1239 emitCall(i, Machine::cti_op_ret_scopeChain);
1240
1241 // Return the result in %eax.
1242 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1243
1244 // Grab the return address.
1245 emitGetArg(RegisterFile::ReturnPC, X86::edx);
1246
1247 // Restore our caller's call frame ("r").
1248 emitGetArg(RegisterFile::CallerFrame, X86::edi);
1249 emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
1250
1251 // Return.
1252 m_jit.pushl_r(X86::edx);
1253 m_jit.ret();
1254
1255 // Profiling hook
1256 m_jit.link(profile, m_jit.label());
1257 emitCall(i, Machine::cti_op_ret_profiler);
1258 m_jit.link(m_jit.emitUnlinkedJmp(), profiled);
1259
1260 i += 2;
1261 break;
1262 }
1263 case op_new_array: {
1264 m_jit.leal_mr(sizeof(Register) * instruction[i + 2].u.operand, X86::edi, X86::edx);
1265 emitPutArg(X86::edx, 0);
1266 emitPutArgConstant(instruction[i + 3].u.operand, 4);
1267 emitCall(i, Machine::cti_op_new_array);
1268 emitPutResult(instruction[i + 1].u.operand);
1269 i += 4;
1270 break;
1271 }
1272 case op_resolve: {
1273 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1274 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1275 emitCall(i, Machine::cti_op_resolve);
1276 emitPutResult(instruction[i + 1].u.operand);
1277 i += 3;
1278 break;
1279 }
1280 case op_construct: {
1281 compileOpCall(instruction, i, OpConstruct);
1282 i += 7;
1283 break;
1284 }
1285 case op_construct_verify: {
1286 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1287
1288 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1289 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJne();
1290 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1291 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
1292 X86Assembler::JmpSrc isObject = m_jit.emitUnlinkedJe();
1293
1294 m_jit.link(isImmediate, m_jit.label());
1295 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
1296 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1297 m_jit.link(isObject, m_jit.label());
1298
1299 i += 3;
1300 break;
1301 }
1302 case op_get_by_val: {
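// Fast path for indexed reads: the subscript must be an immediate integer and the base
// must be a cell whose vptr matches JSArray. Roughly equivalent to:
//     if (!isImmediateInt(subscript) || isImmediate(base) || vptr(base) != jsArrayVptr)
//         goto slowCase;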
1303 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1304 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1305 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1306 emitFastArithImmToInt(X86::edx);
1307 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1308 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1309 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
1310 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1311
1312 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1313 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1314 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1315 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1316
1317 // Get the value from the vector
1318 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
1319 emitPutResult(instruction[i + 1].u.operand);
1320 i += 4;
1321 break;
1322 }
1323 case op_resolve_func: {
1324 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1325 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1326 emitCall(i, Machine::cti_op_resolve_func);
1327 emitPutResult(instruction[i + 1].u.operand);
1328 emitPutResult(instruction[i + 2].u.operand, X86::edx);
1329 i += 4;
1330 break;
1331 }
1332 case op_sub: {
1333 compileBinaryArithOp(op_sub, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
1334 i += 5;
1335 break;
1336 }
1337 case op_put_by_val: {
1338 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1339 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1340 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1341 emitFastArithImmToInt(X86::edx);
1342 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1343 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1344 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
1345 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1346
1347 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1348 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1349 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1350 X86Assembler::JmpSrc inFastVector = m_jit.emitUnlinkedJa();
1351 // No; oh well, check if the access is within the vector - if so, we may still be okay.
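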
1352 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
1353 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1354
1355 // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
1356 // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update the fast access cutoff.
1357 m_jit.cmpl_i8m(0, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1358 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
1359
1360 // All good - put the value into the array.
1361 m_jit.link(inFastVector, m_jit.label());
1362 emitGetArg(instruction[i + 3].u.operand, X86::eax);
1363 m_jit.movl_rm(X86::eax, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1364 i += 4;
1365 break;
1366 }
1367 CTI_COMPILE_BINARY_OP(op_lesseq)
1368 case op_loop_if_true: {
1369 emitSlowScriptCheck(i);
1370
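// Branch on the immediate encoding directly: the zero immediate falls through (false),
// any other immediate integer takes the branch, and the true/false immediates are
// handled explicitly. Anything else goes to the slow case.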
1371 unsigned target = instruction[i + 2].u.operand;
1372 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1373
1374 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1375 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1376 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1377 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1378
1379 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1380 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1381 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1382 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1383
1384 m_jit.link(isZero, m_jit.label());
1385 i += 3;
1386 break;
1387 };
1388 case op_resolve_base: {
1389 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1390 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1391 emitCall(i, Machine::cti_op_resolve_base);
1392 emitPutResult(instruction[i + 1].u.operand);
1393 i += 3;
1394 break;
1395 }
1396 case op_negate: {
1397 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1398 emitCall(i, Machine::cti_op_negate);
1399 emitPutResult(instruction[i + 1].u.operand);
1400 i += 3;
1401 break;
1402 }
1403 case op_resolve_skip: {
1404 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1405 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1406 emitPutArgConstant(instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain, 4);
1407 emitCall(i, Machine::cti_op_resolve_skip);
1408 emitPutResult(instruction[i + 1].u.operand);
1409 i += 4;
1410 break;
1411 }
1412 case op_resolve_global: {
1413 // Fast case
1414 unsigned globalObject = reinterpret_cast<unsigned>(instruction[i + 2].u.jsCell);
1415 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1416 void* structureIDAddr = reinterpret_cast<void*>(instruction + i + 4);
1417 void* offsetAddr = reinterpret_cast<void*>(instruction + i + 5);
1418
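// Inline cache for global resolves: the two instruction-stream slots loaded below hold
// the global object's StructureID and the property offset; the slow-path call to
// cti_op_resolve_global is expected to fill them in so later executions stay on the
// fast path.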
1419 // Check StructureID of global object
1420 m_jit.movl_i32r(globalObject, X86::eax);
1421 m_jit.movl_mr(structureIDAddr, X86::edx);
1422 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1423 X86Assembler::JmpSrc slowCase = m_jit.emitUnlinkedJne(); // StructureIDs don't match
1424 m_slowCases.append(SlowCaseEntry(slowCase, i));
1425
1426 // Load cached property
1427 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalObject, m_propertyStorage), X86::eax, X86::eax);
1428 m_jit.movl_mr(offsetAddr, X86::edx);
1429 m_jit.movl_mr(0, X86::eax, X86::edx, sizeof(JSValue*), X86::eax);
1430 emitPutResult(instruction[i + 1].u.operand);
1431 X86Assembler::JmpSrc end = m_jit.emitUnlinkedJmp();
1432
1433 // Slow case
1434 m_jit.link(slowCase, m_jit.label());
1435 emitPutArgConstant(globalObject, 0);
1436 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1437 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 8);
1438 emitCall(i, Machine::cti_op_resolve_global);
1439 emitPutResult(instruction[i + 1].u.operand);
1440 m_jit.link(end, m_jit.label());
1441 i += 6;
1442 ++structureIDInstructionIndex;
1443 break;
1444 }
1445 CTI_COMPILE_BINARY_OP(op_div)
1446 case op_pre_dec: {
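// Immediate decrement: subtracting a detagged 1 leaves the integer tag bit intact, and
// signed overflow (jo) falls back to the slow case.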
1447 int srcDst = instruction[i + 1].u.operand;
1448 emitGetArg(srcDst, X86::eax);
1449 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1450 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1451 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1452 emitPutResult(srcDst, X86::eax);
1453 i += 2;
1454 break;
1455 }
1456 case op_jnless: {
1457 unsigned target = instruction[i + 3].u.operand;
1458 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1459 if (src2imm) {
1460 emitGetArg(instruction[i + 1].u.operand, X86::edx);
1461 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1462 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
1463 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1464 } else {
1465 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1466 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1467 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1468 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1469 m_jit.cmpl_rr(X86::edx, X86::eax);
1470 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1471 }
1472 i += 4;
1473 break;
1474 }
1475 case op_not: {
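// Logical not via tag arithmetic: the first xor strips the boolean tag (leaving any
// non-boolean with tag bits set, which the mask test below sends to the slow case);
// the second xor re-tags the value and flips the boolean payload bit.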
1476 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1477 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
1478 m_jit.testl_i32r(JSImmediate::FullTagTypeMask, X86::eax); // i8?
1479 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1480 m_jit.xorl_i8r((JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue), X86::eax);
1481 emitPutResult(instruction[i + 1].u.operand);
1482 i += 3;
1483 break;
1484 }
1485 case op_jfalse: {
1486 unsigned target = instruction[i + 2].u.operand;
1487 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1488
1489 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1490 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1491 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1492 X86Assembler::JmpSrc isNonZero = m_jit.emitUnlinkedJne();
1493
1494 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1495 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1496 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1497 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1498
1499 m_jit.link(isNonZero, m_jit.label());
1500 i += 3;
1501 break;
1502 };
1503 case op_post_inc: {
1504 int srcDst = instruction[i + 2].u.operand;
1505 emitGetArg(srcDst, X86::eax);
1506 m_jit.movl_rr(X86::eax, X86::edx);
1507 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1508 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1509 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1510 emitPutResult(srcDst, X86::edx);
1511 emitPutResult(instruction[i + 1].u.operand);
1512 i += 3;
1513 break;
1514 }
1515 case op_unexpected_load: {
1516 JSValue* v = m_codeBlock->unexpectedConstants[instruction[i + 2].u.operand];
1517 m_jit.movl_i32r(reinterpret_cast<unsigned>(v), X86::eax);
1518 emitPutResult(instruction[i + 1].u.operand);
1519 i += 3;
1520 break;
1521 }
1522 case op_jsr: {
1523 int retAddrDst = instruction[i + 1].u.operand;
1524 int target = instruction[i + 2].u.operand;
1525 m_jit.movl_i32m(0, sizeof(Register) * retAddrDst, X86::edi);
1526 X86Assembler::JmpDst addrPosition = m_jit.label();
1527 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1528 X86Assembler::JmpDst sretTarget = m_jit.label();
1529 m_jsrSites.append(JSRInfo(addrPosition, sretTarget));
1530 i += 3;
1531 break;
1532 }
1533 case op_sret: {
1534 m_jit.jmp_m(sizeof(Register) * instruction[i + 1].u.operand, X86::edi);
1535 i += 2;
1536 break;
1537 }
1538 case op_eq: {
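// Immediate-int equality: compare the tagged values directly, materialize the flag
// with sete/movzbl, then re-tag the 0/1 result as a boolean immediate.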
1539 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1540 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1541 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1542 m_jit.cmpl_rr(X86::edx, X86::eax);
1543 m_jit.sete_r(X86::eax);
1544 m_jit.movzbl_rr(X86::eax, X86::eax);
1545 emitTagAsBoolImmediate(X86::eax);
1546 emitPutResult(instruction[i + 1].u.operand);
1547 i += 4;
1548 break;
1549 }
1550 case op_lshift: {
1551 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1552 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1553 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1554 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1555 emitFastArithImmToInt(X86::eax);
1556 emitFastArithImmToInt(X86::ecx);
1557 m_jit.shll_CLr(X86::eax);
1558 emitFastArithIntToImmOrSlowCase(X86::eax, i);
1559 emitPutResult(instruction[i + 1].u.operand);
1560 i += 4;
1561 break;
1562 }
1563 case op_bitand: {
1564 unsigned src1 = instruction[i + 2].u.operand;
1565 unsigned src2 = instruction[i + 3].u.operand;
1566 unsigned dst = instruction[i + 1].u.operand;
1567 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
1568 emitGetArg(src2, X86::eax);
1569 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1570 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax); // FIXME: make it more obvious this is relying on the format of JSImmediate
1571 emitPutResult(dst);
1572 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
1573 emitGetArg(src1, X86::eax);
1574 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1575 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax);
1576 emitPutResult(dst);
1577 } else {
1578 emitGetArg(src1, X86::eax);
1579 emitGetArg(src2, X86::edx);
1580 m_jit.andl_rr(X86::edx, X86::eax);
1581 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1582 emitPutResult(dst);
1583 }
1584 i += 5;
1585 break;
1586 }
1587 case op_rshift: {
1588 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1589 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1590 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1591 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1592 emitFastArithImmToInt(X86::ecx);
1593 m_jit.sarl_CLr(X86::eax);
1594 emitFastArithPotentiallyReTagImmediate(X86::eax);
1595 emitPutResult(instruction[i + 1].u.operand);
1596 i += 4;
1597 break;
1598 }
1599 case op_bitnot: {
1600 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1601 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1602 m_jit.xorl_i8r(~JSImmediate::TagBitTypeInteger, X86::eax);
1603 emitPutResult(instruction[i + 1].u.operand);
1604 i += 3;
1605 break;
1606 }
1607 case op_resolve_with_base: {
1608 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1609 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1610 emitCall(i, Machine::cti_op_resolve_with_base);
1611 emitPutResult(instruction[i + 1].u.operand);
1612 emitPutResult(instruction[i + 2].u.operand, X86::edx);
1613 i += 4;
1614 break;
1615 }
1616 case op_new_func_exp: {
1617 FuncExprNode* func = (m_codeBlock->functionExpressions[instruction[i + 2].u.operand]).get();
1618 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1619 emitCall(i, Machine::cti_op_new_func_exp);
1620 emitPutResult(instruction[i + 1].u.operand);
1621 i += 3;
1622 break;
1623 }
1624 case op_mod: {
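// Integer modulo: detag both operands to raw ints, guard against a zero divisor (the
// flags from detagging ecx are checked by the je below), then idiv leaves the
// remainder in edx, which is re-tagged and returned as the result.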
1625 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1626 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1627 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1628 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1629 emitFastArithDeTagImmediate(X86::eax);
1630 emitFastArithDeTagImmediate(X86::ecx);
1631 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i)); // This checks whether the last detag (of the divisor) resulted in zero.
1632 m_jit.cdq();
1633 m_jit.idivl_r(X86::ecx);
1634 emitFastArithReTagImmediate(X86::edx);
1635 m_jit.movl_rr(X86::edx, X86::eax);
1636 emitPutResult(instruction[i + 1].u.operand);
1637 i += 4;
1638 break;
1639 }
1640 case op_jtrue: {
1641 unsigned target = instruction[i + 2].u.operand;
1642 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1643
1644 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1645 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1646 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1647 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1648
1649 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1650 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1651 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1652 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1653
1654 m_jit.link(isZero, m_jit.label());
1655 i += 3;
1656 break;
1657 }
1658 CTI_COMPILE_BINARY_OP(op_less)
1659 case op_neq: {
1660 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1661 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1662 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1663 m_jit.cmpl_rr(X86::eax, X86::edx);
1664
1665 m_jit.setne_r(X86::eax);
1666 m_jit.movzbl_rr(X86::eax, X86::eax);
1667 emitTagAsBoolImmediate(X86::eax);
1668
1669 emitPutResult(instruction[i + 1].u.operand);
1670
1671 i += 4;
1672 break;
1673 }
1674 case op_post_dec: {
1675 int srcDst = instruction[i + 2].u.operand;
1676 emitGetArg(srcDst, X86::eax);
1677 m_jit.movl_rr(X86::eax, X86::edx);
1678 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1679 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1680 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1681 emitPutResult(srcDst, X86::edx);
1682 emitPutResult(instruction[i + 1].u.operand);
1683 i += 3;
1684 break;
1685 }
1686 CTI_COMPILE_BINARY_OP(op_urshift)
1687 case op_bitxor: {
1688 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1689 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1690 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1691 m_jit.xorl_rr(X86::edx, X86::eax);
1692 emitFastArithReTagImmediate(X86::eax);
1693 emitPutResult(instruction[i + 1].u.operand);
1694 i += 5;
1695 break;
1696 }
1697 case op_new_regexp: {
1698 RegExp* regExp = m_codeBlock->regexps[instruction[i + 2].u.operand].get();
1699 emitPutArgConstant(reinterpret_cast<unsigned>(regExp), 0);
1700 emitCall(i, Machine::cti_op_new_regexp);
1701 emitPutResult(instruction[i + 1].u.operand);
1702 i += 3;
1703 break;
1704 }
1705 case op_bitor: {
1706 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1707 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1708 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1709 m_jit.orl_rr(X86::edx, X86::eax);
1710 emitPutResult(instruction[i + 1].u.operand);
1711 i += 5;
1712 break;
1713 }
1714 case op_call_eval: {
1715 compileOpCall(instruction, i, OpCallEval);
1716 i += 7;
1717 break;
1718 }
1719 case op_throw: {
1720 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1721 emitCall(i, Machine::cti_op_throw);
1722 m_jit.addl_i8r(0x24, X86::esp);
1723 m_jit.popl_r(X86::edi);
1724 m_jit.popl_r(X86::esi);
1725 m_jit.ret();
1726 i += 2;
1727 break;
1728 }
1729 case op_get_pnames: {
1730 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1731 emitCall(i, Machine::cti_op_get_pnames);
1732 emitPutResult(instruction[i + 1].u.operand);
1733 i += 3;
1734 break;
1735 }
1736 case op_next_pname: {
1737 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1738 unsigned target = instruction[i + 3].u.operand;
1739 emitCall(i, Machine::cti_op_next_pname);
1740 m_jit.testl_rr(X86::eax, X86::eax);
1741 X86Assembler::JmpSrc endOfIter = m_jit.emitUnlinkedJe();
1742 emitPutResult(instruction[i + 1].u.operand);
1743 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 3 + target));
1744 m_jit.link(endOfIter, m_jit.label());
1745 i += 4;
1746 break;
1747 }
1748 case op_push_scope: {
1749 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1750 emitCall(i, Machine::cti_op_push_scope);
1751 i += 2;
1752 break;
1753 }
1754 case op_pop_scope: {
1755 emitCall(i, Machine::cti_op_pop_scope);
1756 i += 1;
1757 break;
1758 }
1759 CTI_COMPILE_UNARY_OP(op_typeof)
1760 CTI_COMPILE_UNARY_OP(op_is_undefined)
1761 CTI_COMPILE_UNARY_OP(op_is_boolean)
1762 CTI_COMPILE_UNARY_OP(op_is_number)
1763 CTI_COMPILE_UNARY_OP(op_is_string)
1764 CTI_COMPILE_UNARY_OP(op_is_object)
1765 CTI_COMPILE_UNARY_OP(op_is_function)
1766 case op_stricteq: {
1767 compileOpStrictEq(instruction, i, OpStrictEq);
1768 i += 4;
1769 break;
1770 }
1771 case op_nstricteq: {
1772 compileOpStrictEq(instruction, i, OpNStrictEq);
1773 i += 4;
1774 break;
1775 }
1776 case op_to_jsnumber: {
1777 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1778
1779 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1780 X86Assembler::JmpSrc wasImmediate = m_jit.emitUnlinkedJnz();
1781
1782 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
1783
1784 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1785 m_jit.cmpl_i32m(NumberType, OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::ecx);
1786
1787 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1788
1789 m_jit.link(wasImmediate, m_jit.label());
1790
1791 emitPutResult(instruction[i + 1].u.operand);
1792 i += 3;
1793 break;
1794 }
1795 case op_in: {
1796 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1797 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1798 emitCall(i, Machine::cti_op_in);
1799 emitPutResult(instruction[i + 1].u.operand);
1800 i += 4;
1801 break;
1802 }
1803 case op_push_new_scope: {
1804 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1805 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1806 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1807 emitCall(i, Machine::cti_op_push_new_scope);
1808 emitPutResult(instruction[i + 1].u.operand);
1809 i += 4;
1810 break;
1811 }
1812 case op_catch: {
1813 emitGetCTIParam(CTI_ARGS_callFrame, X86::edi); // edi := r
1814 emitPutResult(instruction[i + 1].u.operand);
1815 i += 2;
1816 break;
1817 }
1818 case op_jmp_scopes: {
1819 unsigned count = instruction[i + 1].u.operand;
1820 emitPutArgConstant(count, 0);
1821 emitCall(i, Machine::cti_op_jmp_scopes);
1822 unsigned target = instruction[i + 2].u.operand;
1823 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1824 i += 3;
1825 break;
1826 }
1827 case op_put_by_index: {
1828 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1829 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1830 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1831 emitCall(i, Machine::cti_op_put_by_index);
1832 i += 4;
1833 break;
1834 }
1835 case op_switch_imm: {
1836 unsigned tableIndex = instruction[i + 1].u.operand;
1837 unsigned defaultOffset = instruction[i + 2].u.operand;
1838 unsigned scrutinee = instruction[i + 3].u.operand;
1839
1840 // create jump table for switch destinations, track this switch statement.
1841 SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTables[tableIndex];
1842 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Immediate));
1843 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1844
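// cti_op_switch_imm returns the address to jump to; the ctiOffsets grown above are
// translated into real code addresses later, in privateCompile().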
1845 emitGetPutArg(scrutinee, 0, X86::ecx);
1846 emitPutArgConstant(tableIndex, 4);
1847 emitCall(i, Machine::cti_op_switch_imm);
1848 m_jit.jmp_r(X86::eax);
1849 i += 4;
1850 break;
1851 }
1852 case op_switch_char: {
1853 unsigned tableIndex = instruction[i + 1].u.operand;
1854 unsigned defaultOffset = instruction[i + 2].u.operand;
1855 unsigned scrutinee = instruction[i + 3].u.operand;
1856
1857 // create jump table for switch destinations, track this switch statement.
1858 SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTables[tableIndex];
1859 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Character));
1860 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1861
1862 emitGetPutArg(scrutinee, 0, X86::ecx);
1863 emitPutArgConstant(tableIndex, 4);
1864 emitCall(i, Machine::cti_op_switch_char);
1865 m_jit.jmp_r(X86::eax);
1866 i += 4;
1867 break;
1868 }
1869 case op_switch_string: {
1870 unsigned tableIndex = instruction[i + 1].u.operand;
1871 unsigned defaultOffset = instruction[i + 2].u.operand;
1872 unsigned scrutinee = instruction[i + 3].u.operand;
1873
1874 // create jump table for switch destinations, track this switch statement.
1875 StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTables[tableIndex];
1876 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset));
1877
1878 emitGetPutArg(scrutinee, 0, X86::ecx);
1879 emitPutArgConstant(tableIndex, 4);
1880 emitCall(i, Machine::cti_op_switch_string);
1881 m_jit.jmp_r(X86::eax);
1882 i += 4;
1883 break;
1884 }
1885 case op_del_by_val: {
1886 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1887 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1888 emitCall(i, Machine::cti_op_del_by_val);
1889 emitPutResult(instruction[i + 1].u.operand);
1890 i += 4;
1891 break;
1892 }
1893 case op_put_getter: {
1894 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1895 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1896 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1897 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1898 emitCall(i, Machine::cti_op_put_getter);
1899 i += 4;
1900 break;
1901 }
1902 case op_put_setter: {
1903 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1904 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1905 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1906 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1907 emitCall(i, Machine::cti_op_put_setter);
1908 i += 4;
1909 break;
1910 }
1911 case op_new_error: {
1912 JSValue* message = m_codeBlock->unexpectedConstants[instruction[i + 3].u.operand];
1913 emitPutArgConstant(instruction[i + 2].u.operand, 0);
1914 emitPutArgConstant(reinterpret_cast<unsigned>(message), 4);
1915 emitPutArgConstant(m_codeBlock->lineNumberForVPC(&instruction[i]), 8);
1916 emitCall(i, Machine::cti_op_new_error);
1917 emitPutResult(instruction[i + 1].u.operand);
1918 i += 4;
1919 break;
1920 }
1921 case op_debug: {
1922 emitPutArgConstant(instruction[i + 1].u.operand, 0);
1923 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1924 emitPutArgConstant(instruction[i + 3].u.operand, 8);
1925 emitCall(i, Machine::cti_op_debug);
1926 i += 4;
1927 break;
1928 }
1929 case op_eq_null: {
1930 unsigned dst = instruction[i + 1].u.operand;
1931 unsigned src1 = instruction[i + 2].u.operand;
1932
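// eq_null: a cell compares equal to null only if its StructureID is flagged
// MasqueradesAsUndefined; an immediate compares equal to null if, ignoring the
// undefined tag bit, it matches the null immediate (folding null and undefined together).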
1933 emitGetArg(src1, X86::eax);
1934 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1935 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
1936
1937 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1938 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
1939 m_jit.setnz_r(X86::eax);
1940
1941 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
1942
1943 m_jit.link(isImmediate, m_jit.label());
1944
1945 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1946 m_jit.andl_rr(X86::eax, X86::ecx);
1947 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1948 m_jit.sete_r(X86::eax);
1949
1950 m_jit.link(wasNotImmediate, m_jit.label());
1951
1952 m_jit.movzbl_rr(X86::eax, X86::eax);
1953 emitTagAsBoolImmediate(X86::eax);
1954 emitPutResult(dst);
1955
1956 i += 3;
1957 break;
1958 }
1959 case op_neq_null: {
1960 unsigned dst = instruction[i + 1].u.operand;
1961 unsigned src1 = instruction[i + 2].u.operand;
1962
1963 emitGetArg(src1, X86::eax);
1964 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1965 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
1966
1967 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1968 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
1969 m_jit.setz_r(X86::eax);
1970
1971 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
1972
1973 m_jit.link(isImmediate, m_jit.label());
1974
1975 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1976 m_jit.andl_rr(X86::eax, X86::ecx);
1977 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1978 m_jit.setne_r(X86::eax);
1979
1980 m_jit.link(wasNotImmediate, m_jit.label());
1981
1982 m_jit.movzbl_rr(X86::eax, X86::eax);
1983 emitTagAsBoolImmediate(X86::eax);
1984 emitPutResult(dst);
1985
1986 i += 3;
1987 break;
1988 }
1989 case op_enter: {
1990 // Even though CTI doesn't use them, we initialize our constant
1991 // registers to zap stale pointers, to avoid unnecessarily prolonging
1992 // object lifetime and increasing GC pressure.
1993 size_t count = m_codeBlock->numVars + m_codeBlock->constantRegisters.size();
1994 for (size_t j = 0; j < count; ++j)
1995 emitInitRegister(j);
1996
1997 i += 1;
1998 break;
1999 }
2000 case op_enter_with_activation: {
2001 // Even though CTI doesn't use them, we initialize our constant
2002 // registers to zap stale pointers, to avoid unnecessarily prolonging
2003 // object lifetime and increasing GC pressure.
2004 size_t count = m_codeBlock->numVars + m_codeBlock->constantRegisters.size();
2005 for (size_t j = 0; j < count; ++j)
2006 emitInitRegister(j);
2007
2008 emitCall(i, Machine::cti_op_push_activation);
2009 emitPutResult(instruction[i + 1].u.operand);
2010
2011 i += 2;
2012 break;
2013 }
2014 case op_create_arguments: {
2015 emitCall(i, Machine::cti_op_create_arguments);
2016 i += 1;
2017 break;
2018 }
2019 case op_convert_this: {
2020 emitGetArg(instruction[i + 1].u.operand, X86::eax);
2021
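// convert_this only needs the slow path when `this` is an immediate or a cell whose
// StructureID is flagged NeedsThisConversion; otherwise it can be used as-is.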
2022 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
2023 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::edx);
2024 m_jit.testl_i32m(NeedsThisConversion, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::edx);
2025 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
2026
2027 i += 2;
2028 break;
2029 }
2030 case op_get_array_length:
2031 case op_get_by_id_chain:
2032 case op_get_by_id_generic:
2033 case op_get_by_id_proto:
2034 case op_get_by_id_self:
2035 case op_get_string_length:
2036 case op_put_by_id_generic:
2037 case op_put_by_id_replace:
2038 case op_put_by_id_transition:
2039 ASSERT_NOT_REACHED();
2040 }
2041 }
2042
2043 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
2044}
2045
2046
2047void CTI::privateCompileLinkPass()
2048{
2049 unsigned jmpTableCount = m_jmpTable.size();
2050 for (unsigned i = 0; i < jmpTableCount; ++i)
2051 m_jit.link(m_jmpTable[i].from, m_labels[m_jmpTable[i].to]);
2052 m_jmpTable.clear();
2053}
2054
2055#define CTI_COMPILE_BINARY_OP_SLOW_CASE(name) \
2056 case name: { \
2057 m_jit.link(iter->from, m_jit.label()); \
2058 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
2059 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
2060 emitCall(i, Machine::cti_##name); \
2061 emitPutResult(instruction[i + 1].u.operand); \
2062 i += 4; \
2063 break; \
2064 }
2065
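// Each opcode's fast path appends its SlowCaseEntry records in a fixed order; the
// handlers below must link (and consume, via ++iter) exactly that many entries, in the
// same order, or the iteration falls out of sync with the bytecode.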
2066void CTI::privateCompileSlowCases()
2067{
2068 unsigned structureIDInstructionIndex = 0;
2069
2070 Instruction* instruction = m_codeBlock->instructions.begin();
2071 for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
2072 unsigned i = iter->to;
2073 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
2074 case op_convert_this: {
2075 m_jit.link(iter->from, m_jit.label());
2076 m_jit.link((++iter)->from, m_jit.label());
2077 emitPutArg(X86::eax, 0);
2078 emitCall(i, Machine::cti_op_convert_this);
2079 emitPutResult(instruction[i + 1].u.operand);
2080 i += 2;
2081 break;
2082 }
2083 case op_add: {
2084 unsigned dst = instruction[i + 1].u.operand;
2085 unsigned src1 = instruction[i + 2].u.operand;
2086 unsigned src2 = instruction[i + 3].u.operand;
2087 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
2088 X86Assembler::JmpSrc notImm = iter->from;
2089 m_jit.link((++iter)->from, m_jit.label());
2090 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::edx);
2091 m_jit.link(notImm, m_jit.label());
2092 emitGetPutArg(src1, 0, X86::ecx);
2093 emitPutArg(X86::edx, 4);
2094 emitCall(i, Machine::cti_op_add);
2095 emitPutResult(dst);
2096 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
2097 X86Assembler::JmpSrc notImm = iter->from;
2098 m_jit.link((++iter)->from, m_jit.label());
2099 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
2100 m_jit.link(notImm, m_jit.label());
2101 emitPutArg(X86::eax, 0);
2102 emitGetPutArg(src2, 4, X86::ecx);
2103 emitCall(i, Machine::cti_op_add);
2104 emitPutResult(dst);
2105 } else {
2106 OperandTypes types = OperandTypes::fromInt(instruction[i + 4].u.operand);
2107 if (types.first().mightBeNumber() && types.second().mightBeNumber())
2108 compileBinaryArithOpSlowCase(op_add, iter, dst, src1, src2, types, i);
2109 else
2110 ASSERT_NOT_REACHED();
2111 }
2112
2113 i += 5;
2114 break;
2115 }
2116 case op_get_by_val: {
2117 // The slow case that handles accesses to arrays (below) may jump back up to here.
2118 X86Assembler::JmpDst beginGetByValSlow = m_jit.label();
2119
2120 X86Assembler::JmpSrc notImm = iter->from;
2121 m_jit.link((++iter)->from, m_jit.label());
2122 m_jit.link((++iter)->from, m_jit.label());
2123 emitFastArithIntToImmNoCheck(X86::edx);
2124 m_jit.link(notImm, m_jit.label());
2125 emitPutArg(X86::eax, 0);
2126 emitPutArg(X86::edx, 4);
2127 emitCall(i, Machine::cti_op_get_by_val);
2128 emitPutResult(instruction[i + 1].u.operand);
2129 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
2130
2131 // This is the slow case that handles accesses to arrays above the fast cut-off.
2132 // First, check if this is an access to the vector
2133 m_jit.link((++iter)->from, m_jit.label());
2134 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
2135 m_jit.link(m_jit.emitUnlinkedJbe(), beginGetByValSlow);
2136
2137 // Okay, we missed the fast region, but the access is still within the vector. Get the value.
2138 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
2139 // Check whether the value loaded is zero; if so we need to return undefined.
2140 m_jit.testl_rr(X86::ecx, X86::ecx);
2141 m_jit.link(m_jit.emitUnlinkedJe(), beginGetByValSlow);
2142 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
2143
2144 i += 4;
2145 break;
2146 }
2147 case op_sub: {
2148 compileBinaryArithOpSlowCase(op_sub, iter, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
2149 i += 5;
2150 break;
2151 }
2152 case op_rshift: {
2153 m_jit.link(iter->from, m_jit.label());
2154 m_jit.link((++iter)->from, m_jit.label());
2155 emitPutArg(X86::eax, 0);
2156 emitPutArg(X86::ecx, 4);
2157 emitCall(i, Machine::cti_op_rshift);
2158 emitPutResult(instruction[i + 1].u.operand);
2159 i += 4;
2160 break;
2161 }
2162 case op_lshift: {
2163 X86Assembler::JmpSrc notImm1 = iter->from;
2164 X86Assembler::JmpSrc notImm2 = (++iter)->from;
2165 m_jit.link((++iter)->from, m_jit.label());
2166 emitGetArg(instruction[i + 2].u.operand, X86::eax);
2167 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2168 m_jit.link(notImm1, m_jit.label());
2169 m_jit.link(notImm2, m_jit.label());
2170 emitPutArg(X86::eax, 0);
2171 emitPutArg(X86::ecx, 4);
2172 emitCall(i, Machine::cti_op_lshift);
2173 emitPutResult(instruction[i + 1].u.operand);
2174 i += 4;
2175 break;
2176 }
2177 case op_loop_if_less: {
2178 emitSlowScriptCheck(i);
2179
2180 unsigned target = instruction[i + 3].u.operand;
2181 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2182 if (src2imm) {
2183 m_jit.link(iter->from, m_jit.label());
2184 emitPutArg(X86::edx, 0);
2185 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2186 emitCall(i, Machine::cti_op_loop_if_less);
2187 m_jit.testl_rr(X86::eax, X86::eax);
2188 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2189 } else {
2190 m_jit.link(iter->from, m_jit.label());
2191 m_jit.link((++iter)->from, m_jit.label());
2192 emitPutArg(X86::eax, 0);
2193 emitPutArg(X86::edx, 4);
2194 emitCall(i, Machine::cti_op_loop_if_less);
2195 m_jit.testl_rr(X86::eax, X86::eax);
2196 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2197 }
2198 i += 4;
2199 break;
2200 }
2201 case op_put_by_id: {
2202 m_jit.link(iter->from, m_jit.label());
2203 m_jit.link((++iter)->from, m_jit.label());
2204
2205 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
2206 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
2207 emitPutArg(X86::eax, 0);
2208 emitPutArg(X86::edx, 8);
2209 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_put_by_id);
2210
2211 // Track the location of the call; this will be used to recover repatch information.
2212 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
2213 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
2214 ++structureIDInstructionIndex;
2215
2216 i += 8;
2217 break;
2218 }
2219 case op_get_by_id: {
2220 // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
2221 // so that we only need to track one pointer into the slow case code - we track a pointer to the location
2222 // of the call (which we can use to look up the repatch information), but should an array-length or
2223 // prototype-access trampoline fail we want to bail out back to here. To do so we can subtract back
2224 // the distance from the call to the head of the slow case.
2225
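// In other words, slowCaseBegin == callReturnLocation - repatchOffsetGetByIdSlowCaseCall,
// which is the arithmetic the proto/chain stubs use to link their failure cases back here.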
2226 m_jit.link(iter->from, m_jit.label());
2227 m_jit.link((++iter)->from, m_jit.label());
2228
2229#ifndef NDEBUG
2230 X86Assembler::JmpDst coldPathBegin = m_jit.label();
2231#endif
2232 emitPutArg(X86::eax, 0);
2233 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
2234 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
2235 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_get_by_id);
2236 ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
2237 emitPutResult(instruction[i + 1].u.operand);
2238
2239 // Track the location of the call; this will be used to recover repatch information.
2240 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
2241 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
2242 ++structureIDInstructionIndex;
2243
2244 i += 8;
2245 break;
2246 }
2247 case op_resolve_global: {
2248 ++structureIDInstructionIndex;
2249 i += 6;
2250 break;
2251 }
2252 case op_loop_if_lesseq: {
2253 emitSlowScriptCheck(i);
2254
2255 unsigned target = instruction[i + 3].u.operand;
2256 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2257 if (src2imm) {
2258 m_jit.link(iter->from, m_jit.label());
2259 emitPutArg(X86::edx, 0);
2260 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2261 emitCall(i, Machine::cti_op_loop_if_lesseq);
2262 m_jit.testl_rr(X86::eax, X86::eax);
2263 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2264 } else {
2265 m_jit.link(iter->from, m_jit.label());
2266 m_jit.link((++iter)->from, m_jit.label());
2267 emitPutArg(X86::eax, 0);
2268 emitPutArg(X86::edx, 4);
2269 emitCall(i, Machine::cti_op_loop_if_lesseq);
2270 m_jit.testl_rr(X86::eax, X86::eax);
2271 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2272 }
2273 i += 4;
2274 break;
2275 }
2276 case op_pre_inc: {
2277 unsigned srcDst = instruction[i + 1].u.operand;
2278 X86Assembler::JmpSrc notImm = iter->from;
2279 m_jit.link((++iter)->from, m_jit.label());
2280 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
2281 m_jit.link(notImm, m_jit.label());
2282 emitPutArg(X86::eax, 0);
2283 emitCall(i, Machine::cti_op_pre_inc);
2284 emitPutResult(srcDst);
2285 i += 2;
2286 break;
2287 }
2288 case op_put_by_val: {
2289 // Normal slow cases - either the subscript is not an immediate number, or the base is not an array.
2290 X86Assembler::JmpSrc notImm = iter->from;
2291 m_jit.link((++iter)->from, m_jit.label());
2292 m_jit.link((++iter)->from, m_jit.label());
2293 emitFastArithIntToImmNoCheck(X86::edx);
2294 m_jit.link(notImm, m_jit.label());
2295 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2296 emitPutArg(X86::eax, 0);
2297 emitPutArg(X86::edx, 4);
2298 emitPutArg(X86::ecx, 8);
2299 emitCall(i, Machine::cti_op_put_by_val);
2300 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
2301
2302 // Slow cases for immediate int accesses to arrays: the index is beyond the vector, or this is the first write to a vector slot.
2303 m_jit.link((++iter)->from, m_jit.label());
2304 m_jit.link((++iter)->from, m_jit.label());
2305 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2306 emitPutArg(X86::eax, 0);
2307 emitPutArg(X86::edx, 4);
2308 emitPutArg(X86::ecx, 8);
2309 emitCall(i, Machine::cti_op_put_by_val_array);
2310
2311 i += 4;
2312 break;
2313 }
2314 case op_loop_if_true: {
2315 emitSlowScriptCheck(i);
2316
2317 m_jit.link(iter->from, m_jit.label());
2318 emitPutArg(X86::eax, 0);
2319 emitCall(i, Machine::cti_op_jtrue);
2320 m_jit.testl_rr(X86::eax, X86::eax);
2321 unsigned target = instruction[i + 2].u.operand;
2322 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
2323 i += 3;
2324 break;
2325 }
2326 case op_pre_dec: {
2327 unsigned srcDst = instruction[i + 1].u.operand;
2328 X86Assembler::JmpSrc notImm = iter->from;
2329 m_jit.link((++iter)->from, m_jit.label());
2330 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
2331 m_jit.link(notImm, m_jit.label());
2332 emitPutArg(X86::eax, 0);
2333 emitCall(i, Machine::cti_op_pre_dec);
2334 emitPutResult(srcDst);
2335 i += 2;
2336 break;
2337 }
2338 case op_jnless: {
2339 unsigned target = instruction[i + 3].u.operand;
2340 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2341 if (src2imm) {
2342 m_jit.link(iter->from, m_jit.label());
2343 emitPutArg(X86::edx, 0);
2344 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2345 emitCall(i, Machine::cti_op_jless);
2346 m_jit.testl_rr(X86::eax, X86::eax);
2347 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
2348 } else {
2349 m_jit.link(iter->from, m_jit.label());
2350 m_jit.link((++iter)->from, m_jit.label());
2351 emitPutArg(X86::eax, 0);
2352 emitPutArg(X86::edx, 4);
2353 emitCall(i, Machine::cti_op_jless);
2354 m_jit.testl_rr(X86::eax, X86::eax);
2355 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
2356 }
2357 i += 4;
2358 break;
2359 }
2360 case op_not: {
2361 m_jit.link(iter->from, m_jit.label());
2362 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
2363 emitPutArg(X86::eax, 0);
2364 emitCall(i, Machine::cti_op_not);
2365 emitPutResult(instruction[i + 1].u.operand);
2366 i += 3;
2367 break;
2368 }
2369 case op_jfalse: {
2370 m_jit.link(iter->from, m_jit.label());
2371 emitPutArg(X86::eax, 0);
2372 emitCall(i, Machine::cti_op_jtrue);
2373 m_jit.testl_rr(X86::eax, X86::eax);
2374 unsigned target = instruction[i + 2].u.operand;
2375 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 2 + target]); // Inverted: jfalse branches when cti_op_jtrue returns false.
2376 i += 3;
2377 break;
2378 }
2379 case op_post_inc: {
2380 unsigned srcDst = instruction[i + 2].u.operand;
2381 m_jit.link(iter->from, m_jit.label());
2382 m_jit.link((++iter)->from, m_jit.label());
2383 emitPutArg(X86::eax, 0);
2384 emitCall(i, Machine::cti_op_post_inc);
2385 emitPutResult(instruction[i + 1].u.operand);
2386 emitPutResult(srcDst, X86::edx);
2387 i += 3;
2388 break;
2389 }
2390 case op_bitnot: {
2391 m_jit.link(iter->from, m_jit.label());
2392 emitPutArg(X86::eax, 0);
2393 emitCall(i, Machine::cti_op_bitnot);
2394 emitPutResult(instruction[i + 1].u.operand);
2395 i += 3;
2396 break;
2397 }
2398 case op_bitand: {
2399 unsigned src1 = instruction[i + 2].u.operand;
2400 unsigned src2 = instruction[i + 3].u.operand;
2401 unsigned dst = instruction[i + 1].u.operand;
2402 if (getConstantImmediateNumericArg(src1)) {
2403 m_jit.link(iter->from, m_jit.label());
2404 emitGetPutArg(src1, 0, X86::ecx);
2405 emitPutArg(X86::eax, 4);
2406 emitCall(i, Machine::cti_op_bitand);
2407 emitPutResult(dst);
2408 } else if (getConstantImmediateNumericArg(src2)) {
2409 m_jit.link(iter->from, m_jit.label());
2410 emitPutArg(X86::eax, 0);
2411 emitGetPutArg(src2, 4, X86::ecx);
2412 emitCall(i, Machine::cti_op_bitand);
2413 emitPutResult(dst);
2414 } else {
2415 m_jit.link(iter->from, m_jit.label());
2416 emitGetPutArg(src1, 0, X86::ecx);
2417 emitPutArg(X86::edx, 4);
2418 emitCall(i, Machine::cti_op_bitand);
2419 emitPutResult(dst);
2420 }
2421 i += 5;
2422 break;
2423 }
2424 case op_jtrue: {
2425 m_jit.link(iter->from, m_jit.label());
2426 emitPutArg(X86::eax, 0);
2427 emitCall(i, Machine::cti_op_jtrue);
2428 m_jit.testl_rr(X86::eax, X86::eax);
2429 unsigned target = instruction[i + 2].u.operand;
2430 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
2431 i += 3;
2432 break;
2433 }
2434 case op_post_dec: {
2435 unsigned srcDst = instruction[i + 2].u.operand;
2436 m_jit.link(iter->from, m_jit.label());
2437 m_jit.link((++iter)->from, m_jit.label());
2438 emitPutArg(X86::eax, 0);
2439 emitCall(i, Machine::cti_op_post_dec);
2440 emitPutResult(instruction[i + 1].u.operand);
2441 emitPutResult(srcDst, X86::edx);
2442 i += 3;
2443 break;
2444 }
2445 case op_bitxor: {
2446 m_jit.link(iter->from, m_jit.label());
2447 emitPutArg(X86::eax, 0);
2448 emitPutArg(X86::edx, 4);
2449 emitCall(i, Machine::cti_op_bitxor);
2450 emitPutResult(instruction[i + 1].u.operand);
2451 i += 5;
2452 break;
2453 }
2454 case op_bitor: {
2455 m_jit.link(iter->from, m_jit.label());
2456 emitPutArg(X86::eax, 0);
2457 emitPutArg(X86::edx, 4);
2458 emitCall(i, Machine::cti_op_bitor);
2459 emitPutResult(instruction[i + 1].u.operand);
2460 i += 5;
2461 break;
2462 }
2463 case op_eq: {
2464 m_jit.link(iter->from, m_jit.label());
2465 emitPutArg(X86::eax, 0);
2466 emitPutArg(X86::edx, 4);
2467 emitCall(i, Machine::cti_op_eq);
2468 emitPutResult(instruction[i + 1].u.operand);
2469 i += 4;
2470 break;
2471 }
2472 case op_neq: {
2473 m_jit.link(iter->from, m_jit.label());
2474 emitPutArg(X86::eax, 0);
2475 emitPutArg(X86::edx, 4);
2476 emitCall(i, Machine::cti_op_neq);
2477 emitPutResult(instruction[i + 1].u.operand);
2478 i += 4;
2479 break;
2480 }
2481 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_stricteq);
2482 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_nstricteq);
2483 case op_instanceof: {
2484 m_jit.link(iter->from, m_jit.label());
2485 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
2486 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
2487 emitGetPutArg(instruction[i + 4].u.operand, 8, X86::ecx);
2488 emitCall(i, Machine::cti_op_instanceof);
2489 emitPutResult(instruction[i + 1].u.operand);
2490 i += 5;
2491 break;
2492 }
2493 case op_mod: {
2494 X86Assembler::JmpSrc notImm1 = iter->from;
2495 X86Assembler::JmpSrc notImm2 = (++iter)->from;
2496 m_jit.link((++iter)->from, m_jit.label());
2497 emitFastArithReTagImmediate(X86::eax);
2498 emitFastArithReTagImmediate(X86::ecx);
2499 m_jit.link(notImm1, m_jit.label());
2500 m_jit.link(notImm2, m_jit.label());
2501 emitPutArg(X86::eax, 0);
2502 emitPutArg(X86::ecx, 4);
2503 emitCall(i, Machine::cti_op_mod);
2504 emitPutResult(instruction[i + 1].u.operand);
2505 i += 4;
2506 break;
2507 }
2508 case op_mul: {
2509 int dst = instruction[i + 1].u.operand;
2510 int src1 = instruction[i + 2].u.operand;
2511 int src2 = instruction[i + 3].u.operand;
2512 if (getConstantImmediateNumericArg(src1) || getConstantImmediateNumericArg(src2)) {
2513 m_jit.link(iter->from, m_jit.label());
2514 emitGetPutArg(src1, 0, X86::ecx);
2515 emitGetPutArg(src2, 4, X86::ecx);
2516 emitCall(i, Machine::cti_op_mul);
2517 emitPutResult(dst);
2518 } else
2519 compileBinaryArithOpSlowCase(op_mul, iter, dst, src1, src2, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
2520 i += 5;
2521 break;
2522 }
2523
2524 case op_call:
2525 case op_call_eval:
2526 case op_construct: {
2527 m_jit.link(iter->from, m_jit.label());
2528
2529 // We jump to this slow case if the ctiCode for the codeBlock has not yet been generated; compile it now.
2530 emitCall(i, Machine::cti_vm_compile);
2531 emitCall(i, X86::eax);
2532
2533 // Instead of checking for 0 we could initialize the CodeBlock::ctiCode to point to a trampoline that would trigger the translation.
2534
2535 // Put the return value in dst. In the interpreter, op_ret does this.
2536 emitPutResult(instruction[i + 1].u.operand);
2537 i += 7;
2538 break;
2539 }
2540 case op_to_jsnumber: {
2541 m_jit.link(iter->from, m_jit.label());
2542 m_jit.link((++iter)->from, m_jit.label());
2543
2544 emitPutArg(X86::eax, 0);
2545 emitCall(i, Machine::cti_op_to_jsnumber);
2546
2547 emitPutResult(instruction[i + 1].u.operand);
2548 i += 3;
2549 break;
2550 }
2551
2552 default:
2553 ASSERT_NOT_REACHED();
2554 break;
2555 }
2556
2557 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i]);
2558 }
2559
2560 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
2561}
2562
2563void CTI::privateCompile()
2564{
2565 // Could use a popl_m, but would need to offset the following instruction if so.
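// The caller reached us via a call instruction, so the native return address is on the
// stack; pop it into ecx and stash it in the frame header, where op_ret later reloads
// it and pushes it back before its ret.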
2566 m_jit.popl_r(X86::ecx);
2567 emitPutToCallFrameHeader(X86::ecx, RegisterFile::ReturnPC);
2568
2569 privateCompileMainPass();
2570 privateCompileLinkPass();
2571 privateCompileSlowCases();
2572
2573 ASSERT(m_jmpTable.isEmpty());
2574
2575 void* code = m_jit.copy();
2576 ASSERT(code);
2577
2578 // Translate vPC offsets into addresses in JIT generated code, for switch tables.
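// Branch offsets recorded against bytecode indices become absolute addresses in the
// emitted code; an offset of 0 means "use the default target".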
2579 for (unsigned i = 0; i < m_switches.size(); ++i) {
2580 SwitchRecord record = m_switches[i];
2581 unsigned opcodeIndex = record.m_opcodeIndex;
2582
2583 if (record.m_type != SwitchRecord::String) {
2584 ASSERT(record.m_type == SwitchRecord::Immediate || record.m_type == SwitchRecord::Character);
2585 ASSERT(record.m_jumpTable.m_simpleJumpTable->branchOffsets.size() == record.m_jumpTable.m_simpleJumpTable->ctiOffsets.size());
2586
2587 record.m_jumpTable.m_simpleJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2588
2589 for (unsigned j = 0; j < record.m_jumpTable.m_simpleJumpTable->branchOffsets.size(); ++j) {
2590 unsigned offset = record.m_jumpTable.m_simpleJumpTable->branchOffsets[j];
2591 record.m_jumpTable.m_simpleJumpTable->ctiOffsets[j] = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_simpleJumpTable->ctiDefault;
2592 }
2593 } else {
2594 ASSERT(record.m_type == SwitchRecord::String);
2595
2596 record.m_jumpTable.m_stringJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2597
2598 StringJumpTable::StringOffsetTable::iterator end = record.m_jumpTable.m_stringJumpTable->offsetTable.end();
2599 for (StringJumpTable::StringOffsetTable::iterator it = record.m_jumpTable.m_stringJumpTable->offsetTable.begin(); it != end; ++it) {
2600 unsigned offset = it->second.branchOffset;
2601 it->second.ctiOffset = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_stringJumpTable->ctiDefault;
2602 }
2603 }
2604 }
2605
2606 for (Vector<HandlerInfo>::iterator iter = m_codeBlock->exceptionHandlers.begin(); iter != m_codeBlock->exceptionHandlers.end(); ++iter)
2607 iter->nativeCode = m_jit.getRelocatedAddress(code, m_labels[iter->target]);
2608
2609 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
2610 if (iter->to)
2611 X86Assembler::link(code, iter->from, iter->to);
2612 m_codeBlock->ctiReturnAddressVPCMap.add(m_jit.getRelocatedAddress(code, iter->from), iter->opcodeIndex);
2613 }
2614
2615 // Link absolute addresses for jsr
2616 for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
2617 X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);
2618
2619 for (unsigned i = 0; i < m_codeBlock->structureIDInstructions.size(); ++i) {
2620 StructureStubInfo& info = m_codeBlock->structureIDInstructions[i];
2621 info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].callReturnLocation);
2622 info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].hotPathBegin);
2623 }
2624
2625 m_codeBlock->ctiCode = code;
2626}
2627
2628void CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset, void* returnAddress)
2629{
2630 // Check eax is an object of the right StructureID.
2631 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2632 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2633 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2634 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2635
2636 // Checks out okay! - getDirectOffset
2637 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2638 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
2639 m_jit.ret();
2640
2641 void* code = m_jit.copy();
2642 ASSERT(code);
2643
2644 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2645 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2646
2647 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2648
2649 ctiRepatchCallByReturnAddress(returnAddress, code);
2650}
2651
2652void CTI::privateCompileGetByIdProto(StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
2653{
2654#if USE(CTI_REPATCH_PIC)
2655 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
2656
2657 // We don't want to repatch more than once - in future, calls through this return address go to cti_op_get_by_id_fail.
2658 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2659
2660 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2661 // referencing the prototype object - let's speculatively load its table nice and early!)
2662 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_callFrame));
2663 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2664 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2665
2666 // check eax is an object of the right StructureID.
2667 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2668 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2669 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2670 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2671
2672 // Check that the prototype object's StructureID has not changed.
2673 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2674 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2675 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2676
2677 // Checks out okay! - getDirectOffset
2678 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::ecx);
2679
2680 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
2681
2682 void* code = m_jit.copy();
2683 ASSERT(code);
2684
2685 // Use the repatch information to link the failure cases back to the original slow case routine.
2686 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
2687 X86Assembler::link(code, failureCases1, slowCaseBegin);
2688 X86Assembler::link(code, failureCases2, slowCaseBegin);
2689 X86Assembler::link(code, failureCases3, slowCaseBegin);
2690
2691 // On success, return back to the hot path code, at a point where it will perform the store to dest for us.
2692 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
2693 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
2694
2695 // Track the stub we have created so that it will be deleted later.
2696 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2697
2698 // Finally, repatch the jump to the slow case back in the hot path to jump here instead.
2699 // FIXME: should revert this repatching, on failure.
2700 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
2701 X86Assembler::repatchBranchOffset(jmpLocation, code);
2702#else
2703 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2704 // referencing the prototype object - let's speculatively load its table nice and early!)
2705 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_callFrame));
2706 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2707 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2708
2709 // check eax is an object of the right StructureID.
2710 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2711 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2712 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2713 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2714
2715 // Check that the prototype object's StructureID has not changed.
2716 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2717 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2718 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2719
2720 // Checks out okay! - getDirectOffset
2721 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2722
2723 m_jit.ret();
2724
2725 void* code = m_jit.copy();
2726 ASSERT(code);
2727
2728 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2729 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2730 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2731
2732 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2733
2734 ctiRepatchCallByReturnAddress(returnAddress, code);
2735#endif
2736}
2737
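// An illustrative sketch (not part of the original file) of the checks the
// proto stub above performs, expressed as plain C++. The helper name is
// hypothetical and the block is kept inside #if 0 - it exists only to document
// the fast path the generated code takes.
#if 0
static JSValue* getByIdProtoStubEquivalent(JSValue* baseValue, StructureID* structureID, StructureID* prototypeStructureID, JSObject* protoObject, size_t cachedOffset)
{
    // Bail to the slow case if the base is an immediate rather than a cell.
    if (JSImmediate::isImmediate(baseValue))
        return 0;
    // Bail if the base object's StructureID no longer matches the cached one.
    if (static_cast<JSCell*>(baseValue)->structureID() != structureID)
        return 0;
    // Bail if the prototype's StructureID has changed since the stub was built.
    if (protoObject->structureID() != prototypeStructureID)
        return 0;
    // Otherwise the cached offset into the prototype's property storage is still valid.
    return protoObject->getDirectOffset(cachedOffset);
}
#endif
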
void CTI::privateCompileGetByIdChain(StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
{
    ASSERT(count);

    Vector<X86Assembler::JmpSrc> bucketsOfFail;

    // Check eax is an object of the right StructureID.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    bucketsOfFail.append(m_jit.emitUnlinkedJne());
    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
    bucketsOfFail.append(m_jit.emitUnlinkedJne());

    StructureID* currStructureID = structureID;
    RefPtr<StructureID>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(m_callFrame));
        currStructureID = chainEntries[i].get();

        // Check that the prototype object's StructureID has not changed.
        StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
        m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(currStructureID), static_cast<void*>(protoStructureIDAddress));
        bucketsOfFail.append(m_jit.emitUnlinkedJne());
    }
    ASSERT(protoObject);

    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    m_jit.ret();

    bucketsOfFail.append(m_jit.emitUnlinkedJmp());

    void* code = m_jit.copy();
    ASSERT(code);

    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));

    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
}

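// An illustrative sketch (not part of the original file) of the chain stub's
// logic in plain C++; the helper name is hypothetical and the block is kept
// inside #if 0.
#if 0
static JSValue* getByIdChainStubEquivalent(JSValue* baseValue, StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
{
    // Bail to cti_op_get_by_id_fail on an immediate base or a StructureID mismatch.
    if (JSImmediate::isImmediate(baseValue) || static_cast<JSCell*>(baseValue)->structureID() != structureID)
        return 0;

    // Walk the prototype chain, re-checking each StructureID recorded in the chain.
    StructureID* currStructureID = structureID;
    RefPtr<StructureID>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(callFrame));
        currStructureID = chainEntries[i].get();
        if (protoObject->structureID() != currStructureID)
            return 0;
    }

    // The property lives at a fixed offset in the final prototype's storage.
    return protoObject->getDirectOffset(cachedOffset);
}
#endif
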
void CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset, void* returnAddress)
{
    // Check eax is an object of the right StructureID.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();

    // Checks out okay! - putDirectOffset
    m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
    m_jit.ret();

    void* code = m_jit.copy();
    ASSERT(code);

    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));

    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
}

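// An illustrative sketch (not part of the original file) of the replace stub's
// effect in plain C++ (hypothetical helper, kept in #if 0): an existing
// property is overwritten in place, so no StructureID change is needed.
#if 0
static void putByIdReplaceStubEquivalent(JSValue* baseValue, StructureID* structureID, size_t cachedOffset, JSValue* value)
{
    // Bail to cti_op_put_by_id_fail on an immediate base or a StructureID mismatch.
    if (JSImmediate::isImmediate(baseValue) || static_cast<JSCell*>(baseValue)->structureID() != structureID)
        return;
    static_cast<JSObject*>(baseValue)->putDirectOffset(cachedOffset, value);
}
#endif
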
extern "C" {

static JSValue* transitionObject(StructureID* newStructureID, size_t cachedOffset, JSObject* baseObject, JSValue* value)
{
    baseObject->transitionTo(newStructureID);
    baseObject->putDirectOffset(cachedOffset, value);
    return baseObject;
}

}

static inline bool transitionWillNeedStorageRealloc(StructureID* oldStructureID, StructureID* newStructureID)
{
    return oldStructureID->propertyStorageCapacity() != newStructureID->propertyStorageCapacity();
}

void CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
{
    Vector<X86Assembler::JmpSrc, 16> failureCases;
    // Check eax is an object of the right StructureID.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    failureCases.append(m_jit.emitUnlinkedJne());
    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(oldStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
    failureCases.append(m_jit.emitUnlinkedJne());
    Vector<X86Assembler::JmpSrc> successCases;

    // ecx = baseObject->m_structureID
    m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
    // Ensure the base is a real object, then load its prototype: ecx = baseObject->m_structureID->m_prototype.
    m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
    failureCases.append(m_jit.emitUnlinkedJne());
    m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);

    // Walk the prototype chain; ecx holds the current prototype object.
    for (RefPtr<StructureID>* it = sIDC->head(); *it; ++it) {
        // Null check the prototype.
        m_jit.cmpl_i32r(reinterpret_cast<intptr_t>(jsNull()), X86::ecx);
        successCases.append(m_jit.emitUnlinkedJe());

        // Check the StructureID.
        m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(it->get()), OBJECT_OFFSET(JSCell, m_structureID), X86::ecx);
        failureCases.append(m_jit.emitUnlinkedJne());

        m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
        m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
        failureCases.append(m_jit.emitUnlinkedJne());
        m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
    }

    failureCases.append(m_jit.emitUnlinkedJne());
    for (unsigned i = 0; i < successCases.size(); ++i)
        m_jit.link(successCases[i], m_jit.label());

    X86Assembler::JmpSrc callTarget;
    // Fast case: we don't need to do any heavy lifting, so don't bother making a call.
    if (!transitionWillNeedStorageRealloc(oldStructureID, newStructureID)) {
        // Adjust the refcounts directly in memory; this is safe because the
        // CodeBlock guarantees oldStructureID->m_refCount > 0.
        m_jit.subl_i8m(1, reinterpret_cast<void*>(oldStructureID));
        m_jit.addl_i8m(1, reinterpret_cast<void*>(newStructureID));
        m_jit.movl_i32m(reinterpret_cast<uint32_t>(newStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);

        // Write the value.
        m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
        m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
    } else {
        // Slow case transition -- we're going to need to do quite a bit of work,
        // so just make a call.
        m_jit.pushl_r(X86::edx);
        m_jit.pushl_r(X86::eax);
        m_jit.movl_i32r(cachedOffset, X86::eax);
        m_jit.pushl_r(X86::eax);
        m_jit.movl_i32r(reinterpret_cast<uint32_t>(newStructureID), X86::eax);
        m_jit.pushl_r(X86::eax);
        callTarget = m_jit.emitCall();
        m_jit.addl_i32r(4 * sizeof(void*), X86::esp);
    }
    m_jit.ret();

    X86Assembler::JmpSrc failureJump;
    if (failureCases.size()) {
        for (unsigned i = 0; i < failureCases.size(); ++i)
            m_jit.link(failureCases[i], m_jit.label());
        m_jit.emitRestoreArgumentReferenceForTrampoline();
        failureJump = m_jit.emitUnlinkedJmp();
    }

    void* code = m_jit.copy();
    ASSERT(code);

    if (failureCases.size())
        X86Assembler::link(code, failureJump, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));

    if (transitionWillNeedStorageRealloc(oldStructureID, newStructureID))
        X86Assembler::link(code, callTarget, reinterpret_cast<void*>(transitionObject));

    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
}

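// An illustrative sketch (not part of the original file) of the transition
// stub's behaviour in plain C++. The helper name is hypothetical and the block
// is kept inside #if 0; when no storage realloc is needed the emitted fast
// path inlines the StructureID swap and the store instead of calling out.
#if 0
static JSValue* putByIdTransitionStubEquivalent(JSObject* baseObject, StructureID* oldStructureID, StructureID* newStructureID, StructureIDChain* sIDC, size_t cachedOffset, JSValue* value)
{
    // Bail to cti_op_put_by_id_fail if the base no longer has the old StructureID.
    if (baseObject->structureID() != oldStructureID)
        return 0;

    // Re-check every prototype recorded when the transition was cached; if any
    // StructureID has changed, the cached transition may no longer be valid.
    JSValue* proto = oldStructureID->prototype();
    for (RefPtr<StructureID>* it = sIDC->head(); *it; ++it) {
        if (proto == jsNull())
            break; // reached the end of the chain - all checks passed
        JSObject* protoObject = static_cast<JSObject*>(proto);
        if (protoObject->structureID() != it->get())
            return 0;
        proto = protoObject->structureID()->prototype();
    }

    // Net effect of both the inlined fast path and the out-of-line call:
    return transitionObject(newStructureID, cachedOffset, baseObject, value);
}
#endif
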
void* CTI::privateCompileArrayLengthTrampoline()
{
    // Check eax is an array.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();

    // Checks out okay! - get the length from the storage.
    m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
    m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::eax, X86::eax);

    // Box the length as an immediate number ((length << 1) | 1), bailing out on overflow.
    m_jit.addl_rr(X86::eax, X86::eax);
    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
    m_jit.addl_i8r(1, X86::eax);

    m_jit.ret();

    void* code = m_jit.copy();
    ASSERT(code);

    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));

    return code;
}

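// The addl_rr / jo / addl_i8r sequence above (and in the string-length
// trampoline below) boxes the raw length as a JSImmediate integer: immediate
// ints store (value << 1) | 1 in the pointer bits, and the jo check bails to
// the slow case when the length is too large to fit. A sketch of the same
// boxing in plain C++ (hypothetical helper, for illustration only):
#if 0
static JSValue* lengthAsImmediate(unsigned length)
{
    // Doubling a length >= 0x40000000 overflows a signed 32-bit int, which is
    // exactly what the jo check above catches; take the slow path instead.
    if (length >= 0x40000000u)
        return 0;
    return reinterpret_cast<JSValue*>((length << 1) | 1);
}
#endif
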
void* CTI::privateCompileStringLengthTrampoline()
{
    // Check eax is a string.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsStringVptr), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();

    // Checks out okay! - get the length from the UString.
    m_jit.movl_mr(OBJECT_OFFSET(JSString, m_value) + OBJECT_OFFSET(UString, m_rep), X86::eax, X86::eax);
    m_jit.movl_mr(OBJECT_OFFSET(UString::Rep, len), X86::eax, X86::eax);

    // Box the length as an immediate number, bailing out on overflow.
    m_jit.addl_rr(X86::eax, X86::eax);
    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
    m_jit.addl_i8r(1, X86::eax);

    m_jit.ret();

    void* code = m_jit.copy();
    ASSERT(code);

    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));

    return code;
}

void CTI::patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
{
    StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to Machine::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_generic));

    // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructureID, reinterpret_cast<uint32_t>(structureID));
}

void CTI::patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
{
    StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to Machine::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_put_by_id_generic));

    // Repatch the offset into the property map to store to, then repatch the StructureID to look for.
    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructureID, reinterpret_cast<uint32_t>(structureID));
}

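// Both patch functions above work by rewriting 32-bit operands that the hot
// path emitted at fixed offsets from hotPathBegin (the repatchOffset*
// constants). Conceptually each repatch helper is just a raw store into the
// already-generated instruction stream - a sketch, assuming the offset points
// just past the operand to overwrite (an assumption; the real convention is
// defined by X86Assembler):
#if 0
static void repatch32BitOperand(intptr_t where, int32_t value)
{
    // Hypothetical illustration only; the real helpers are
    // X86Assembler::repatchImmediate and X86Assembler::repatchDisplacement.
    reinterpret_cast<int32_t*>(where)[-1] = value;
}
#endif
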
void CTI::privateCompilePatchGetArrayLength(void* returnAddress)
{
    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - in future go to cti_op_get_by_id_fail.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));

    // Check eax is an array.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();

    // Checks out okay! - get the length from the storage.
    m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
    m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);

    // Box the length as an immediate number, bailing out on overflow.
    m_jit.addl_rr(X86::ecx, X86::ecx);
    X86Assembler::JmpSrc failureClobberedECX = m_jit.emitUnlinkedJo();
    m_jit.addl_i8r(1, X86::ecx);

    X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();

    m_jit.link(failureClobberedECX, m_jit.label());
    m_jit.emitRestoreArgumentReference();
    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJmp();

    void* code = m_jit.copy();
    ASSERT(code);

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    X86Assembler::link(code, failureCases1, slowCaseBegin);
    X86Assembler::link(code, failureCases2, slowCaseBegin);
    X86Assembler::link(code, failureCases3, slowCaseBegin);

    // On success, return back to the hot path code, at a point where it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    // Finally, repatch the jump to the slow case back in the hot path to jump here instead.
    // FIXME: Should revert this repatching on failure.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}

void CTI::emitGetVariableObjectRegister(X86Assembler::RegisterID variableObject, int index, X86Assembler::RegisterID dst)
{
    m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, dst);
    m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), dst, dst);
    m_jit.movl_mr(index * sizeof(Register), dst, dst);
}

void CTI::emitPutVariableObjectRegister(X86Assembler::RegisterID src, X86Assembler::RegisterID variableObject, int index)
{
    m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, variableObject);
    m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), variableObject, variableObject);
    m_jit.movl_rm(src, index * sizeof(Register), variableObject);
}

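// In C++ terms the two emit helpers above read and write
// variableObject->d->registers[index]. A sketch (hypothetical helper names,
// kept inside #if 0 because the data pointer is not public here):
#if 0
static Register getVariableObjectRegisterEquivalent(JSVariableObject* variableObject, int index)
{
    return variableObject->d->registers[index];
}

static void putVariableObjectRegisterEquivalent(JSVariableObject* variableObject, int index, Register value)
{
    variableObject->d->registers[index] = value;
}
#endif
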
#if ENABLE(WREC)

void* CTI::compileRegExp(Machine* machine, const UString& pattern, unsigned* numSubpatterns_ptr, const char** error_ptr, bool ignoreCase, bool multiline)
{
    // TODO: better error messages
    if (pattern.size() > MaxPatternSize) {
        *error_ptr = "regular expression too large";
        return 0;
    }

    X86Assembler jit(machine->jitCodeBuffer());
    WRECParser parser(pattern, ignoreCase, multiline, jit);

    jit.emitConvertToFastCall();
    // (0) Setup:
    //     Preserve regs & initialize outputRegister.
    jit.pushl_r(WRECGenerator::outputRegister);
    jit.pushl_r(WRECGenerator::currentValueRegister);
    // Push pos onto the stack, both to preserve it and to make it available as a parameter to parseDisjunction.
    jit.pushl_r(WRECGenerator::currentPositionRegister);
    // Load the output pointer.
    jit.movl_mr(16
#if COMPILER(MSVC)
                    + 3 * sizeof(void*)
#endif
                    , X86::esp, WRECGenerator::outputRegister);

    // Restart point on match failure.
    WRECGenerator::JmpDst nextLabel = jit.label();

    // (1) Parse Disjunction:

    //     Parsing the disjunction should fully consume the pattern.
    JmpSrcVector failures;
    parser.parseDisjunction(failures);
    if (!parser.isEndOfPattern())
        parser.m_err = WRECParser::Error_malformedPattern;
    if (parser.m_err) {
        // TODO: better error messages
        *error_ptr = "TODO: better error messages";
        return 0;
    }

    // (2) Success:
    //     Set return value & pop registers from the stack.

    jit.testl_rr(WRECGenerator::outputRegister, WRECGenerator::outputRegister);
    WRECGenerator::JmpSrc noOutput = jit.emitUnlinkedJe();

    jit.movl_rm(WRECGenerator::currentPositionRegister, 4, WRECGenerator::outputRegister);
    jit.popl_r(X86::eax);
    jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
    jit.popl_r(WRECGenerator::currentValueRegister);
    jit.popl_r(WRECGenerator::outputRegister);
    jit.ret();

    jit.link(noOutput, jit.label());

    jit.popl_r(X86::eax);
    jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
    jit.popl_r(WRECGenerator::currentValueRegister);
    jit.popl_r(WRECGenerator::outputRegister);
    jit.ret();

    // (3) Failure:
    //     All fails link to here. Advance the start point and, if it is still within range, loop.
    //     Otherwise, return the failure value.
    WRECGenerator::JmpDst here = jit.label();
    for (unsigned i = 0; i < failures.size(); ++i)
        jit.link(failures[i], here);
    failures.clear();

    jit.movl_mr(X86::esp, WRECGenerator::currentPositionRegister);
    jit.addl_i8r(1, WRECGenerator::currentPositionRegister);
    jit.movl_rm(WRECGenerator::currentPositionRegister, X86::esp);
    jit.cmpl_rr(WRECGenerator::lengthRegister, WRECGenerator::currentPositionRegister);
    jit.link(jit.emitUnlinkedJle(), nextLabel);

    jit.addl_i8r(4, X86::esp);

    jit.movl_i32r(-1, X86::eax);
    jit.popl_r(WRECGenerator::currentValueRegister);
    jit.popl_r(WRECGenerator::outputRegister);
    jit.ret();

    *numSubpatterns_ptr = parser.m_numSubpatterns;

    void* code = jit.copy();
    ASSERT(code);
    return code;
}

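// A sketch (not from the original file) of how the buffer returned by
// compileRegExp is typically invoked. The function-pointer type below is an
// assumption - the real typedef lives with the RegExp code that calls this -
// but the register setup above implies arguments of roughly this shape: the
// input characters, a start offset, the input length, and an output vector
// that receives (start, end) index pairs for the match and its subpatterns.
// The generated code returns the match start index, or -1 on failure.
#if 0
typedef int (*HypotheticalRegExpFunction)(const UChar* input, unsigned startOffset, unsigned length, int* output);

static int runCompiledRegExp(void* code, const UString& input, int* output)
{
    HypotheticalRegExpFunction match = reinterpret_cast<HypotheticalRegExpFunction>(code);
    return match(input.data(), 0, input.size(), output);
}
#endif
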
#endif // ENABLE(WREC)

} // namespace JSC

#endif // ENABLE(CTI)