Changeset 38984 in webkit for trunk/JavaScriptCore/jit/JITCall.cpp
- Timestamp: Dec 3, 2008, 9:43:14 PM
- File: 1 copied
Legend:
- Unmodified (no prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
trunk/JavaScriptCore/jit/JITCall.cpp
r38983 → r38984

 #include "CodeBlock.h"
+#include "JITInlineMethods.h"
 #include "JSArray.h"
 #include "JSFunction.h"
…
 #endif

-#define __ m_assembler.
-
-#if PLATFORM(WIN)
-#undef FIELD_OFFSET // Fix conflict with winnt.h.
-#endif
-
-// FIELD_OFFSET: Like the C++ offsetof macro, but you can use it with classes.
-// The magic number 0x4000 is insignificant. We use it to avoid using NULL, since
-// NULL can cause compiler problems, especially in cases of multiple inheritance.
-#define FIELD_OFFSET(class, field) (reinterpret_cast<ptrdiff_t>(&(reinterpret_cast<class*>(0x4000)->field)) - 0x4000)
-
 using namespace std;

 namespace JSC {

-typedef X86Assembler::JmpSrc JmpSrc;
-
-#if PLATFORM(MAC)
-
-static inline bool isSSE2Present()
-{
-    return true; // All X86 Macs are guaranteed to support at least SSE2
-}
-
-#else
-
-static bool isSSE2Present()
-{
-    static const int SSE2FeatureBit = 1 << 26;
-    struct SSE2Check {
-        SSE2Check()
-        {
-            int flags;
-#if COMPILER(MSVC)
-            _asm {
-                mov eax, 1 // cpuid function 1 gives us the standard feature set
-                cpuid;
-                mov flags, edx;
-            }
-#else
-            flags = 0;
-            // FIXME: Add GCC code to do above asm
-#endif
-            present = (flags & SSE2FeatureBit) != 0;
-        }
-        bool present;
-    };
-    static SSE2Check check;
-    return check.present;
-}
-
-#endif
-
-COMPILE_ASSERT(CTI_ARGS_code == 0xC, CTI_ARGS_code_is_C);
-COMPILE_ASSERT(CTI_ARGS_callFrame == 0xE, CTI_ARGS_callFrame_is_E);
-
-#if COMPILER(GCC) && PLATFORM(X86)
-
-#if PLATFORM(DARWIN)
-#define SYMBOL_STRING(name) "_" #name
-#else
-#define SYMBOL_STRING(name) #name
-#endif
-
-asm(
-".globl " SYMBOL_STRING(ctiTrampoline) "\n"
-SYMBOL_STRING(ctiTrampoline) ":" "\n"
-    "pushl %esi" "\n"
-    "pushl %edi" "\n"
-    "pushl %ebx" "\n"
-    "subl $0x20, %esp" "\n"
-    "movl $512, %esi" "\n"
-    "movl 0x38(%esp), %edi" "\n" // 0x38 = 0x0E * 4, 0x0E = CTI_ARGS_callFrame (see assertion above)
-    "call *0x30(%esp)" "\n" // 0x30 = 0x0C * 4, 0x0C = CTI_ARGS_code (see assertion above)
-    "addl $0x20, %esp" "\n"
-    "popl %ebx" "\n"
-    "popl %edi" "\n"
-    "popl %esi" "\n"
-    "ret" "\n"
-);
-
-asm(
-".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
-#if USE(CTI_ARGUMENT)
-#if USE(FAST_CALL_CTI_ARGUMENT)
-    "movl %esp, %ecx" "\n"
-#else
-    "movl %esp, 0(%esp)" "\n"
-#endif
-    "call " SYMBOL_STRING(_ZN3JSC11Interpreter12cti_vm_throwEPPv) "\n"
-#else
-    "call " SYMBOL_STRING(_ZN3JSC11Interpreter12cti_vm_throwEPvz) "\n"
-#endif
-    "addl $0x20, %esp" "\n"
-    "popl %ebx" "\n"
-    "popl %edi" "\n"
-    "popl %esi" "\n"
-    "ret" "\n"
-);
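The removed isSSE2Present() above still carries a FIXME for the missing GCC path. For reference, a minimal sketch of what that cpuid check looks like using GCC's <cpuid.h> wrapper; this is an illustration, not code from this changeset:

    #include <cpuid.h>

    // CPUID leaf 1 reports the standard feature set; SSE2 is bit 26 of EDX,
    // matching the SSE2FeatureBit (1 << 26) used in the MSVC path above.
    static bool isSSE2PresentGCC()
    {
        unsigned eax, ebx, ecx, edx;
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return false; // leaf 1 unsupported
        return (edx & (1u << 26)) != 0;
    }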
+void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
+{
+    // When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
+    // (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
+    // match). Reset the check so it no longer matches.
+    reinterpret_cast<void**>(callLinkInfo->hotPathBegin)[-1] = asPointer(JSImmediate::impossibleValue());
+}
+
+void JIT::linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, void* ctiCode, CallLinkInfo* callLinkInfo, int callerArgCount)
+{
+    // Currently we only link calls with the exact number of arguments.
+    if (callerArgCount == calleeCodeBlock->numParameters) {
+        ASSERT(!callLinkInfo->isLinked());
+
+        calleeCodeBlock->addCaller(callLinkInfo);
+
+        reinterpret_cast<void**>(callLinkInfo->hotPathBegin)[-1] = callee;
+        ctiRepatchCallByReturnAddress(callLinkInfo->hotPathOther, ctiCode);
+    }
+
+    // repatch the instruction that jumps out to the cold path, so that we only try to link once.
+    void* repatchCheck = reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(callLinkInfo->hotPathBegin) + repatchOffsetOpCallCall);
+    ctiRepatchCallByReturnAddress(repatchCheck, callLinkInfo->coldPathOther);
+}
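The linking scheme added above patches the 32-bit callee check planted one word before hotPathBegin. A minimal sketch of the mechanics, with a hypothetical stand-in type (the real CallLinkInfo lives elsewhere in JavaScriptCore):

    #include <cstdint>

    struct LinkInfo { void* hotPathBegin; }; // hypothetical stand-in

    // Linking: overwrite the callee check embedded in the instruction stream
    // so the fast path matches this function object from now on.
    inline void setCalleeCheck(LinkInfo* info, void* callee)
    {
        reinterpret_cast<void**>(info->hotPathBegin)[-1] = callee;
    }

    // Unlinking: reset the check to a value no live object can have, so a
    // recycled allocation at the same address cannot cause a false match.
    inline void clearCalleeCheck(LinkInfo* info, void* impossibleValue)
    {
        reinterpret_cast<void**>(info->hotPathBegin)[-1] = impossibleValue;
    }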
-#elif COMPILER(MSVC)
-
-extern "C" {
-
-    __declspec(naked) JSValue* ctiTrampoline(void* code, RegisterFile*, CallFrame*, JSValue** exception, Profiler**, JSGlobalData*)
-    {
-        __asm {
-            push esi;
-            push edi;
-            push ebx;
-            sub esp, 0x20;
-            mov esi, 512;
-            mov ecx, esp;
-            mov edi, [esp + 0x38];
-            call [esp + 0x30]; // 0x30 = 0x0C * 4, 0x0C = CTI_ARGS_code (see assertion above)
-            add esp, 0x20;
-            pop ebx;
-            pop edi;
-            pop esi;
-            ret;
-        }
-    }
-
-    __declspec(naked) void ctiVMThrowTrampoline()
-    {
-        __asm {
-            mov ecx, esp;
-            call JSC::Interpreter::cti_vm_throw;
-            add esp, 0x20;
-            pop ebx;
-            pop edi;
-            pop esi;
-            ret;
-        }
-    }
-
-}
-
-#endif
-
-static ALWAYS_INLINE uintptr_t asInteger(JSValue* value)
-{
-    return reinterpret_cast<uintptr_t>(value);
-}
-
-ALWAYS_INLINE void JIT::killLastResultRegister()
-{
-    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
-}
-
-// get arg puts an arg from the SF register array into a h/w register
-ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst, unsigned currentInstructionIndex)
-{
-    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
-    if (m_codeBlock->isConstantRegisterIndex(src)) {
-        JSValue* value = m_codeBlock->getConstant(src);
-        __ movl_i32r(asInteger(value), dst);
-        killLastResultRegister();
-        return;
-    }
-
-    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
-        bool atJumpTarget = false;
-        while (m_jumpTargetsPosition < m_codeBlock->jumpTargets.size() && m_codeBlock->jumpTargets[m_jumpTargetsPosition] <= currentInstructionIndex) {
-            if (m_codeBlock->jumpTargets[m_jumpTargetsPosition] == currentInstructionIndex)
-                atJumpTarget = true;
-            ++m_jumpTargetsPosition;
-        }
-
-        if (!atJumpTarget) {
-            // The argument we want is already stored in eax
-            if (dst != X86::eax)
-                __ movl_rr(X86::eax, dst);
-            killLastResultRegister();
-            return;
-        }
-    }
-
-    __ movl_mr(src * sizeof(Register), X86::edi, dst);
-    killLastResultRegister();
-}
-
-ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2, unsigned i)
-{
-    if (src2 == m_lastResultBytecodeRegister) {
-        emitGetVirtualRegister(src2, dst2, i);
-        emitGetVirtualRegister(src1, dst1, i);
-    } else {
-        emitGetVirtualRegister(src1, dst1, i);
-        emitGetVirtualRegister(src2, dst2, i);
-    }
-}
-
-// get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
-ALWAYS_INLINE void JIT::emitPutCTIArgFromVirtualRegister(unsigned src, unsigned offset, RegisterID scratch)
-{
-    if (m_codeBlock->isConstantRegisterIndex(src)) {
-        JSValue* value = m_codeBlock->getConstant(src);
-        __ movl_i32m(asInteger(value), offset + sizeof(void*), X86::esp);
-    } else {
-        __ movl_mr(src * sizeof(Register), X86::edi, scratch);
-        __ movl_rm(scratch, offset + sizeof(void*), X86::esp);
-    }
-
-    killLastResultRegister();
-}
-
-// puts an arg onto the stack, as an arg to a context threaded function.
-ALWAYS_INLINE void JIT::emitPutCTIArg(RegisterID src, unsigned offset)
-{
-    __ movl_rm(src, offset + sizeof(void*), X86::esp);
-}
-
-ALWAYS_INLINE void JIT::emitGetCTIArg(unsigned offset, RegisterID dst)
-{
-    __ movl_mr(offset + sizeof(void*), X86::esp, dst);
-}
-
-ALWAYS_INLINE void JIT::emitPutCTIArgConstant(unsigned value, unsigned offset)
-{
-    __ movl_i32m(value, offset + sizeof(void*), X86::esp);
-}
-
-ALWAYS_INLINE JSValue* JIT::getConstantImmediateNumericArg(unsigned src)
-{
-    if (m_codeBlock->isConstantRegisterIndex(src)) {
-        JSValue* value = m_codeBlock->getConstant(src);
-        return JSImmediate::isNumber(value) ? value : noValue();
-    }
-    return noValue();
-}
-
-ALWAYS_INLINE void JIT::emitPutCTIParam(void* value, unsigned name)
-{
-    __ movl_i32m(reinterpret_cast<intptr_t>(value), name * sizeof(void*), X86::esp);
-}
-
-ALWAYS_INLINE void JIT::emitPutCTIParam(RegisterID from, unsigned name)
-{
-    __ movl_rm(from, name * sizeof(void*), X86::esp);
-}
-
-ALWAYS_INLINE void JIT::emitGetCTIParam(unsigned name, RegisterID to)
-{
-    __ movl_mr(name * sizeof(void*), X86::esp, to);
-    killLastResultRegister();
-}
-
-ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
-{
-    __ movl_rm(from, entry * sizeof(Register), X86::edi);
-}
-
-ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, RegisterID to)
-{
-    __ movl_mr(entry * sizeof(Register), X86::edi, to);
-    killLastResultRegister();
-}
-
-ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
-{
-    __ movl_rm(from, dst * sizeof(Register), X86::edi);
-    m_lastResultBytecodeRegister = (from == X86::eax) ? dst : std::numeric_limits<int>::max();
-    // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
-}
-
-ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
-{
-    __ movl_i32m(asInteger(jsUndefined()), dst * sizeof(Register), X86::edi);
-    // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
-}
-
-void ctiSetReturnAddress(void** where, void* what)
-{
-    *where = what;
-}
-
-void ctiRepatchCallByReturnAddress(void* where, void* what)
-{
-    (static_cast<void**>(where))[-1] = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(what) - reinterpret_cast<uintptr_t>(where));
-}
-
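ctiRepatchCallByReturnAddress relies on the x86 near-call encoding: the four bytes ending at the return address hold a displacement relative to that return address. The same computation with explicit types, as a sketch only (assumes a 32-bit target, as everything in this file does):

    #include <cstdint>

    void repatchCallTarget(void* returnAddress, void* newTarget)
    {
        // The rel32 operand occupies the last 4 bytes of the CALL instruction,
        // i.e. the 4 bytes immediately before the return address.
        int32_t* rel32 = reinterpret_cast<int32_t*>(returnAddress) - 1;
        *rel32 = static_cast<int32_t>(reinterpret_cast<intptr_t>(newTarget)
                                      - reinterpret_cast<intptr_t>(returnAddress));
    }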
-#ifndef NDEBUG
-
-void JIT::printBytecodeOperandTypes(unsigned src1, unsigned src2)
-{
-    char which1 = '*';
-    if (m_codeBlock->isConstantRegisterIndex(src1)) {
-        JSValue* value = m_codeBlock->getConstant(src1);
-        which1 =
-            JSImmediate::isImmediate(value) ?
-                (JSImmediate::isNumber(value) ? 'i' :
-                JSImmediate::isBoolean(value) ? 'b' :
-                value->isUndefined() ? 'u' :
-                value->isNull() ? 'n' : '?')
-                :
-            (value->isString() ? 's' :
-            value->isObject() ? 'o' :
-            'k');
-    }
-    char which2 = '*';
-    if (m_codeBlock->isConstantRegisterIndex(src2)) {
-        JSValue* value = m_codeBlock->getConstant(src2);
-        which2 =
-            JSImmediate::isImmediate(value) ?
-                (JSImmediate::isNumber(value) ? 'i' :
-                JSImmediate::isBoolean(value) ? 'b' :
-                value->isUndefined() ? 'u' :
-                value->isNull() ? 'n' : '?')
-                :
-            (value->isString() ? 's' :
-            value->isObject() ? 'o' :
-            'k');
-    }
-    if ((which1 != '*') | (which2 != '*'))
-        fprintf(stderr, "Types %c %c\n", which1, which2);
-}
-
-#endif
-
-ALWAYS_INLINE JmpSrc JIT::emitNakedCall(unsigned bytecodeIndex, X86::RegisterID r)
-{
-    JmpSrc call = __ call(r);
-    m_calls.append(CallRecord(call, bytecodeIndex));
-
-    return call;
-}
-
-ALWAYS_INLINE JmpSrc JIT::emitNakedCall(unsigned bytecodeIndex, void* function)
-{
-    JmpSrc call = __ call();
-    m_calls.append(CallRecord(call, reinterpret_cast<CTIHelper_v>(function), bytecodeIndex));
-    return call;
-}
-
-ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_j helper)
-{
-#if ENABLE(OPCODE_SAMPLING)
-    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
-#endif
-    __ restoreArgumentReference();
-    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
-    JmpSrc call = __ call();
-    m_calls.append(CallRecord(call, helper, bytecodeIndex));
-#if ENABLE(OPCODE_SAMPLING)
-    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
-#endif
-    killLastResultRegister();
-
-    return call;
-}
-
-ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_o helper)
-{
-#if ENABLE(OPCODE_SAMPLING)
-    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
-#endif
-    __ restoreArgumentReference();
-    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
-    JmpSrc call = __ call();
-    m_calls.append(CallRecord(call, helper, bytecodeIndex));
-#if ENABLE(OPCODE_SAMPLING)
-    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
-#endif
-    killLastResultRegister();
-
-    return call;
-}
-
-ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_p helper)
-{
-#if ENABLE(OPCODE_SAMPLING)
-    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
-#endif
-    __ restoreArgumentReference();
-    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
-    JmpSrc call = __ call();
-    m_calls.append(CallRecord(call, helper, bytecodeIndex));
-#if ENABLE(OPCODE_SAMPLING)
-    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
-#endif
-    killLastResultRegister();
-
-    return call;
-}
-
-ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_b helper)
-{
-#if ENABLE(OPCODE_SAMPLING)
-    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
-#endif
-    __ restoreArgumentReference();
-    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
-    JmpSrc call = __ call();
-    m_calls.append(CallRecord(call, helper, bytecodeIndex));
-#if ENABLE(OPCODE_SAMPLING)
-    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
-#endif
-    killLastResultRegister();
-
-    return call;
-}
-
-ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_v helper)
-{
-#if ENABLE(OPCODE_SAMPLING)
-    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
-#endif
-    __ restoreArgumentReference();
-    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
-    JmpSrc call = __ call();
-    m_calls.append(CallRecord(call, helper, bytecodeIndex));
-#if ENABLE(OPCODE_SAMPLING)
-    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
-#endif
-    killLastResultRegister();
-
-    return call;
-}
-
-ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_s helper)
-{
-#if ENABLE(OPCODE_SAMPLING)
-    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
-#endif
-    __ restoreArgumentReference();
-    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
-    JmpSrc call = __ call();
-    m_calls.append(CallRecord(call, helper, bytecodeIndex));
-#if ENABLE(OPCODE_SAMPLING)
-    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
-#endif
-    killLastResultRegister();
-
-    return call;
-}
-
-ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_2 helper)
-{
-#if ENABLE(OPCODE_SAMPLING)
-    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
-#endif
-    __ restoreArgumentReference();
-    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
-    JmpSrc call = __ call();
-    m_calls.append(CallRecord(call, helper, bytecodeIndex));
-#if ENABLE(OPCODE_SAMPLING)
-    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
-#endif
-    killLastResultRegister();
-
-    return call;
-}
-
-JmpSrc JIT::checkStructure(RegisterID reg, Structure* structure)
-{
-    __ cmpl_i32m(reinterpret_cast<uint32_t>(structure), FIELD_OFFSET(JSCell, m_structure), reg);
-    return __ jne();
-}
-
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, unsigned bytecodeIndex)
-{
-    __ testl_i32r(JSImmediate::TagMask, reg);
-    m_slowCases.append(SlowCaseEntry(__ jne(), bytecodeIndex));
-}
-
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, unsigned bytecodeIndex, int vReg)
-{
-    if (m_codeBlock->isKnownNotImmediate(vReg))
-        return;
-
-    emitJumpSlowCaseIfNotJSCell(reg, bytecodeIndex);
-}
-
-ALWAYS_INLINE bool JIT::linkSlowCaseIfNotJSCell(const Vector<SlowCaseEntry>::iterator& iter, int vReg)
-{
-    if (m_codeBlock->isKnownNotImmediate(vReg))
-        return false;
-
-    __ link(iter->from, __ label());
-    return true;
-}
-
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmNum(RegisterID reg, unsigned bytecodeIndex)
-{
-    __ testl_i32r(JSImmediate::TagBitTypeInteger, reg);
-    m_slowCases.append(SlowCaseEntry(__ je(), bytecodeIndex));
-}
-
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmNums(RegisterID reg1, RegisterID reg2, unsigned bytecodeIndex)
-{
-    __ movl_rr(reg1, X86::ecx);
-    __ andl_rr(reg2, X86::ecx);
-    emitJumpSlowCaseIfNotImmNum(X86::ecx, bytecodeIndex);
-}
-
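emitJumpSlowCaseIfNotImmNums works because the integer tag is a single bit that survives a bitwise AND only when both operands carry it, so one test covers two registers. In scalar C++ terms (the tag's concrete value is an assumption inferred from the masks and shifts used in this file):

    #include <cstdint>

    static const uint32_t TagBitTypeInteger = 1u; // assumption: low bit tags immediate ints

    // One test instead of two: AND the operands, then check the tag bit.
    inline bool bothImmediateNumbers(uint32_t op1, uint32_t op2)
    {
        return (op1 & op2 & TagBitTypeInteger) != 0;
    }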
-ALWAYS_INLINE unsigned JIT::getDeTaggedConstantImmediate(JSValue* imm)
-{
-    ASSERT(JSImmediate::isNumber(imm));
-    return asInteger(imm) & ~JSImmediate::TagBitTypeInteger;
-}
-
-ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
-{
-    __ subl_i8r(JSImmediate::TagBitTypeInteger, reg);
-}
-
-ALWAYS_INLINE JmpSrc JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
-{
-    __ subl_i8r(JSImmediate::TagBitTypeInteger, reg);
-    return __ je();
-}
-
-ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID reg)
-{
-    __ addl_i8r(JSImmediate::TagBitTypeInteger, reg);
-}
-
-ALWAYS_INLINE void JIT::emitFastArithPotentiallyReTagImmediate(RegisterID reg)
-{
-    __ orl_i8r(JSImmediate::TagBitTypeInteger, reg);
-}
-
-ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg)
-{
-    __ sarl_i8r(1, reg);
-}
-
-ALWAYS_INLINE void JIT::emitFastArithIntToImmOrSlowCase(RegisterID reg, unsigned bytecodeIndex)
-{
-    __ addl_rr(reg, reg);
-    m_slowCases.append(SlowCaseEntry(__ jo(), bytecodeIndex));
-    emitFastArithReTagImmediate(reg);
-}
-
-ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID reg)
-{
-    __ addl_rr(reg, reg);
-    emitFastArithReTagImmediate(reg);
-}
-
-ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
-{
-    __ shl_i8r(JSImmediate::ExtendedPayloadShift, reg);
-    __ orl_i8r(JSImmediate::FullTagTypeBool, reg);
-}
-
-JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
-    : m_interpreter(globalData->interpreter)
-    , m_globalData(globalData)
-    , m_codeBlock(codeBlock)
-    , m_labels(codeBlock ? codeBlock->instructions.size() : 0)
-    , m_propertyAccessCompilationInfo(codeBlock ? codeBlock->propertyAccessInstructions.size() : 0)
-    , m_callStructureStubCompilationInfo(codeBlock ? codeBlock->callLinkInfos.size() : 0)
-    , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
-    , m_jumpTargetsPosition(0)
-{
-}
-
-#define CTI_COMPILE_BINARY_OP(name) \
-    case name: { \
-        emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx); \
-        emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 4, X86::ecx); \
-        emitCTICall(i, Interpreter::cti_##name); \
-        emitPutVirtualRegister(instruction[i + 1].u.operand); \
-        i += 4; \
-        break; \
-    }
-
-#define CTI_COMPILE_UNARY_OP(name) \
-    case name: { \
-        emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx); \
-        emitCTICall(i, Interpreter::cti_##name); \
-        emitPutVirtualRegister(instruction[i + 1].u.operand); \
-        i += 3; \
-        break; \
-    }
-
-static void unreachable()
-{
-    ASSERT_NOT_REACHED();
-    exit(1);
-}
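The de-tag/re-tag helpers above encode the usual low-bit-tagged integer arithmetic: with n stored as 2n+1, addition only needs one operand de-tagged, since (2a+1) + (2b+1) - 1 = 2(a+b) + 1. A scalar sketch of that identity (tag assumed to be the low bit, as the sar/add pairs in this file imply):

    #include <cstdint>

    static const int32_t tag = 1;                                 // TagBitTypeInteger
    inline int32_t toImm(int32_t n)   { return (n << 1) | tag; }  // emitFastArithIntToImmNoCheck
    inline int32_t toInt(int32_t imm) { return imm >> 1; }        // emitFastArithImmToInt

    // op_add's fast path: de-tag one operand, then a plain add keeps the tag.
    inline int32_t addImm(int32_t a, int32_t b) { return (a - tag) + b; }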
…
     emitPutCTIArgConstant(thisRegister, 16);
     emitPutCTIArgConstant(reinterpret_cast<unsigned>(instruction), 20);
+}
+
+#if !ENABLE(JIT_OPTIMIZE_CALL)
+
+void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned i, unsigned)
+{
+    int dst = instruction[1].u.operand;
+    int callee = instruction[2].u.operand;
+    int argCount = instruction[3].u.operand;
+    int registerOffset = instruction[4].u.operand;
+
+    // Handle eval
+    JmpSrc wasEval;
+    if (opcodeID == op_call_eval) {
+        emitGetVirtualRegister(callee, X86::ecx, i);
+        compileOpCallEvalSetupArgs(instruction);
+
+        emitCTICall(i, Interpreter::cti_op_call_eval);
+        __ cmpl_i32r(asInteger(JSImmediate::impossibleValue()), X86::eax);
+        wasEval = __ jne();
+    }
+
+    emitGetVirtualRegister(callee, X86::ecx, i);
+    // The arguments have been set up on the hot path for op_call_eval
+    if (opcodeID == op_call)
+        compileOpCallSetupArgs(instruction);
+    else if (opcodeID == op_construct)
+        compileOpConstructSetupArgs(instruction);
+
+    // Check for JSFunctions.
+    emitJumpSlowCaseIfNotJSCell(X86::ecx, i);
+    __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsFunctionVptr), X86::ecx);
+    m_slowCases.append(SlowCaseEntry(__ jne(), i));
+
+    // First, in the case of a construct, allocate the new object.
+    if (opcodeID == op_construct) {
+        emitCTICall(i, Interpreter::cti_op_construct_JSConstruct);
+        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+        emitGetVirtualRegister(callee, X86::ecx, i);
+    }
+
+    // Speculatively roll the callframe, assuming argCount will match the arity.
+    __ movl_rm(X86::edi, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)), X86::edi);
+    __ addl_i32r(registerOffset * static_cast<int>(sizeof(Register)), X86::edi);
+    __ movl_i32r(argCount, X86::edx);
+
+    emitNakedCall(i, m_interpreter->m_ctiVirtualCall);
+
+    if (opcodeID == op_call_eval)
+        __ link(wasEval, __ label());
+
+    // Put the return value in dst. In the interpreter, op_ret does this.
+    emitPutVirtualRegister(dst);
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+    __ movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
+#endif
+}
+
+void JIT::compileOpCallSlowCase(Instruction* instruction, unsigned i, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
+{
+    int dst = instruction[1].u.operand;
+
+    __ link(iter->from, __ label());
+    __ link((++iter)->from, __ label());
+
+    // This handles host functions
+    emitCTICall(i, ((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction));
+    // Put the return value in dst. In the interpreter, op_ret does this.
+    emitPutVirtualRegister(dst);
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+    __ movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
+#endif
+}
+
+#else
+
+static void unreachable()
+{
+    ASSERT_NOT_REACHED();
+    exit(1);
+}
+
…
 }

-void JIT::compileOpStrictEq(Instruction* instruction, unsigned i, CompileOpStrictEqType type)
-{
-    bool negated = (type == OpNStrictEq);
-
-    unsigned dst = instruction[1].u.operand;
-    unsigned src1 = instruction[2].u.operand;
-    unsigned src2 = instruction[3].u.operand;
-
-    emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx, i);
-
-    __ testl_i32r(JSImmediate::TagMask, X86::eax);
-    JmpSrc firstNotImmediate = __ je();
-    __ testl_i32r(JSImmediate::TagMask, X86::edx);
-    JmpSrc secondNotImmediate = __ je();
-
-    __ cmpl_rr(X86::edx, X86::eax);
-    if (negated)
-        __ setne_r(X86::eax);
-    else
-        __ sete_r(X86::eax);
-    __ movzbl_rr(X86::eax, X86::eax);
-    emitTagAsBoolImmediate(X86::eax);
-
-    JmpSrc bothWereImmediates = __ jmp();
-
-    __ link(firstNotImmediate, __ label());
-
-    // check that edx is immediate but not the zero immediate
-    __ testl_i32r(JSImmediate::TagMask, X86::edx);
-    __ setz_r(X86::ecx);
-    __ movzbl_rr(X86::ecx, X86::ecx); // ecx is now 1 if edx was nonimmediate
-    __ cmpl_i32r(asInteger(JSImmediate::zeroImmediate()), X86::edx);
-    __ sete_r(X86::edx);
-    __ movzbl_rr(X86::edx, X86::edx); // edx is now 1 if edx was the 0 immediate
-    __ orl_rr(X86::ecx, X86::edx);
-
-    m_slowCases.append(SlowCaseEntry(__ jnz(), i));
-
-    __ movl_i32r(asInteger(jsBoolean(negated)), X86::eax);
-
-    JmpSrc firstWasNotImmediate = __ jmp();
-
-    __ link(secondNotImmediate, __ label());
-    // check that eax is not the zero immediate (we know it must be immediate)
-    __ cmpl_i32r(asInteger(JSImmediate::zeroImmediate()), X86::eax);
-    m_slowCases.append(SlowCaseEntry(__ je(), i));
-
-    __ movl_i32r(asInteger(jsBoolean(negated)), X86::eax);
-
-    __ link(bothWereImmediates, __ label());
-    __ link(firstWasNotImmediate, __ label());
-
-    emitPutVirtualRegister(dst);
-}
-
+void JIT::compileOpCallSlowCase(Instruction* instruction, unsigned i, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
+{
+    int dst = instruction[1].u.operand;
+    int callee = instruction[2].u.operand;
+    int argCount = instruction[3].u.operand;
+    int registerOffset = instruction[4].u.operand;
+
+    __ link(iter->from, __ label());
+
+    // The arguments have been set up on the hot path for op_call_eval
+    if (opcodeID == op_call)
+        compileOpCallSetupArgs(instruction);
+    else if (opcodeID == op_construct)
+        compileOpConstructSetupArgs(instruction);
+
+    // Fast check for JS function.
+    __ testl_i32r(JSImmediate::TagMask, X86::ecx);
+    JmpSrc callLinkFailNotObject = __ jne();
+    __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsFunctionVptr), X86::ecx);
+    JmpSrc callLinkFailNotJSFunction = __ jne();
+
+    // First, in the case of a construct, allocate the new object.
+    if (opcodeID == op_construct) {
+        emitCTICall(i, Interpreter::cti_op_construct_JSConstruct);
+        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+        emitGetVirtualRegister(callee, X86::ecx, i);
+    }
+
+    __ movl_i32r(argCount, X86::edx);
+
+    // Speculatively roll the callframe, assuming argCount will match the arity.
+    __ movl_rm(X86::edi, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)), X86::edi);
+    __ addl_i32r(registerOffset * static_cast<int>(sizeof(Register)), X86::edi);
+
+    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation =
+        emitNakedCall(i, m_interpreter->m_ctiVirtualCallPreLink);
+
+    JmpSrc storeResultForFirstRun = __ jmp();
+
+    // This is the address for the cold path *after* the first run (which tries to link the call).
+    m_callStructureStubCompilationInfo[callLinkInfoIndex].coldPathOther = __ label();
+
+    // The arguments have been set up on the hot path for op_call_eval
+    if (opcodeID == op_call)
+        compileOpCallSetupArgs(instruction);
+    else if (opcodeID == op_construct)
+        compileOpConstructSetupArgs(instruction);
+
+    // Check for JSFunctions.
+    __ testl_i32r(JSImmediate::TagMask, X86::ecx);
+    JmpSrc isNotObject = __ jne();
+    __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsFunctionVptr), X86::ecx);
+    JmpSrc isJSFunction = __ je();
+
+    // This handles host functions
+    JmpDst notJSFunctionlabel = __ label();
+    __ link(isNotObject, notJSFunctionlabel);
+    __ link(callLinkFailNotObject, notJSFunctionlabel);
+    __ link(callLinkFailNotJSFunction, notJSFunctionlabel);
+    emitCTICall(i, ((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction));
+    JmpSrc wasNotJSFunction = __ jmp();
+
+    // Next, handle JSFunctions...
+    __ link(isJSFunction, __ label());
+
+    // First, in the case of a construct, allocate the new object.
+    if (opcodeID == op_construct) {
+        emitCTICall(i, Interpreter::cti_op_construct_JSConstruct);
+        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+        emitGetVirtualRegister(callee, X86::ecx, i);
+    }
+
+    // Speculatively roll the callframe, assuming argCount will match the arity.
+    __ movl_rm(X86::edi, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)), X86::edi);
+    __ addl_i32r(registerOffset * static_cast<int>(sizeof(Register)), X86::edi);
+    __ movl_i32r(argCount, X86::edx);
+
+    emitNakedCall(i, m_interpreter->m_ctiVirtualCall);
+
+    // Put the return value in dst. In the interpreter, op_ret does this.
+    JmpDst storeResult = __ label();
+    __ link(wasNotJSFunction, storeResult);
+    __ link(storeResultForFirstRun, storeResult);
+    emitPutVirtualRegister(dst);
+}

-void JIT::emitSlowScriptCheck(unsigned bytecodeIndex)
-{
-    __ subl_i8r(1, X86::esi);
-    JmpSrc skipTimeout = __ jne();
-    emitCTICall(bytecodeIndex, Interpreter::cti_timeout_check);
-
-    emitGetCTIParam(CTI_ARGS_globalData, X86::ecx);
-    __ movl_mr(FIELD_OFFSET(JSGlobalData, interpreter), X86::ecx, X86::ecx);
-    __ movl_mr(FIELD_OFFSET(Interpreter, m_ticksUntilNextTimeoutCheck), X86::ecx, X86::esi);
-    __ link(skipTimeout, __ label());
-
-    killLastResultRegister();
-}
-
-/*
-  This is required since number representation is canonical - values representable as a JSImmediate should not be stored in a JSNumberCell.
-
-  In the common case, the double value from 'xmmSource' is written to the reusable JSNumberCell pointed to by 'jsNumberCell', then 'jsNumberCell'
-  is written to the output SF Register 'dst', and then a jump is planted (stored into *wroteJSNumberCell).
-
-  However if the value from xmmSource is representable as a JSImmediate, then the JSImmediate value will be written to the output, and flow
-  control will fall through from the code planted.
-*/
-void JIT::putDoubleResultToJSNumberCellOrJSImmediate(X86::XMMRegisterID xmmSource, X86::RegisterID jsNumberCell, unsigned dst, JmpSrc* wroteJSNumberCell, X86::XMMRegisterID tempXmm, X86::RegisterID tempReg1, X86::RegisterID tempReg2)
-{
-    // convert (double -> JSImmediate -> double), and check if the value is unchanged - in which case the value is representable as a JSImmediate.
-    __ cvttsd2si_rr(xmmSource, tempReg1);
-    __ addl_rr(tempReg1, tempReg1);
-    __ sarl_i8r(1, tempReg1);
-    __ cvtsi2sd_rr(tempReg1, tempXmm);
-    // Compare & branch if immediate.
-    __ ucomis_rr(tempXmm, xmmSource);
-    JmpSrc resultIsImm = __ je();
-    JmpDst resultLookedLikeImmButActuallyIsnt = __ label();
-
-    // Store the result to the JSNumberCell and jump.
-    __ movsd_rm(xmmSource, FIELD_OFFSET(JSNumberCell, m_value), jsNumberCell);
-    if (jsNumberCell != X86::eax)
-        __ movl_rr(jsNumberCell, X86::eax);
-    emitPutVirtualRegister(dst);
-    *wroteJSNumberCell = __ jmp();
-
-    __ link(resultIsImm, __ label());
-    // value == (double)(JSImmediate)value... or at least, it looks that way...
-    // ucomi will report that (0 == -0), and will report true if either input is NaN (result is unordered).
-    __ link(__ jp(), resultLookedLikeImmButActuallyIsnt); // Actually was a NaN
-    __ pextrw_irr(3, xmmSource, tempReg2);
-    __ cmpl_i32r(0x8000, tempReg2);
-    __ link(__ je(), resultLookedLikeImmButActuallyIsnt); // Actually was -0
-    // Yes it really really really is representable as a JSImmediate.
-    emitFastArithIntToImmNoCheck(tempReg1);
-    if (tempReg1 != X86::eax)
-        __ movl_rr(tempReg1, X86::eax);
-    emitPutVirtualRegister(dst);
-}
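In scalar C++ terms, the representability test that putDoubleResultToJSNumberCellOrJSImmediate performs with cvttsd2si/ucomisd looks roughly like the sketch below. The 31-bit payload width follows from the add/sar tag dance above, and the NaN and -0 rejections mirror the jp and pextrw checks; this is an illustration, not the shipped code:

    #include <cmath>
    #include <cstdint>

    static bool representableAsImmediate(double d)
    {
        if (std::isnan(d))
            return false;                  // ucomisd reports NaN as unordered (PF set)
        if (d < -1073741824.0 || d > 1073741823.0)
            return false;                  // must survive the <<1 retag, so 31 bits
        int32_t truncated = static_cast<int32_t>(d);   // cvttsd2si
        if (static_cast<double>(truncated) != d)
            return false;                  // fractional part lost: keep it in a JSNumberCell
        return !(d == 0.0 && std::signbit(d));         // -0 compares equal to 0 but has no immediate form
    }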
-
-void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types, unsigned i)
-{
-    Structure* numberStructure = m_globalData->numberStructure.get();
-    JmpSrc wasJSNumberCell1;
-    JmpSrc wasJSNumberCell1b;
-    JmpSrc wasJSNumberCell2;
-    JmpSrc wasJSNumberCell2b;
-
-    emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx, i);
-
-    if (types.second().isReusable() && isSSE2Present()) {
-        ASSERT(types.second().mightBeNumber());
-
-        // Check op2 is a number
-        __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
-        JmpSrc op2imm = __ jne();
-        if (!types.second().definitelyIsNumber()) {
-            emitJumpSlowCaseIfNotJSCell(X86::edx, i, src2);
-            __ cmpl_i32m(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
-            m_slowCases.append(SlowCaseEntry(__ jne(), i));
-        }
-
-        // (1) In this case src2 is a reusable number cell.
-        //     Slow case if src1 is not a number type.
-        __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
-        JmpSrc op1imm = __ jne();
-        if (!types.first().definitelyIsNumber()) {
-            emitJumpSlowCaseIfNotJSCell(X86::eax, i, src1);
-            __ cmpl_i32m(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
-            m_slowCases.append(SlowCaseEntry(__ jne(), i));
-        }
-
-        // (1a) if we get here, src1 is also a number cell
-        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
-        JmpSrc loadedDouble = __ jmp();
-        // (1b) if we get here, src1 is an immediate
-        __ link(op1imm, __ label());
-        emitFastArithImmToInt(X86::eax);
-        __ cvtsi2sd_rr(X86::eax, X86::xmm0);
-        // (1c)
-        __ link(loadedDouble, __ label());
-        if (opcodeID == op_add)
-            __ addsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
-        else if (opcodeID == op_sub)
-            __ subsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
-        else {
-            ASSERT(opcodeID == op_mul);
-            __ mulsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
-        }
-
-        putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::edx, dst, &wasJSNumberCell2, X86::xmm1, X86::ecx, X86::eax);
-        wasJSNumberCell2b = __ jmp();
-
-        // (2) This handles cases where src2 is an immediate number.
-        //     Two slow cases - either src1 isn't an immediate, or the subtract overflows.
-        __ link(op2imm, __ label());
-        emitJumpSlowCaseIfNotImmNum(X86::eax, i);
-    } else if (types.first().isReusable() && isSSE2Present()) {
-        ASSERT(types.first().mightBeNumber());
-
-        // Check op1 is a number
-        __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
-        JmpSrc op1imm = __ jne();
-        if (!types.first().definitelyIsNumber()) {
-            emitJumpSlowCaseIfNotJSCell(X86::eax, i, src1);
-            __ cmpl_i32m(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
-            m_slowCases.append(SlowCaseEntry(__ jne(), i));
-        }
-
-        // (1) In this case src1 is a reusable number cell.
-        //     Slow case if src2 is not a number type.
-        __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
-        JmpSrc op2imm = __ jne();
-        if (!types.second().definitelyIsNumber()) {
-            emitJumpSlowCaseIfNotJSCell(X86::edx, i, src2);
-            __ cmpl_i32m(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
-            m_slowCases.append(SlowCaseEntry(__ jne(), i));
-        }
-
-        // (1a) if we get here, src2 is also a number cell
-        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1);
-        JmpSrc loadedDouble = __ jmp();
-        // (1b) if we get here, src2 is an immediate
-        __ link(op2imm, __ label());
-        emitFastArithImmToInt(X86::edx);
-        __ cvtsi2sd_rr(X86::edx, X86::xmm1);
-        // (1c)
-        __ link(loadedDouble, __ label());
-        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
-        if (opcodeID == op_add)
-            __ addsd_rr(X86::xmm1, X86::xmm0);
-        else if (opcodeID == op_sub)
-            __ subsd_rr(X86::xmm1, X86::xmm0);
-        else {
-            ASSERT(opcodeID == op_mul);
-            __ mulsd_rr(X86::xmm1, X86::xmm0);
-        }
-        __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::eax);
-        emitPutVirtualRegister(dst);
-
-        putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::eax, dst, &wasJSNumberCell1, X86::xmm1, X86::ecx, X86::edx);
-        wasJSNumberCell1b = __ jmp();
-
-        // (2) This handles cases where src1 is an immediate number.
-        //     Two slow cases - either src2 isn't an immediate, or the subtract overflows.
-        __ link(op1imm, __ label());
-        emitJumpSlowCaseIfNotImmNum(X86::edx, i);
-    } else
-        emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
-
-    if (opcodeID == op_add) {
-        emitFastArithDeTagImmediate(X86::eax);
-        __ addl_rr(X86::edx, X86::eax);
-        m_slowCases.append(SlowCaseEntry(__ jo(), i));
-    } else if (opcodeID == op_sub) {
-        __ subl_rr(X86::edx, X86::eax);
-        m_slowCases.append(SlowCaseEntry(__ jo(), i));
-        emitFastArithReTagImmediate(X86::eax);
-    } else {
-        ASSERT(opcodeID == op_mul);
-        // convert eax & edx from JSImmediates to ints, and check if either are zero
-        emitFastArithImmToInt(X86::edx);
-        JmpSrc op1Zero = emitFastArithDeTagImmediateJumpIfZero(X86::eax);
-        __ testl_rr(X86::edx, X86::edx);
-        JmpSrc op2NonZero = __ jne();
-        __ link(op1Zero, __ label());
-        // if either input is zero, add the two together, and check if the result is < 0.
-        // If it is, we have a problem (N < 0), (N * 0) == -0, not representable as a JSImmediate.
-        __ movl_rr(X86::eax, X86::ecx);
-        __ addl_rr(X86::edx, X86::ecx);
-        m_slowCases.append(SlowCaseEntry(__ js(), i));
-        // Skip the above check if neither input is zero
-        __ link(op2NonZero, __ label());
-        __ imull_rr(X86::edx, X86::eax);
-        m_slowCases.append(SlowCaseEntry(__ jo(), i));
-        emitFastArithReTagImmediate(X86::eax);
-    }
-    emitPutVirtualRegister(dst);
-
-    if (types.second().isReusable() && isSSE2Present()) {
-        __ link(wasJSNumberCell2, __ label());
-        __ link(wasJSNumberCell2b, __ label());
-    } else if (types.first().isReusable() && isSSE2Present()) {
-        __ link(wasJSNumberCell1, __ label());
-        __ link(wasJSNumberCell1b, __ label());
-    }
-}
-
-void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types, unsigned i)
-{
-    JmpDst here = __ label();
-    __ link(iter->from, here);
-    if (types.second().isReusable() && isSSE2Present()) {
-        if (!types.first().definitelyIsNumber()) {
-            if (linkSlowCaseIfNotJSCell(++iter, src1))
-                ++iter;
-            __ link(iter->from, here);
-        }
-        if (!types.second().definitelyIsNumber()) {
-            if (linkSlowCaseIfNotJSCell(++iter, src2))
-                ++iter;
-            __ link(iter->from, here);
-        }
-        __ link((++iter)->from, here);
-    } else if (types.first().isReusable() && isSSE2Present()) {
-        if (!types.first().definitelyIsNumber()) {
-            if (linkSlowCaseIfNotJSCell(++iter, src1))
-                ++iter;
-            __ link(iter->from, here);
-        }
-        if (!types.second().definitelyIsNumber()) {
-            if (linkSlowCaseIfNotJSCell(++iter, src2))
-                ++iter;
-            __ link(iter->from, here);
-        }
-        __ link((++iter)->from, here);
-    } else
-        __ link((++iter)->from, here);
-
-    // additional entry point to handle -0 cases.
-    if (opcodeID == op_mul)
-        __ link((++iter)->from, here);
-
-    emitPutCTIArgFromVirtualRegister(src1, 0, X86::ecx);
-    emitPutCTIArgFromVirtualRegister(src2, 4, X86::ecx);
-    if (opcodeID == op_add)
-        emitCTICall(i, Interpreter::cti_op_add);
-    else if (opcodeID == op_sub)
-        emitCTICall(i, Interpreter::cti_op_sub);
-    else {
-        ASSERT(opcodeID == op_mul);
-        emitCTICall(i, Interpreter::cti_op_mul);
-    }
-    emitPutVirtualRegister(dst);
-}
-
-void JIT::privateCompileMainPass()
-{
-    Instruction* instruction = m_codeBlock->instructions.begin();
-    unsigned instructionCount = m_codeBlock->instructions.size();
-
-    unsigned propertyAccessInstructionIndex = 0;
-    unsigned callLinkInfoIndex = 0;
-
-    for (unsigned i = 0; i < instructionCount; ) {
-        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(instruction[i].u.opcode), "privateCompileMainPass gone bad @ %d", i);
-
-#if ENABLE(OPCODE_SAMPLING)
-        if (i > 0) // Avoid the overhead of sampling op_enter twice.
-            __ movl_i32m(m_interpreter->sampler()->encodeSample(instruction + i), m_interpreter->sampler()->sampleSlot());
-#endif
-
-        m_labels[i] = __ label();
-        OpcodeID opcodeID = m_interpreter->getOpcodeID(instruction[i].u.opcode);
-        switch (opcodeID) {
-        case op_mov: {
-            unsigned src = instruction[i + 2].u.operand;
-            if (m_codeBlock->isConstantRegisterIndex(src))
-                __ movl_i32r(asInteger(m_codeBlock->getConstant(src)), X86::eax);
-            else
-                emitGetVirtualRegister(src, X86::eax, i);
-            emitPutVirtualRegister(instruction[i + 1].u.operand);
-            i += 3;
-            break;
-        }
-        case op_add: {
-            unsigned dst = instruction[i + 1].u.operand;
-            unsigned src1 = instruction[i + 2].u.operand;
-            unsigned src2 = instruction[i + 3].u.operand;
-
-            if (JSValue* value = getConstantImmediateNumericArg(src1)) {
-                emitGetVirtualRegister(src2, X86::eax, i);
-                emitJumpSlowCaseIfNotImmNum(X86::eax, i);
-                __ addl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
-                m_slowCases.append(SlowCaseEntry(__ jo(), i));
-                emitPutVirtualRegister(dst);
-            } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
-                emitGetVirtualRegister(src1, X86::eax, i);
-                emitJumpSlowCaseIfNotImmNum(X86::eax, i);
-                __ addl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
-                m_slowCases.append(SlowCaseEntry(__ jo(), i));
-                emitPutVirtualRegister(dst);
-            } else {
-                OperandTypes types = OperandTypes::fromInt(instruction[i + 4].u.operand);
-                if (types.first().mightBeNumber() && types.second().mightBeNumber())
-                    compileBinaryArithOp(op_add, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
-                else {
-                    emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx);
-                    emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 4, X86::ecx);
-                    emitCTICall(i, Interpreter::cti_op_add);
-                    emitPutVirtualRegister(instruction[i + 1].u.operand);
-                }
-            }
-
-            i += 5;
-            break;
-        }
-        case op_end: {
-            if (m_codeBlock->needsFullScopeChain)
-                emitCTICall(i, Interpreter::cti_op_end);
-            emitGetVirtualRegister(instruction[i + 1].u.operand, X86::eax, i);
-            __ pushl_m(RegisterFile::ReturnPC * static_cast<int>(sizeof(Register)), X86::edi);
-            __ ret();
-            i += 2;
-            break;
-        }
-        case op_jmp: {
-            unsigned target = instruction[i + 1].u.operand;
-            m_jmpTable.append(JmpTable(__ jmp(), i + 1 + target));
-            i += 2;
-            break;
-        }
-        case op_pre_inc: {
-            int srcDst = instruction[i + 1].u.operand;
-            emitGetVirtualRegister(srcDst, X86::eax, i);
-            emitJumpSlowCaseIfNotImmNum(X86::eax, i);
-            __ addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
-            m_slowCases.append(SlowCaseEntry(__ jo(), i));
-            emitPutVirtualRegister(srcDst);
-            i += 2;
-            break;
-        }
-        case op_loop: {
-            emitSlowScriptCheck(i);
-
-            unsigned target = instruction[i + 1].u.operand;
-            m_jmpTable.append(JmpTable(__ jmp(), i + 1 + target));
-            i += 2;
-            break;
-        }
-        case op_loop_if_less: {
-            emitSlowScriptCheck(i);
-
-            unsigned target = instruction[i + 3].u.operand;
-            JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
-            if (src2imm) {
-                emitGetVirtualRegister(instruction[i + 1].u.operand, X86::edx, i);
-                emitJumpSlowCaseIfNotImmNum(X86::edx, i);
-                __ cmpl_i32r(asInteger(src2imm), X86::edx);
-                m_jmpTable.append(JmpTable(__ jl(), i + 3 + target));
-            } else {
-                emitGetVirtualRegisters(instruction[i + 1].u.operand, X86::eax, instruction[i + 2].u.operand, X86::edx, i);
-                emitJumpSlowCaseIfNotImmNum(X86::eax, i);
-                emitJumpSlowCaseIfNotImmNum(X86::edx, i);
-                __ cmpl_rr(X86::edx, X86::eax);
-                m_jmpTable.append(JmpTable(__ jl(), i + 3 + target));
-            }
-            i += 4;
-            break;
-        }
-        case op_loop_if_lesseq: {
-            emitSlowScriptCheck(i);
-
-            unsigned target = instruction[i + 3].u.operand;
-            JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
-            if (src2imm) {
-                emitGetVirtualRegister(instruction[i + 1].u.operand, X86::edx, i);
-                emitJumpSlowCaseIfNotImmNum(X86::edx, i);
-                __ cmpl_i32r(asInteger(src2imm), X86::edx);
-                m_jmpTable.append(JmpTable(__ jle(), i + 3 + target));
-            } else {
-                emitGetVirtualRegisters(instruction[i + 1].u.operand, X86::eax, instruction[i + 2].u.operand, X86::edx, i);
-                emitJumpSlowCaseIfNotImmNum(X86::eax, i);
-                emitJumpSlowCaseIfNotImmNum(X86::edx, i);
-                __ cmpl_rr(X86::edx, X86::eax);
-                m_jmpTable.append(JmpTable(__ jle(), i + 3 + target));
-            }
-            i += 4;
-            break;
-        }
-        case op_new_object: {
-            emitCTICall(i, Interpreter::cti_op_new_object);
-            emitPutVirtualRegister(instruction[i + 1].u.operand);
-            i += 2;
-            break;
-        }
-        case op_put_by_id: {
-            // In order to be able to repatch both the Structure, and the object offset, we store one pointer,
-            // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
-            // such that the Structure & offset are always at the same distance from this.
-
-            int baseVReg = instruction[i + 1].u.operand;
-            emitGetVirtualRegisters(baseVReg, X86::eax, instruction[i + 3].u.operand, X86::edx, i);
-
-            ASSERT(m_codeBlock->propertyAccessInstructions[propertyAccessInstructionIndex].bytecodeIndex == i);
-
-            // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
-            emitJumpSlowCaseIfNotJSCell(X86::eax, i, baseVReg);
-
-            JmpDst hotPathBegin = __ label();
-            m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
-            ++propertyAccessInstructionIndex;
-
-            // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
-            __ cmpl_i32m(repatchGetByIdDefaultStructure, FIELD_OFFSET(JSCell, m_structure), X86::eax);
-            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetPutByIdStructure);
-            m_slowCases.append(SlowCaseEntry(__ jne(), i));
-
-            // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
-            __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
-            __ movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
-            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetPutByIdPropertyMapOffset);
-
-            i += 8;
-            break;
-        }
-        case op_get_by_id: {
-            // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be repatched.
-            // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
-            // to array-length / prototype access trampolines, and finally we also set the property-map access offset as a label
-            // to jump back to if one of these trampolines finds a match.
-
-            int baseVReg = instruction[i + 2].u.operand;
-            emitGetVirtualRegister(baseVReg, X86::eax, i);
-
-            ASSERT(m_codeBlock->propertyAccessInstructions[propertyAccessInstructionIndex].bytecodeIndex == i);
-
-            emitJumpSlowCaseIfNotJSCell(X86::eax, i, baseVReg);
-
-            JmpDst hotPathBegin = __ label();
-            m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
-            ++propertyAccessInstructionIndex;
-
-            __ cmpl_i32m(repatchGetByIdDefaultStructure, FIELD_OFFSET(JSCell, m_structure), X86::eax);
-            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdStructure);
-            m_slowCases.append(SlowCaseEntry(__ jne(), i));
-            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdBranchToSlowCase);
-
-            __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
-            __ movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::eax);
-            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdPropertyMapOffset);
-            emitPutVirtualRegister(instruction[i + 1].u.operand);
-
-            i += 8;
-            break;
-        }
-        case op_instanceof: {
-            emitGetVirtualRegister(instruction[i + 2].u.operand, X86::eax, i); // value
-            emitGetVirtualRegister(instruction[i + 3].u.operand, X86::ecx, i); // baseVal
-            emitGetVirtualRegister(instruction[i + 4].u.operand, X86::edx, i); // proto
-
-            // check if any are immediates
-            __ orl_rr(X86::eax, X86::ecx);
-            __ orl_rr(X86::edx, X86::ecx);
-            __ testl_i32r(JSImmediate::TagMask, X86::ecx);
-
-            m_slowCases.append(SlowCaseEntry(__ jnz(), i));
-
-            // check that all are object type - this is a bit of a bithack to avoid excess branching;
-            // we check that the sum of the three type codes from Structures is exactly 3 * ObjectType,
-            // this works because NumberType and StringType are smaller
-            __ movl_i32r(3 * ObjectType, X86::ecx);
-            __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::eax);
-            __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::edx, X86::edx);
-            __ subl_mr(FIELD_OFFSET(Structure, m_typeInfo.m_type), X86::eax, X86::ecx);
-            __ subl_mr(FIELD_OFFSET(Structure, m_typeInfo.m_type), X86::edx, X86::ecx);
-            emitGetVirtualRegister(instruction[i + 3].u.operand, X86::edx, i); // reload baseVal
-            __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::edx, X86::edx);
-            __ cmpl_rm(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_type), X86::edx);
-
-            m_slowCases.append(SlowCaseEntry(__ jne(), i));
-
-            // check that baseVal's flags include ImplementsHasInstance but not OverridesHasInstance
-            __ movl_mr(FIELD_OFFSET(Structure, m_typeInfo.m_flags), X86::edx, X86::ecx);
-            __ andl_i32r(ImplementsHasInstance | OverridesHasInstance, X86::ecx);
-            __ cmpl_i32r(ImplementsHasInstance, X86::ecx);
-
-            m_slowCases.append(SlowCaseEntry(__ jne(), i));
-
-            emitGetVirtualRegister(instruction[i + 2].u.operand, X86::ecx, i); // reload value
-            emitGetVirtualRegister(instruction[i + 4].u.operand, X86::edx, i); // reload proto
-
-            // optimistically load true result
-            __ movl_i32r(asInteger(jsBoolean(true)), X86::eax);
-
-            JmpDst loop = __ label();
-
-            // load value's prototype
-            __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::ecx, X86::ecx);
-            __ movl_mr(FIELD_OFFSET(Structure, m_prototype), X86::ecx, X86::ecx);
-
-            __ cmpl_rr(X86::ecx, X86::edx);
-            JmpSrc exit = __ je();
-
-            __ cmpl_i32r(asInteger(jsNull()), X86::ecx);
-            JmpSrc goToLoop = __ jne();
-            __ link(goToLoop, loop);
-
-            __ movl_i32r(asInteger(jsBoolean(false)), X86::eax);
-
-            __ link(exit, __ label());
-
-            emitPutVirtualRegister(instruction[i + 1].u.operand);
-
-            i += 5;
-            break;
-        }
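The loop emitted for op_instanceof above is the standard prototype-chain walk. The same logic in C++, using a hypothetical node type in place of the Structure/m_prototype loads (a sketch, not WebKit's API):

    struct ProtoNode { const ProtoNode* prototype; }; // stand-in for JSCell -> Structure -> m_prototype

    // Optimistically assume true (the generated code preloads jsBoolean(true));
    // fall out with false when the chain reaches null without meeting 'proto'.
    static bool isInstanceOf(const ProtoNode* value, const ProtoNode* proto)
    {
        for (const ProtoNode* p = value->prototype; p; p = p->prototype) {
            if (p == proto)
                return true;
        }
        return false;
    }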
-        case op_del_by_id: {
-            emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx);
-            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
-            emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
-            emitCTICall(i, Interpreter::cti_op_del_by_id);
-            emitPutVirtualRegister(instruction[i + 1].u.operand);
-            i += 4;
-            break;
-        }
-        case op_mul: {
-            unsigned dst = instruction[i + 1].u.operand;
-            unsigned src1 = instruction[i + 2].u.operand;
-            unsigned src2 = instruction[i + 3].u.operand;
-
-            // For now, only plant a fast int case if the constant operand is greater than zero.
-            JSValue* src1Value = getConstantImmediateNumericArg(src1);
-            JSValue* src2Value = getConstantImmediateNumericArg(src2);
-            int32_t value;
-            if (src1Value && ((value = JSImmediate::intValue(src1Value)) > 0)) {
-                emitGetVirtualRegister(src2, X86::eax, i);
-                emitJumpSlowCaseIfNotImmNum(X86::eax, i);
-                emitFastArithDeTagImmediate(X86::eax);
-                __ imull_i32r(X86::eax, value, X86::eax);
-                m_slowCases.append(SlowCaseEntry(__ jo(), i));
-                emitFastArithReTagImmediate(X86::eax);
-                emitPutVirtualRegister(dst);
-            } else if (src2Value && ((value = JSImmediate::intValue(src2Value)) > 0)) {
-                emitGetVirtualRegister(src1, X86::eax, i);
-                emitJumpSlowCaseIfNotImmNum(X86::eax, i);
-                emitFastArithDeTagImmediate(X86::eax);
-                __ imull_i32r(X86::eax, value, X86::eax);
-                m_slowCases.append(SlowCaseEntry(__ jo(), i));
-                emitFastArithReTagImmediate(X86::eax);
-                emitPutVirtualRegister(dst);
-            } else
-                compileBinaryArithOp(op_mul, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
-
-            i += 5;
-            break;
-        }
-        case op_new_func: {
-            FuncDeclNode* func = (m_codeBlock->functions[instruction[i + 2].u.operand]).get();
-            emitPutCTIArgConstant(reinterpret_cast<unsigned>(func), 0);
-            emitCTICall(i, Interpreter::cti_op_new_func);
-            emitPutVirtualRegister(instruction[i + 1].u.operand);
-            i += 3;
-            break;
-        }
-        case op_call:
-        case op_call_eval:
-        case op_construct: {
-            compileOpCall(opcodeID, instruction + i, i, callLinkInfoIndex++);
-            i += (opcodeID == op_construct ? 7 : 5);
-            break;
-        }
-        case op_get_global_var: {
-            JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 2].u.jsCell);
-            __ movl_i32r(asInteger(globalObject), X86::eax);
-            emitGetVariableObjectRegister(X86::eax, instruction[i + 3].u.operand, X86::eax);
-            emitPutVirtualRegister(instruction[i + 1].u.operand);
-            i += 4;
-            break;
-        }
-        case op_put_global_var: {
-            emitGetVirtualRegister(instruction[i + 3].u.operand, X86::edx, i);
-            JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 1].u.jsCell);
-            __ movl_i32r(asInteger(globalObject), X86::eax);
-            emitPutVariableObjectRegister(X86::edx, X86::eax, instruction[i + 2].u.operand);
-            i += 4;
-            break;
-        }
-        case op_get_scoped_var: {
-            int skip = instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain;
-
-            emitGetVirtualRegister(RegisterFile::ScopeChain, X86::eax, i);
-            while (skip--)
-                __ movl_mr(FIELD_OFFSET(ScopeChainNode, next), X86::eax, X86::eax);
-
-            __ movl_mr(FIELD_OFFSET(ScopeChainNode, object), X86::eax, X86::eax);
-            emitGetVariableObjectRegister(X86::eax, instruction[i + 2].u.operand, X86::eax);
-            emitPutVirtualRegister(instruction[i + 1].u.operand);
-            i += 4;
-            break;
-        }
-        case op_put_scoped_var: {
-            int skip = instruction[i + 2].u.operand + m_codeBlock->needsFullScopeChain;
-
-            emitGetVirtualRegister(RegisterFile::ScopeChain, X86::edx, i);
-            emitGetVirtualRegister(instruction[i + 3].u.operand, X86::eax, i);
-            while (skip--)
-                __ movl_mr(FIELD_OFFSET(ScopeChainNode, next), X86::edx, X86::edx);
-
-            __ movl_mr(FIELD_OFFSET(ScopeChainNode, object), X86::edx, X86::edx);
-            emitPutVariableObjectRegister(X86::eax, X86::edx, instruction[i + 1].u.operand);
-            i += 4;
-            break;
-        }
-        case op_tear_off_activation: {
-            emitPutCTIArgFromVirtualRegister(instruction[i + 1].u.operand, 0, X86::ecx);
-            emitCTICall(i, Interpreter::cti_op_tear_off_activation);
-            i += 2;
-            break;
-        }
-        case op_tear_off_arguments: {
-            emitCTICall(i, Interpreter::cti_op_tear_off_arguments);
-            i += 1;
-            break;
-        }
-        case op_ret: {
-            // We could JIT generate the deref, only calling out to C when the refcount hits zero.
-            if (m_codeBlock->needsFullScopeChain)
-                emitCTICall(i, Interpreter::cti_op_ret_scopeChain);
-
-            // Return the result in %eax.
-            emitGetVirtualRegister(instruction[i + 1].u.operand, X86::eax, i);
-
-            // Grab the return address.
-            emitGetVirtualRegister(RegisterFile::ReturnPC, X86::edx, i);
-
-            // Restore our caller's "r".
-            emitGetVirtualRegister(RegisterFile::CallerFrame, X86::edi, i);
-
-            // Return.
-            __ pushl_r(X86::edx);
-            __ ret();
-
-            i += 2;
-            break;
-        }
-        case op_new_array: {
-            __ leal_mr(sizeof(Register) * instruction[i + 2].u.operand, X86::edi, X86::edx);
-            emitPutCTIArg(X86::edx, 0);
-            emitPutCTIArgConstant(instruction[i + 3].u.operand, 4);
-            emitCTICall(i, Interpreter::cti_op_new_array);
-            emitPutVirtualRegister(instruction[i + 1].u.operand);
-            i += 4;
-            break;
-        }
-        case op_resolve: {
-            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
-            emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 0);
-            emitCTICall(i, Interpreter::cti_op_resolve);
-            emitPutVirtualRegister(instruction[i + 1].u.operand);
-            i += 3;
-            break;
-        }
-        case op_construct_verify: {
-            emitGetVirtualRegister(instruction[i + 1].u.operand, X86::eax, i);
-
-            __ testl_i32r(JSImmediate::TagMask, X86::eax);
-            JmpSrc isImmediate = __ jne();
-            __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::ecx);
-            __ cmpl_i32m(ObjectType, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type), X86::ecx);
-            JmpSrc isObject = __ je();
-
-            __ link(isImmediate, __ label());
-            emitGetVirtualRegister(instruction[i + 2].u.operand, X86::eax, i);
-            emitPutVirtualRegister(instruction[i + 1].u.operand);
-            __ link(isObject, __ label());
-
-            i += 3;
-            break;
-        }
-        case op_get_by_val: {
-            emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::edx, i);
-            emitJumpSlowCaseIfNotImmNum(X86::edx, i);
-            emitFastArithImmToInt(X86::edx);
-            __ testl_i32r(JSImmediate::TagMask, X86::eax);
-            m_slowCases.append(SlowCaseEntry(__ jne(), i));
-            __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsArrayVptr), X86::eax);
-            m_slowCases.append(SlowCaseEntry(__ jne(), i));
-
-            // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
-            __ movl_mr(FIELD_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
-            __ cmpl_rm(X86::edx, FIELD_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
-            m_slowCases.append(SlowCaseEntry(__ jbe(), i));
-
-            // Get the value from the vector
-            __ movl_mr(FIELD_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
-            emitPutVirtualRegister(instruction[i + 1].u.operand);
-            i += 4;
-            break;
-        }
-        case op_resolve_func: {
-            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
-            emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 0);
-            emitCTICall(i, Interpreter::cti_op_resolve_func);
-            emitPutVirtualRegister(instruction[i + 2].u.operand, X86::edx);
-            emitPutVirtualRegister(instruction[i + 1].u.operand);
-            i += 4;
-            break;
-        }
-        case op_sub: {
-            compileBinaryArithOp(op_sub, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
-            i += 5;
-            break;
-        }
-        case op_put_by_val: {
-            emitGetVirtualRegisters(instruction[i + 1].u.operand, X86::eax, instruction[i + 2].u.operand, X86::edx, i);
-            emitJumpSlowCaseIfNotImmNum(X86::edx, i);
-            emitFastArithImmToInt(X86::edx);
-            __ testl_i32r(JSImmediate::TagMask, X86::eax);
-            m_slowCases.append(SlowCaseEntry(__ jne(), i));
-            __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsArrayVptr), X86::eax);
-            m_slowCases.append(SlowCaseEntry(__ jne(), i));
-
-            // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
-            __ movl_mr(FIELD_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
-            __ cmpl_rm(X86::edx, FIELD_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
        case op_resolve_func: {
            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
            emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 0);
            emitCTICall(i, Interpreter::cti_op_resolve_func);
            emitPutVirtualRegister(instruction[i + 2].u.operand, X86::edx);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_sub: {
            compileBinaryArithOp(op_sub, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
            i += 5;
            break;
        }
        case op_put_by_val: {
            emitGetVirtualRegisters(instruction[i + 1].u.operand, X86::eax, instruction[i + 2].u.operand, X86::edx, i);
            emitJumpSlowCaseIfNotImmNum(X86::edx, i);
            emitFastArithImmToInt(X86::edx);
            __ testl_i32r(JSImmediate::TagMask, X86::eax);
            m_slowCases.append(SlowCaseEntry(__ jne(), i));
            __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsArrayVptr), X86::eax);
            m_slowCases.append(SlowCaseEntry(__ jne(), i));

            // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff.
            __ movl_mr(FIELD_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
            __ cmpl_rm(X86::edx, FIELD_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
            JmpSrc inFastVector = __ ja();
            // No; oh well, check if the access is within the vector - if so, we may still be okay.
            __ cmpl_rm(X86::edx, FIELD_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
            m_slowCases.append(SlowCaseEntry(__ jbe(), i));

            // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
            // FIXME: should be able to handle an initial write to the array here - increment the number of items in the array, and potentially update the fast access cutoff.
            __ cmpl_i8m(0, FIELD_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
            m_slowCases.append(SlowCaseEntry(__ je(), i));

            // All good - put the value into the array.
            __ link(inFastVector, __ label());
            emitGetVirtualRegister(instruction[i + 3].u.operand, X86::eax, i);
            __ movl_rm(X86::eax, FIELD_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
            i += 4;
            break;
        }
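        // For reference: the write above distinguishes three regions of the array. A C++
        // sketch (helper name invented) of the classification the emitted branches perform:
        //
        //     enum PutByValPath { InlineWrite, OutOfLine };
        //     PutByValPath classifyArrayWrite(JSArray* array, uint32_t index)
        //     {
        //         if (index < array->m_fastAccessCutoff)
        //             return InlineWrite;                 // inside the initialized prefix
        //         ArrayStorage* storage = array->m_storage;
        //         if (index >= storage->m_vectorLength)
        //             return OutOfLine;                   // beyond the vector: needs growth
        //         if (!storage->m_vector[index])
        //             return OutOfLine;                   // first write to a hole (see FIXME)
        //         return InlineWrite;                     // overwrite of an existing entry
        //     }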
        CTI_COMPILE_BINARY_OP(op_lesseq)
        case op_loop_if_true: {
            emitSlowScriptCheck(i);

            unsigned target = instruction[i + 2].u.operand;
            emitGetVirtualRegister(instruction[i + 1].u.operand, X86::eax, i);

            __ cmpl_i32r(asInteger(JSImmediate::zeroImmediate()), X86::eax);
            JmpSrc isZero = __ je();
            __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
            m_jmpTable.append(JmpTable(__ jne(), i + 2 + target));

            __ cmpl_i32r(asInteger(JSImmediate::trueImmediate()), X86::eax);
            m_jmpTable.append(JmpTable(__ je(), i + 2 + target));
            __ cmpl_i32r(asInteger(JSImmediate::falseImmediate()), X86::eax);
            m_slowCases.append(SlowCaseEntry(__ jne(), i));

            __ link(isZero, __ label());
            i += 3;
            break;
        }
        case op_resolve_base: {
            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
            emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 0);
            emitCTICall(i, Interpreter::cti_op_resolve_base);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }
        case op_negate: {
            emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx);
            emitCTICall(i, Interpreter::cti_op_negate);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }
        case op_resolve_skip: {
            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
            emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 0);
            emitPutCTIArgConstant(instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain, 4);
            emitCTICall(i, Interpreter::cti_op_resolve_skip);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_resolve_global: {
            // Fast case
            unsigned globalObject = asInteger(instruction[i + 2].u.jsCell);
            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
            void* structureAddress = reinterpret_cast<void*>(instruction + i + 4);
            void* offsetAddr = reinterpret_cast<void*>(instruction + i + 5);

            // Check Structure of global object
            __ movl_i32r(globalObject, X86::eax);
            __ movl_mr(structureAddress, X86::edx);
            __ cmpl_rm(X86::edx, FIELD_OFFSET(JSCell, m_structure), X86::eax);
            JmpSrc noMatch = __ jne(); // Structures don't match

            // Load cached property
            __ movl_mr(FIELD_OFFSET(JSGlobalObject, m_propertyStorage), X86::eax, X86::eax);
            __ movl_mr(offsetAddr, X86::edx);
            __ movl_mr(0, X86::eax, X86::edx, sizeof(JSValue*), X86::eax);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            JmpSrc end = __ jmp();

            // Slow case
            __ link(noMatch, __ label());
            emitPutCTIArgConstant(globalObject, 0);
            emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
            emitPutCTIArgConstant(reinterpret_cast<unsigned>(instruction + i), 8);
            emitCTICall(i, Interpreter::cti_op_resolve_global);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            __ link(end, __ label());
            i += 6;
            break;
        }
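        // For reference: op_resolve_global above is a one-entry inline cache whose cache
        // cells live in the bytecode stream itself - vPC + 4 holds a Structure* and
        // vPC + 5 a property-storage offset, refilled by cti_op_resolve_global on a miss.
        // The fast path, sketched in C++ (field access abbreviated):
        //
        //     JSValue* cachedResolveGlobal(JSGlobalObject* globalObject, Structure* cachedStructure, int cachedOffset)
        //     {
        //         if (globalObject->m_structure != cachedStructure)
        //             return 0;   // miss: call out to C, which repatches the cache
        //         return globalObject->m_propertyStorage[cachedOffset];
        //     }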
        CTI_COMPILE_BINARY_OP(op_div)
        case op_pre_dec: {
            int srcDst = instruction[i + 1].u.operand;
            emitGetVirtualRegister(srcDst, X86::eax, i);
            emitJumpSlowCaseIfNotImmNum(X86::eax, i);
            __ subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
            m_slowCases.append(SlowCaseEntry(__ jo(), i));
            emitPutVirtualRegister(srcDst);
            i += 2;
            break;
        }
        case op_jnless: {
            unsigned target = instruction[i + 3].u.operand;
            JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
            if (src2imm) {
                emitGetVirtualRegister(instruction[i + 1].u.operand, X86::edx, i);
                emitJumpSlowCaseIfNotImmNum(X86::edx, i);
                __ cmpl_i32r(asInteger(src2imm), X86::edx);
                m_jmpTable.append(JmpTable(__ jge(), i + 3 + target));
            } else {
                emitGetVirtualRegisters(instruction[i + 1].u.operand, X86::eax, instruction[i + 2].u.operand, X86::edx, i);
                emitJumpSlowCaseIfNotImmNum(X86::eax, i);
                emitJumpSlowCaseIfNotImmNum(X86::edx, i);
                __ cmpl_rr(X86::edx, X86::eax);
                m_jmpTable.append(JmpTable(__ jge(), i + 3 + target));
            }
            i += 4;
            break;
        }
        case op_not: {
            emitGetVirtualRegister(instruction[i + 2].u.operand, X86::eax, i);
            __ xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
            __ testl_i32r(JSImmediate::FullTagTypeMask, X86::eax); // i8?
            m_slowCases.append(SlowCaseEntry(__ jne(), i));
            __ xorl_i8r((JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue), X86::eax);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }
        case op_jfalse: {
            unsigned target = instruction[i + 2].u.operand;
            emitGetVirtualRegister(instruction[i + 1].u.operand, X86::eax, i);

            __ cmpl_i32r(asInteger(JSImmediate::zeroImmediate()), X86::eax);
            m_jmpTable.append(JmpTable(__ je(), i + 2 + target));
            __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
            JmpSrc isNonZero = __ jne();

            __ cmpl_i32r(asInteger(JSImmediate::falseImmediate()), X86::eax);
            m_jmpTable.append(JmpTable(__ je(), i + 2 + target));
            __ cmpl_i32r(asInteger(JSImmediate::trueImmediate()), X86::eax);
            m_slowCases.append(SlowCaseEntry(__ jne(), i));

            __ link(isNonZero, __ label());
            i += 3;
            break;
        }
        case op_jeq_null: {
            unsigned src = instruction[i + 1].u.operand;
            unsigned target = instruction[i + 2].u.operand;

            emitGetVirtualRegister(src, X86::eax, i);
            __ testl_i32r(JSImmediate::TagMask, X86::eax);
            JmpSrc isImmediate = __ jnz();

            __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::ecx);
            __ testl_i32m(MasqueradesAsUndefined, FIELD_OFFSET(Structure, m_typeInfo.m_flags), X86::ecx);
            __ setnz_r(X86::eax);

            JmpSrc wasNotImmediate = __ jmp();

            __ link(isImmediate, __ label());

            __ movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
            __ andl_rr(X86::eax, X86::ecx);
            __ cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
            __ sete_r(X86::eax);

            __ link(wasNotImmediate, __ label());

            __ movzbl_rr(X86::eax, X86::eax);
            __ cmpl_i32r(0, X86::eax);
            m_jmpTable.append(JmpTable(__ jnz(), i + 2 + target));

            i += 3;
            break;
        }
        case op_jneq_null: {
            unsigned src = instruction[i + 1].u.operand;
            unsigned target = instruction[i + 2].u.operand;

            emitGetVirtualRegister(src, X86::eax, i);
            __ testl_i32r(JSImmediate::TagMask, X86::eax);
            JmpSrc isImmediate = __ jnz();

            __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::ecx);
            __ testl_i32m(MasqueradesAsUndefined, FIELD_OFFSET(Structure, m_typeInfo.m_flags), X86::ecx);
            __ setz_r(X86::eax);

            JmpSrc wasNotImmediate = __ jmp();

            __ link(isImmediate, __ label());

            __ movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
            __ andl_rr(X86::eax, X86::ecx);
            __ cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
            __ setne_r(X86::eax);

            __ link(wasNotImmediate, __ label());

            __ movzbl_rr(X86::eax, X86::eax);
            __ cmpl_i32r(0, X86::eax);
            m_jmpTable.append(JmpTable(__ jnz(), i + 2 + target));

            i += 3;
            break;
        }
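        // For reference: op_jeq_null and op_jneq_null above compute the same "equal to
        // null" predicate into eax and branch on it (or its negation). Sketched in C++
        // (immediate encoding per JSImmediate; field access abbreviated):
        //
        //     bool isEqualToNull(uint32_t bits)
        //     {
        //         if (bits & JSImmediate::TagMask) {
        //             // Immediates: null and undefined differ only in ExtendedTagBitUndefined.
        //             return (bits & ~JSImmediate::ExtendedTagBitUndefined) == JSImmediate::FullTagTypeNull;
        //         }
        //         // Cells: only objects whose Structure masquerades as undefined compare equal.
        //         Structure* structure = reinterpret_cast<JSCell*>(bits)->m_structure;
        //         return structure->m_typeInfo.m_flags & MasqueradesAsUndefined;
        //     }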
        case op_post_inc: {
            int srcDst = instruction[i + 2].u.operand;
            emitGetVirtualRegister(srcDst, X86::eax, i);
            __ movl_rr(X86::eax, X86::edx);
            emitJumpSlowCaseIfNotImmNum(X86::eax, i);
            __ addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
            m_slowCases.append(SlowCaseEntry(__ jo(), i));
            emitPutVirtualRegister(srcDst, X86::edx);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }
        case op_unexpected_load: {
            JSValue* v = m_codeBlock->unexpectedConstants[instruction[i + 2].u.operand];
            __ movl_i32r(asInteger(v), X86::eax);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }
        case op_jsr: {
            int retAddrDst = instruction[i + 1].u.operand;
            int target = instruction[i + 2].u.operand;
            __ movl_i32m(0, sizeof(Register) * retAddrDst, X86::edi);
            JmpDst addrPosition = __ label();
            m_jmpTable.append(JmpTable(__ jmp(), i + 2 + target));
            JmpDst sretTarget = __ label();
            m_jsrSites.append(JSRInfo(addrPosition, sretTarget));
            i += 3;
            break;
        }
        case op_sret: {
            __ jmp_m(sizeof(Register) * instruction[i + 1].u.operand, X86::edi);
            i += 2;
            break;
        }
        case op_eq: {
            emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::edx, i);
            emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
            __ cmpl_rr(X86::edx, X86::eax);
            __ sete_r(X86::eax);
            __ movzbl_rr(X86::eax, X86::eax);
            emitTagAsBoolImmediate(X86::eax);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_lshift: {
            emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::ecx, i);
            emitJumpSlowCaseIfNotImmNum(X86::eax, i);
            emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
            emitFastArithImmToInt(X86::eax);
            emitFastArithImmToInt(X86::ecx);
            __ shll_CLr(X86::eax);
            emitFastArithIntToImmOrSlowCase(X86::eax, i);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_bitand: {
            unsigned src1 = instruction[i + 2].u.operand;
            unsigned src2 = instruction[i + 3].u.operand;
            unsigned dst = instruction[i + 1].u.operand;
            if (JSValue* value = getConstantImmediateNumericArg(src1)) {
                emitGetVirtualRegister(src2, X86::eax, i);
                emitJumpSlowCaseIfNotImmNum(X86::eax, i);
                __ andl_i32r(asInteger(value), X86::eax); // FIXME: make it more obvious this is relying on the format of JSImmediate
                emitPutVirtualRegister(dst);
            } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
                emitGetVirtualRegister(src1, X86::eax, i);
                emitJumpSlowCaseIfNotImmNum(X86::eax, i);
                __ andl_i32r(asInteger(value), X86::eax);
                emitPutVirtualRegister(dst);
            } else {
                emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx, i);
                __ andl_rr(X86::edx, X86::eax);
                emitJumpSlowCaseIfNotImmNum(X86::eax, i);
                emitPutVirtualRegister(dst);
            }
            i += 5;
            break;
        }
        case op_rshift: {
            unsigned src1 = instruction[i + 2].u.operand;
            unsigned src2 = instruction[i + 3].u.operand;
            if (JSValue* value = getConstantImmediateNumericArg(src2)) {
                emitGetVirtualRegister(src1, X86::eax, i);
                emitJumpSlowCaseIfNotImmNum(X86::eax, i);
                // Mask with 0x1f as per ecma-262 11.7.2 step 7.
                __ sarl_i8r(JSImmediate::getTruncatedUInt32(value) & 0x1f, X86::eax);
            } else {
                emitGetVirtualRegisters(src1, X86::eax, src2, X86::ecx, i);
                emitJumpSlowCaseIfNotImmNum(X86::eax, i);
                emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
                emitFastArithImmToInt(X86::ecx);
                __ sarl_CLr(X86::eax);
            }
            emitFastArithPotentiallyReTagImmediate(X86::eax);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_bitnot: {
            emitGetVirtualRegister(instruction[i + 2].u.operand, X86::eax, i);
            emitJumpSlowCaseIfNotImmNum(X86::eax, i);
            __ xorl_i8r(~JSImmediate::TagBitTypeInteger, X86::eax);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }
        case op_resolve_with_base: {
            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
            emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 0);
            emitCTICall(i, Interpreter::cti_op_resolve_with_base);
            emitPutVirtualRegister(instruction[i + 2].u.operand, X86::edx);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_new_func_exp: {
            FuncExprNode* func = (m_codeBlock->functionExpressions[instruction[i + 2].u.operand]).get();
            emitPutCTIArgConstant(reinterpret_cast<unsigned>(func), 0);
            emitCTICall(i, Interpreter::cti_op_new_func_exp);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }
        case op_mod: {
            emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::ecx, i);
            emitJumpSlowCaseIfNotImmNum(X86::eax, i);
            emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
            emitFastArithDeTagImmediate(X86::eax);
            m_slowCases.append(SlowCaseEntry(emitFastArithDeTagImmediateJumpIfZero(X86::ecx), i));
            __ cdq();
            __ idivl_r(X86::ecx);
            emitFastArithReTagImmediate(X86::edx);
            __ movl_rr(X86::edx, X86::eax);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
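        // For reference: the op_mod fast path above works directly on tagged ints, which
        // encode n as (n << 1) | 1. Assuming the subtract-one detag these helpers use,
        // detagging leaves 2n, and since (2a) % (2c) == 2 * (a % c) the remainder idiv
        // leaves in edx is already in detagged form and only needs retagging. In C++
        // (overflow edge cases ignored):
        //
        //     int32_t taggedMod(int32_t aTagged, int32_t cTagged)
        //     {
        //         int32_t a2 = aTagged - 1;  // detag: 2a
        //         int32_t c2 = cTagged - 1;  // detag: 2c - the slow case fires if this is 0
        //         return (a2 % c2) | 1;      // remainder is 2 * (a % c); retag it
        //     }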
        case op_jtrue: {
            unsigned target = instruction[i + 2].u.operand;
            emitGetVirtualRegister(instruction[i + 1].u.operand, X86::eax, i);

            __ cmpl_i32r(asInteger(JSImmediate::zeroImmediate()), X86::eax);
            JmpSrc isZero = __ je();
            __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
            m_jmpTable.append(JmpTable(__ jne(), i + 2 + target));

            __ cmpl_i32r(asInteger(JSImmediate::trueImmediate()), X86::eax);
            m_jmpTable.append(JmpTable(__ je(), i + 2 + target));
            __ cmpl_i32r(asInteger(JSImmediate::falseImmediate()), X86::eax);
            m_slowCases.append(SlowCaseEntry(__ jne(), i));

            __ link(isZero, __ label());
            i += 3;
            break;
        }
        CTI_COMPILE_BINARY_OP(op_less)
        case op_neq: {
            emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::edx, i);
            emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
            __ cmpl_rr(X86::eax, X86::edx);

            __ setne_r(X86::eax);
            __ movzbl_rr(X86::eax, X86::eax);
            emitTagAsBoolImmediate(X86::eax);

            emitPutVirtualRegister(instruction[i + 1].u.operand);

            i += 4;
            break;
        }
        case op_post_dec: {
            int srcDst = instruction[i + 2].u.operand;
            emitGetVirtualRegister(srcDst, X86::eax, i);
            __ movl_rr(X86::eax, X86::edx);
            emitJumpSlowCaseIfNotImmNum(X86::eax, i);
            __ subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
            m_slowCases.append(SlowCaseEntry(__ jo(), i));
            emitPutVirtualRegister(srcDst, X86::edx);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }
        CTI_COMPILE_BINARY_OP(op_urshift)
        case op_bitxor: {
            emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::edx, i);
            emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
            __ xorl_rr(X86::edx, X86::eax);
            emitFastArithReTagImmediate(X86::eax);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 5;
            break;
        }
        case op_new_regexp: {
            RegExp* regExp = m_codeBlock->regexps[instruction[i + 2].u.operand].get();
            emitPutCTIArgConstant(reinterpret_cast<unsigned>(regExp), 0);
            emitCTICall(i, Interpreter::cti_op_new_regexp);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }
        case op_bitor: {
            emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::edx, i);
            emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
            __ orl_rr(X86::edx, X86::eax);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 5;
            break;
        }
        case op_throw: {
            emitPutCTIArgFromVirtualRegister(instruction[i + 1].u.operand, 0, X86::ecx);
            emitCTICall(i, Interpreter::cti_op_throw);
            __ addl_i8r(0x20, X86::esp);
            __ popl_r(X86::ebx);
            __ popl_r(X86::edi);
            __ popl_r(X86::esi);
            __ ret();
            i += 2;
            break;
        }
        case op_get_pnames: {
            emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx);
            emitCTICall(i, Interpreter::cti_op_get_pnames);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }
        case op_next_pname: {
            emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx);
            unsigned target = instruction[i + 3].u.operand;
            emitCTICall(i, Interpreter::cti_op_next_pname);
            __ testl_rr(X86::eax, X86::eax);
            JmpSrc endOfIter = __ je();
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            m_jmpTable.append(JmpTable(__ jmp(), i + 3 + target));
            __ link(endOfIter, __ label());
            i += 4;
            break;
        }
        case op_push_scope: {
            emitPutCTIArgFromVirtualRegister(instruction[i + 1].u.operand, 0, X86::ecx);
            emitCTICall(i, Interpreter::cti_op_push_scope);
            i += 2;
            break;
        }
        case op_pop_scope: {
            emitCTICall(i, Interpreter::cti_op_pop_scope);
            i += 1;
            break;
        }
        CTI_COMPILE_UNARY_OP(op_typeof)
        CTI_COMPILE_UNARY_OP(op_is_undefined)
        CTI_COMPILE_UNARY_OP(op_is_boolean)
        CTI_COMPILE_UNARY_OP(op_is_number)
        CTI_COMPILE_UNARY_OP(op_is_string)
        CTI_COMPILE_UNARY_OP(op_is_object)
        CTI_COMPILE_UNARY_OP(op_is_function)
        case op_stricteq: {
            compileOpStrictEq(instruction + i, i, OpStrictEq);
            i += 4;
            break;
        }
        case op_nstricteq: {
            compileOpStrictEq(instruction + i, i, OpNStrictEq);
            i += 4;
            break;
        }
        case op_to_jsnumber: {
            int srcVReg = instruction[i + 2].u.operand;
            emitGetVirtualRegister(srcVReg, X86::eax, i);

            __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
            JmpSrc wasImmediate = __ jnz();

            emitJumpSlowCaseIfNotJSCell(X86::eax, i, srcVReg);

            __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::ecx);
            __ cmpl_i32m(NumberType, FIELD_OFFSET(Structure, m_typeInfo.m_type), X86::ecx);

            m_slowCases.append(SlowCaseEntry(__ jne(), i));

            __ link(wasImmediate, __ label());

            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }
        case op_in: {
            emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx);
            emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 4, X86::ecx);
            emitCTICall(i, Interpreter::cti_op_in);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_push_new_scope: {
            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
            emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 0);
            emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 4, X86::ecx);
            emitCTICall(i, Interpreter::cti_op_push_new_scope);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_catch: {
            emitGetCTIParam(CTI_ARGS_callFrame, X86::edi); // edi := r
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 2;
            break;
        }
        case op_jmp_scopes: {
            unsigned count = instruction[i + 1].u.operand;
            emitPutCTIArgConstant(count, 0);
            emitCTICall(i, Interpreter::cti_op_jmp_scopes);
            unsigned target = instruction[i + 2].u.operand;
            m_jmpTable.append(JmpTable(__ jmp(), i + 2 + target));
            i += 3;
            break;
        }
        case op_put_by_index: {
            emitPutCTIArgFromVirtualRegister(instruction[i + 1].u.operand, 0, X86::ecx);
            emitPutCTIArgConstant(instruction[i + 2].u.operand, 4);
            emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 8, X86::ecx);
            emitCTICall(i, Interpreter::cti_op_put_by_index);
            i += 4;
            break;
        }
        case op_switch_imm: {
            unsigned tableIndex = instruction[i + 1].u.operand;
            unsigned defaultOffset = instruction[i + 2].u.operand;
            unsigned scrutinee = instruction[i + 3].u.operand;

            // create jump table for switch destinations, track this switch statement.
            SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTables[tableIndex];
            m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Immediate));
            jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());

            emitPutCTIArgFromVirtualRegister(scrutinee, 0, X86::ecx);
            emitPutCTIArgConstant(tableIndex, 4);
            emitCTICall(i, Interpreter::cti_op_switch_imm);
            __ jmp_r(X86::eax);
            i += 4;
            break;
        }
        case op_switch_char: {
            unsigned tableIndex = instruction[i + 1].u.operand;
            unsigned defaultOffset = instruction[i + 2].u.operand;
            unsigned scrutinee = instruction[i + 3].u.operand;

            // create jump table for switch destinations, track this switch statement.
            SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTables[tableIndex];
            m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Character));
            jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());

            emitPutCTIArgFromVirtualRegister(scrutinee, 0, X86::ecx);
            emitPutCTIArgConstant(tableIndex, 4);
            emitCTICall(i, Interpreter::cti_op_switch_char);
            __ jmp_r(X86::eax);
            i += 4;
            break;
        }
        case op_switch_string: {
            unsigned tableIndex = instruction[i + 1].u.operand;
            unsigned defaultOffset = instruction[i + 2].u.operand;
            unsigned scrutinee = instruction[i + 3].u.operand;

            // create jump table for switch destinations, track this switch statement.
            StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTables[tableIndex];
            m_switches.append(SwitchRecord(jumpTable, i, defaultOffset));

            emitPutCTIArgFromVirtualRegister(scrutinee, 0, X86::ecx);
            emitPutCTIArgConstant(tableIndex, 4);
            emitCTICall(i, Interpreter::cti_op_switch_string);
            __ jmp_r(X86::eax);
            i += 4;
            break;
        }
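        // For reference: all three switch flavours pick a destination in C and return it
        // in eax for the indirect jmp_r above. For op_switch_imm the lookup is, in outline
        // (assuming SimpleJumpTable's min/ctiOffsets/ctiDefault fields; the addresses are
        // filled in by privateCompile once the code has been copied to its executable
        // location):
        //
        //     void* switchImmDestination(SimpleJumpTable* table, int32_t scrutinee)
        //     {
        //         unsigned index = static_cast<unsigned>(scrutinee - table->min);
        //         if (index < table->ctiOffsets.size() && table->ctiOffsets[index])
        //             return table->ctiOffsets[index];
        //         return table->ctiDefault;
        //     }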
        case op_del_by_val: {
            emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx);
            emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 4, X86::ecx);
            emitCTICall(i, Interpreter::cti_op_del_by_val);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_put_getter: {
            emitPutCTIArgFromVirtualRegister(instruction[i + 1].u.operand, 0, X86::ecx);
            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
            emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
            emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 8, X86::ecx);
            emitCTICall(i, Interpreter::cti_op_put_getter);
            i += 4;
            break;
        }
        case op_put_setter: {
            emitPutCTIArgFromVirtualRegister(instruction[i + 1].u.operand, 0, X86::ecx);
            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
            emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
            emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 8, X86::ecx);
            emitCTICall(i, Interpreter::cti_op_put_setter);
            i += 4;
            break;
        }
        case op_new_error: {
            JSValue* message = m_codeBlock->unexpectedConstants[instruction[i + 3].u.operand];
            emitPutCTIArgConstant(instruction[i + 2].u.operand, 0);
            emitPutCTIArgConstant(asInteger(message), 4);
            emitPutCTIArgConstant(m_codeBlock->lineNumberForVPC(&instruction[i]), 8);
            emitCTICall(i, Interpreter::cti_op_new_error);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_debug: {
            emitPutCTIArgConstant(instruction[i + 1].u.operand, 0);
            emitPutCTIArgConstant(instruction[i + 2].u.operand, 4);
            emitPutCTIArgConstant(instruction[i + 3].u.operand, 8);
            emitCTICall(i, Interpreter::cti_op_debug);
            i += 4;
            break;
        }
        case op_eq_null: {
            unsigned dst = instruction[i + 1].u.operand;
            unsigned src1 = instruction[i + 2].u.operand;

            emitGetVirtualRegister(src1, X86::eax, i);
            __ testl_i32r(JSImmediate::TagMask, X86::eax);
            JmpSrc isImmediate = __ jnz();

            __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::ecx);
            __ testl_i32m(MasqueradesAsUndefined, FIELD_OFFSET(Structure, m_typeInfo.m_flags), X86::ecx);
            __ setnz_r(X86::eax);

            JmpSrc wasNotImmediate = __ jmp();

            __ link(isImmediate, __ label());

            __ movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
            __ andl_rr(X86::eax, X86::ecx);
            __ cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
            __ sete_r(X86::eax);

            __ link(wasNotImmediate, __ label());

            __ movzbl_rr(X86::eax, X86::eax);
            emitTagAsBoolImmediate(X86::eax);
            emitPutVirtualRegister(dst);

            i += 3;
            break;
        }
        case op_neq_null: {
            unsigned dst = instruction[i + 1].u.operand;
            unsigned src1 = instruction[i + 2].u.operand;

            emitGetVirtualRegister(src1, X86::eax, i);
            __ testl_i32r(JSImmediate::TagMask, X86::eax);
            JmpSrc isImmediate = __ jnz();

            __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::ecx);
            __ testl_i32m(MasqueradesAsUndefined, FIELD_OFFSET(Structure, m_typeInfo.m_flags), X86::ecx);
            __ setz_r(X86::eax);

            JmpSrc wasNotImmediate = __ jmp();

            __ link(isImmediate, __ label());

            __ movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
            __ andl_rr(X86::eax, X86::ecx);
            __ cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
            __ setne_r(X86::eax);

            __ link(wasNotImmediate, __ label());

            __ movzbl_rr(X86::eax, X86::eax);
            emitTagAsBoolImmediate(X86::eax);
            emitPutVirtualRegister(dst);

            i += 3;
            break;
        }
        case op_enter: {
            // Even though CTI doesn't use them, we initialize our constant
            // registers to zap stale pointers, to avoid unnecessarily prolonging
            // object lifetime and increasing GC pressure.
            size_t count = m_codeBlock->numVars + m_codeBlock->constantRegisters.size();
            for (size_t j = 0; j < count; ++j)
                emitInitRegister(j);

            i += 1;
            break;
        }
        case op_enter_with_activation: {
            // Even though CTI doesn't use them, we initialize our constant
            // registers to zap stale pointers, to avoid unnecessarily prolonging
            // object lifetime and increasing GC pressure.
            size_t count = m_codeBlock->numVars + m_codeBlock->constantRegisters.size();
            for (size_t j = 0; j < count; ++j)
                emitInitRegister(j);

            emitCTICall(i, Interpreter::cti_op_push_activation);
            emitPutVirtualRegister(instruction[i + 1].u.operand);

            i += 2;
            break;
        }
        case op_create_arguments: {
            emitCTICall(i, (m_codeBlock->numParameters == 1) ? Interpreter::cti_op_create_arguments_no_params : Interpreter::cti_op_create_arguments);
            i += 1;
            break;
        }
        case op_convert_this: {
            emitGetVirtualRegister(instruction[i + 1].u.operand, X86::eax, i);

            emitJumpSlowCaseIfNotJSCell(X86::eax, i);
            __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::edx);
            __ testl_i32m(NeedsThisConversion, FIELD_OFFSET(Structure, m_typeInfo.m_flags), X86::edx);
            m_slowCases.append(SlowCaseEntry(__ jnz(), i));

            i += 2;
            break;
        }
        case op_profile_will_call: {
            emitGetCTIParam(CTI_ARGS_profilerReference, X86::eax);
            __ cmpl_i32m(0, X86::eax);
            JmpSrc noProfiler = __ je();
            emitPutCTIArgFromVirtualRegister(instruction[i + 1].u.operand, 0, X86::eax);
            emitCTICall(i, Interpreter::cti_op_profile_will_call);
            __ link(noProfiler, __ label());

            i += 2;
            break;
        }
        case op_profile_did_call: {
            emitGetCTIParam(CTI_ARGS_profilerReference, X86::eax);
            __ cmpl_i32m(0, X86::eax);
            JmpSrc noProfiler = __ je();
            emitPutCTIArgFromVirtualRegister(instruction[i + 1].u.operand, 0, X86::eax);
            emitCTICall(i, Interpreter::cti_op_profile_did_call);
            __ link(noProfiler, __ label());

            i += 2;
            break;
        }
        case op_get_array_length:
        case op_get_by_id_chain:
        case op_get_by_id_generic:
        case op_get_by_id_proto:
        case op_get_by_id_proto_list:
        case op_get_by_id_self:
        case op_get_by_id_self_list:
        case op_get_string_length:
        case op_put_by_id_generic:
        case op_put_by_id_replace:
        case op_put_by_id_transition:
            ASSERT_NOT_REACHED();
        }
    }

    ASSERT(propertyAccessInstructionIndex == m_codeBlock->propertyAccessInstructions.size());
    ASSERT(callLinkInfoIndex == m_codeBlock->callLinkInfos.size());
}


void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        __ link(m_jmpTable[i].from, m_labels[m_jmpTable[i].to]);
    m_jmpTable.clear();
}
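// For reference: codegen is two-pass because the main pass emits jumps to bytecode
// targets whose machine code may not exist yet. Each such jump is recorded against a
// bytecode index, then resolved above once m_labels has an entry for every instruction.
// The pattern, in outline:
//
//     JmpSrc jump = __ jne();                          // target not yet emitted
//     m_jmpTable.append(JmpTable(jump, targetIndex));  // remember where it should go
//     ...                                              // keep compiling; labels fill in
//     __ link(jump, m_labels[targetIndex]);            // replayed by the loop above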
#define CTI_COMPILE_BINARY_OP_SLOW_CASE(name) \
    case name: { \
        __ link(iter->from, __ label()); \
        emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx); \
        emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 4, X86::ecx); \
        emitCTICall(i, Interpreter::cti_##name); \
        emitPutVirtualRegister(instruction[i + 1].u.operand); \
        i += 4; \
        break; \
    }

#define CTI_COMPILE_BINARY_OP_SLOW_CASE_DOUBLE_ENTRY(name) \
    case name: { \
        __ link(iter->from, __ label()); \
        __ link((++iter)->from, __ label()); \
        emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx); \
        emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 4, X86::ecx); \
        emitCTICall(i, Interpreter::cti_##name); \
        emitPutVirtualRegister(instruction[i + 1].u.operand); \
        i += 4; \
        break; \
    }

void JIT::privateCompileSlowCases()
{
    unsigned propertyAccessInstructionIndex = 0;
    unsigned callLinkInfoIndex = 0;

    Instruction* instruction = m_codeBlock->instructions.begin();
    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
        // FIXME: enable peephole optimizations for slow cases when applicable.
        killLastResultRegister();

        unsigned i = iter->to;
#ifndef NDEBUG
        unsigned firstTo = i;
#endif

        switch (OpcodeID opcodeID = m_interpreter->getOpcodeID(instruction[i].u.opcode)) {
        case op_convert_this: {
            __ link(iter->from, __ label());
            __ link((++iter)->from, __ label());
            emitPutCTIArg(X86::eax, 0);
            emitCTICall(i, Interpreter::cti_op_convert_this);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 2;
            break;
        }
        case op_add: {
            unsigned dst = instruction[i + 1].u.operand;
            unsigned src1 = instruction[i + 2].u.operand;
            unsigned src2 = instruction[i + 3].u.operand;
            if (JSValue* value = getConstantImmediateNumericArg(src1)) {
                JmpSrc notImm = iter->from;
                __ link((++iter)->from, __ label());
                __ subl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
                __ link(notImm, __ label());
                emitPutCTIArgFromVirtualRegister(src1, 0, X86::ecx);
                emitPutCTIArg(X86::eax, 4);
                emitCTICall(i, Interpreter::cti_op_add);
                emitPutVirtualRegister(dst);
            } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
                JmpSrc notImm = iter->from;
                __ link((++iter)->from, __ label());
                __ subl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
                __ link(notImm, __ label());
                emitPutCTIArg(X86::eax, 0);
                emitPutCTIArgFromVirtualRegister(src2, 4, X86::ecx);
                emitCTICall(i, Interpreter::cti_op_add);
                emitPutVirtualRegister(dst);
            } else {
                OperandTypes types = OperandTypes::fromInt(instruction[i + 4].u.operand);
                if (types.first().mightBeNumber() && types.second().mightBeNumber())
                    compileBinaryArithOpSlowCase(op_add, iter, dst, src1, src2, types, i);
                else
                    ASSERT_NOT_REACHED();
            }

            i += 5;
            break;
        }
        case op_get_by_val: {
            // The slow case that handles accesses to arrays (below) may jump back up to here.
            JmpDst beginGetByValSlow = __ label();

            JmpSrc notImm = iter->from;
            __ link((++iter)->from, __ label());
            __ link((++iter)->from, __ label());
            emitFastArithIntToImmNoCheck(X86::edx);
            __ link(notImm, __ label());
            emitPutCTIArg(X86::eax, 0);
            emitPutCTIArg(X86::edx, 4);
            emitCTICall(i, Interpreter::cti_op_get_by_val);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            __ link(__ jmp(), m_labels[i + 4]);

            // This is the slow case that handles accesses to arrays above the fast cut-off.
            // First, check if this is an access to the vector.
            __ link((++iter)->from, __ label());
            __ cmpl_rm(X86::edx, FIELD_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
            __ link(__ jbe(), beginGetByValSlow);

            // Okay, missed the fast region, but it is still in the vector. Get the value.
            __ movl_mr(FIELD_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
            // Check whether the value loaded is zero; if so we need to return undefined.
            __ testl_rr(X86::ecx, X86::ecx);
            __ link(__ je(), beginGetByValSlow);
            __ movl_rr(X86::ecx, X86::eax);
            emitPutVirtualRegister(instruction[i + 1].u.operand, X86::eax);

            i += 4;
            break;
        }
        case op_sub: {
            compileBinaryArithOpSlowCase(op_sub, iter, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
            i += 5;
            break;
        }
        case op_rshift: {
            unsigned src2 = instruction[i + 3].u.operand;
            __ link(iter->from, __ label());
            if (getConstantImmediateNumericArg(src2))
                emitPutCTIArgFromVirtualRegister(src2, 4, X86::ecx);
            else {
                __ link((++iter)->from, __ label());
                emitPutCTIArg(X86::ecx, 4);
            }

            emitPutCTIArg(X86::eax, 0);
            emitCTICall(i, Interpreter::cti_op_rshift);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_lshift: {
            JmpSrc notImm1 = iter->from;
            JmpSrc notImm2 = (++iter)->from;
            __ link((++iter)->from, __ label());
            emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::ecx, i);
            __ link(notImm1, __ label());
            __ link(notImm2, __ label());
            emitPutCTIArg(X86::eax, 0);
            emitPutCTIArg(X86::ecx, 4);
            emitCTICall(i, Interpreter::cti_op_lshift);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_loop_if_less: {
            emitSlowScriptCheck(i);

            unsigned target = instruction[i + 3].u.operand;
            JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
            if (src2imm) {
                __ link(iter->from, __ label());
                emitPutCTIArg(X86::edx, 0);
                emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 4, X86::ecx);
                emitCTICall(i, Interpreter::cti_op_loop_if_less);
                __ testl_rr(X86::eax, X86::eax);
                __ link(__ jne(), m_labels[i + 3 + target]);
            } else {
                __ link(iter->from, __ label());
                __ link((++iter)->from, __ label());
                emitPutCTIArg(X86::eax, 0);
                emitPutCTIArg(X86::edx, 4);
                emitCTICall(i, Interpreter::cti_op_loop_if_less);
                __ testl_rr(X86::eax, X86::eax);
                __ link(__ jne(), m_labels[i + 3 + target]);
            }
            i += 4;
            break;
        }
        case op_put_by_id: {
            if (linkSlowCaseIfNotJSCell(iter, instruction[i + 1].u.operand))
                ++iter;
            __ link(iter->from, __ label());

            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
            emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
            emitPutCTIArg(X86::eax, 0);
            emitPutCTIArg(X86::edx, 8);
            JmpSrc call = emitCTICall(i, Interpreter::cti_op_put_by_id);

            // Track the location of the call; this will be used to recover repatch information.
            ASSERT(m_codeBlock->propertyAccessInstructions[propertyAccessInstructionIndex].bytecodeIndex == i);
            m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
            ++propertyAccessInstructionIndex;

            i += 8;
            break;
        }
        case op_get_by_id: {
            // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
            // so that we need only track one pointer into the slow case code - we track a pointer to the location
            // of the call (which we can use to look up the repatch information), but should an array-length or
            // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
            // the distance from the call to the head of the slow case.

            if (linkSlowCaseIfNotJSCell(iter, instruction[i + 2].u.operand))
                ++iter;
            __ link(iter->from, __ label());

#ifndef NDEBUG
            JmpDst coldPathBegin = __ label();
#endif
            emitPutCTIArg(X86::eax, 0);
            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
            emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
            JmpSrc call = emitCTICall(i, Interpreter::cti_op_get_by_id);
            ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
            emitPutVirtualRegister(instruction[i + 1].u.operand);

            // Track the location of the call; this will be used to recover repatch information.
            ASSERT(m_codeBlock->propertyAccessInstructions[propertyAccessInstructionIndex].bytecodeIndex == i);
            m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
            ++propertyAccessInstructionIndex;

            i += 8;
            break;
        }
        case op_loop_if_lesseq: {
            emitSlowScriptCheck(i);

            unsigned target = instruction[i + 3].u.operand;
            JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
            if (src2imm) {
                __ link(iter->from, __ label());
                emitPutCTIArg(X86::edx, 0);
                emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 4, X86::ecx);
                emitCTICall(i, Interpreter::cti_op_loop_if_lesseq);
                __ testl_rr(X86::eax, X86::eax);
                __ link(__ jne(), m_labels[i + 3 + target]);
            } else {
                __ link(iter->from, __ label());
                __ link((++iter)->from, __ label());
                emitPutCTIArg(X86::eax, 0);
                emitPutCTIArg(X86::edx, 4);
                emitCTICall(i, Interpreter::cti_op_loop_if_lesseq);
                __ testl_rr(X86::eax, X86::eax);
                __ link(__ jne(), m_labels[i + 3 + target]);
            }
            i += 4;
            break;
        }
        case op_pre_inc: {
            unsigned srcDst = instruction[i + 1].u.operand;
            JmpSrc notImm = iter->from;
            __ link((++iter)->from, __ label());
            __ subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
            __ link(notImm, __ label());
            emitPutCTIArg(X86::eax, 0);
            emitCTICall(i, Interpreter::cti_op_pre_inc);
            emitPutVirtualRegister(srcDst);
            i += 2;
            break;
        }
        case op_put_by_val: {
            // Normal slow cases - either a value was not an immediate number, or the base was not an array.
            JmpSrc notImm = iter->from;
            __ link((++iter)->from, __ label());
            __ link((++iter)->from, __ label());
            emitFastArithIntToImmNoCheck(X86::edx);
            __ link(notImm, __ label());
            emitGetVirtualRegister(instruction[i + 3].u.operand, X86::ecx, i);
            emitPutCTIArg(X86::eax, 0);
            emitPutCTIArg(X86::edx, 4);
            emitPutCTIArg(X86::ecx, 8);
            emitCTICall(i, Interpreter::cti_op_put_by_val);
            __ link(__ jmp(), m_labels[i + 4]);

            // Slow cases for immediate int accesses to arrays.
            __ link((++iter)->from, __ label());
            __ link((++iter)->from, __ label());
            emitGetVirtualRegister(instruction[i + 3].u.operand, X86::ecx, i);
            emitPutCTIArg(X86::eax, 0);
            emitPutCTIArg(X86::edx, 4);
            emitPutCTIArg(X86::ecx, 8);
            emitCTICall(i, Interpreter::cti_op_put_by_val_array);

            i += 4;
            break;
        }
        case op_loop_if_true: {
            emitSlowScriptCheck(i);

            __ link(iter->from, __ label());
            emitPutCTIArg(X86::eax, 0);
            emitCTICall(i, Interpreter::cti_op_jtrue);
            __ testl_rr(X86::eax, X86::eax);
            unsigned target = instruction[i + 2].u.operand;
            __ link(__ jne(), m_labels[i + 2 + target]);
            i += 3;
            break;
        }
        case op_pre_dec: {
            unsigned srcDst = instruction[i + 1].u.operand;
            JmpSrc notImm = iter->from;
            __ link((++iter)->from, __ label());
            __ addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
            __ link(notImm, __ label());
            emitPutCTIArg(X86::eax, 0);
            emitCTICall(i, Interpreter::cti_op_pre_dec);
            emitPutVirtualRegister(srcDst);
            i += 2;
            break;
        }
        case op_jnless: {
            unsigned target = instruction[i + 3].u.operand;
            JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
            if (src2imm) {
                __ link(iter->from, __ label());
                emitPutCTIArg(X86::edx, 0);
                emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 4, X86::ecx);
                emitCTICall(i, Interpreter::cti_op_jless);
                __ testl_rr(X86::eax, X86::eax);
                __ link(__ je(), m_labels[i + 3 + target]);
            } else {
                __ link(iter->from, __ label());
                __ link((++iter)->from, __ label());
                emitPutCTIArg(X86::eax, 0);
                emitPutCTIArg(X86::edx, 4);
                emitCTICall(i, Interpreter::cti_op_jless);
                __ testl_rr(X86::eax, X86::eax);
                __ link(__ je(), m_labels[i + 3 + target]);
            }
            i += 4;
            break;
        }
        case op_not: {
            __ link(iter->from, __ label());
            __ xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
            emitPutCTIArg(X86::eax, 0);
            emitCTICall(i, Interpreter::cti_op_not);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }
        case op_jfalse: {
            __ link(iter->from, __ label());
            emitPutCTIArg(X86::eax, 0);
            emitCTICall(i, Interpreter::cti_op_jtrue);
            __ testl_rr(X86::eax, X86::eax);
            unsigned target = instruction[i + 2].u.operand;
            __ link(__ je(), m_labels[i + 2 + target]); // inverted!
            i += 3;
            break;
        }
        case op_post_inc: {
            unsigned srcDst = instruction[i + 2].u.operand;
            __ link(iter->from, __ label());
            __ link((++iter)->from, __ label());
            emitPutCTIArg(X86::eax, 0);
            emitCTICall(i, Interpreter::cti_op_post_inc);
            emitPutVirtualRegister(srcDst, X86::edx);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }
        case op_bitnot: {
            __ link(iter->from, __ label());
            emitPutCTIArg(X86::eax, 0);
            emitCTICall(i, Interpreter::cti_op_bitnot);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }
        case op_bitand: {
            unsigned src1 = instruction[i + 2].u.operand;
            unsigned src2 = instruction[i + 3].u.operand;
            unsigned dst = instruction[i + 1].u.operand;
            if (getConstantImmediateNumericArg(src1)) {
                __ link(iter->from, __ label());
                emitPutCTIArgFromVirtualRegister(src1, 0, X86::ecx);
                emitPutCTIArg(X86::eax, 4);
                emitCTICall(i, Interpreter::cti_op_bitand);
                emitPutVirtualRegister(dst);
            } else if (getConstantImmediateNumericArg(src2)) {
                __ link(iter->from, __ label());
                emitPutCTIArg(X86::eax, 0);
                emitPutCTIArgFromVirtualRegister(src2, 4, X86::ecx);
                emitCTICall(i, Interpreter::cti_op_bitand);
                emitPutVirtualRegister(dst);
            } else {
                __ link(iter->from, __ label());
                emitPutCTIArgFromVirtualRegister(src1, 0, X86::ecx);
                emitPutCTIArg(X86::edx, 4);
                emitCTICall(i, Interpreter::cti_op_bitand);
                emitPutVirtualRegister(dst);
            }
            i += 5;
            break;
        }
        case op_jtrue: {
            __ link(iter->from, __ label());
            emitPutCTIArg(X86::eax, 0);
            emitCTICall(i, Interpreter::cti_op_jtrue);
            __ testl_rr(X86::eax, X86::eax);
            unsigned target = instruction[i + 2].u.operand;
            __ link(__ jne(), m_labels[i + 2 + target]);
            i += 3;
            break;
        }
        case op_post_dec: {
            unsigned srcDst = instruction[i + 2].u.operand;
            __ link(iter->from, __ label());
            __ link((++iter)->from, __ label());
            emitPutCTIArg(X86::eax, 0);
            emitCTICall(i, Interpreter::cti_op_post_dec);
            emitPutVirtualRegister(srcDst, X86::edx);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }
        case op_bitxor: {
            __ link(iter->from, __ label());
            emitPutCTIArg(X86::eax, 0);
            emitPutCTIArg(X86::edx, 4);
            emitCTICall(i, Interpreter::cti_op_bitxor);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 5;
            break;
        }
        case op_bitor: {
            __ link(iter->from, __ label());
            emitPutCTIArg(X86::eax, 0);
            emitPutCTIArg(X86::edx, 4);
            emitCTICall(i, Interpreter::cti_op_bitor);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 5;
            break;
        }
        case op_eq: {
            __ link(iter->from, __ label());
            emitPutCTIArg(X86::eax, 0);
            emitPutCTIArg(X86::edx, 4);
            emitCTICall(i, Interpreter::cti_op_eq);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_neq: {
            __ link(iter->from, __ label());
            emitPutCTIArg(X86::eax, 0);
            emitPutCTIArg(X86::edx, 4);
            emitCTICall(i, Interpreter::cti_op_neq);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        CTI_COMPILE_BINARY_OP_SLOW_CASE_DOUBLE_ENTRY(op_stricteq);
        CTI_COMPILE_BINARY_OP_SLOW_CASE_DOUBLE_ENTRY(op_nstricteq);
        case op_instanceof: {
            __ link(iter->from, __ label());
            __ link((++iter)->from, __ label());
            __ link((++iter)->from, __ label());
            emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx);
            emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 4, X86::ecx);
            emitPutCTIArgFromVirtualRegister(instruction[i + 4].u.operand, 8, X86::ecx);
            emitCTICall(i, Interpreter::cti_op_instanceof);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 5;
            break;
        }
        case op_mod: {
            JmpSrc notImm1 = iter->from;
            JmpSrc notImm2 = (++iter)->from;
            __ link((++iter)->from, __ label());
            emitFastArithReTagImmediate(X86::eax);
            emitFastArithReTagImmediate(X86::ecx);
            __ link(notImm1, __ label());
            __ link(notImm2, __ label());
            emitPutCTIArg(X86::eax, 0);
            emitPutCTIArg(X86::ecx, 4);
            emitCTICall(i, Interpreter::cti_op_mod);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_mul: {
            int dst = instruction[i + 1].u.operand;
            int src1 = instruction[i + 2].u.operand;
            int src2 = instruction[i + 3].u.operand;
            JSValue* src1Value = getConstantImmediateNumericArg(src1);
            JSValue* src2Value = getConstantImmediateNumericArg(src2);
            int32_t value;
            if (src1Value && ((value = JSImmediate::intValue(src1Value)) > 0)) {
                __ link(iter->from, __ label());
                __ link((++iter)->from, __ label());
                // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
                emitPutCTIArgFromVirtualRegister(src1, 0, X86::ecx);
                emitPutCTIArgFromVirtualRegister(src2, 4, X86::ecx);
                emitCTICall(i, Interpreter::cti_op_mul);
                emitPutVirtualRegister(dst);
            } else if (src2Value && ((value = JSImmediate::intValue(src2Value)) > 0)) {
                __ link(iter->from, __ label());
                __ link((++iter)->from, __ label());
                // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
                emitPutCTIArgFromVirtualRegister(src1, 0, X86::ecx);
                emitPutCTIArgFromVirtualRegister(src2, 4, X86::ecx);
                emitCTICall(i, Interpreter::cti_op_mul);
                emitPutVirtualRegister(dst);
            } else
                compileBinaryArithOpSlowCase(op_mul, iter, dst, src1, src2, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
            i += 5;
            break;
        }
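        // For reference: the "extra slow case" comments above guard a -0 hazard. Worked
        // example: ES gives (-3) * 0 == -0, but tagged-int arithmetic has no -0 and would
        // produce +0. When a constant operand is known positive at compile time, a zero
        // product can only be +0, so only then may the fast path omit the zero check.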
        case op_call:
        case op_call_eval:
        case op_construct: {
            int dst = instruction[i + 1].u.operand;
            int callee = instruction[i + 2].u.operand;
            int argCount = instruction[i + 3].u.operand;
            int registerOffset = instruction[i + 4].u.operand;

            __ link(iter->from, __ label());

            // The arguments have been set up on the hot path for op_call_eval.
            if (opcodeID == op_call)
                compileOpCallSetupArgs(instruction + i);
            else if (opcodeID == op_construct)
                compileOpConstructSetupArgs(instruction + i);

            // Fast check for JS function.
            __ testl_i32r(JSImmediate::TagMask, X86::ecx);
            JmpSrc callLinkFailNotObject = __ jne();
            __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsFunctionVptr), X86::ecx);
            JmpSrc callLinkFailNotJSFunction = __ jne();

            // First, in the case of a construct, allocate the new object.
            if (opcodeID == op_construct) {
                emitCTICall(i, Interpreter::cti_op_construct_JSConstruct);
                emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
                emitGetVirtualRegister(callee, X86::ecx, i);
            }

            __ movl_i32r(argCount, X86::edx);

            // Speculatively roll the callframe, assuming argCount will match the arity.
            __ movl_rm(X86::edi, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)), X86::edi);
            __ addl_i32r(registerOffset * static_cast<int>(sizeof(Register)), X86::edi);

            m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation =
                emitNakedCall(i, m_interpreter->m_ctiVirtualCallPreLink);

            JmpSrc storeResultForFirstRun = __ jmp();

            // This is the address for the cold path *after* the first run (which tries to link the call).
            m_callStructureStubCompilationInfo[callLinkInfoIndex].coldPathOther = __ label();

            // The arguments have been set up on the hot path for op_call_eval.
            if (opcodeID == op_call)
                compileOpCallSetupArgs(instruction + i);
            else if (opcodeID == op_construct)
                compileOpConstructSetupArgs(instruction + i);

            // Check for JSFunctions.
            __ testl_i32r(JSImmediate::TagMask, X86::ecx);
            JmpSrc isNotObject = __ jne();
            __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsFunctionVptr), X86::ecx);
            JmpSrc isJSFunction = __ je();

            // This handles host functions.
            JmpDst notJSFunctionlabel = __ label();
            __ link(isNotObject, notJSFunctionlabel);
            __ link(callLinkFailNotObject, notJSFunctionlabel);
            __ link(callLinkFailNotJSFunction, notJSFunctionlabel);
            emitCTICall(i, ((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction));
            JmpSrc wasNotJSFunction = __ jmp();

            // Next, handle JSFunctions...
            __ link(isJSFunction, __ label());

            // First, in the case of a construct, allocate the new object.
            if (opcodeID == op_construct) {
                emitCTICall(i, Interpreter::cti_op_construct_JSConstruct);
                emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
                emitGetVirtualRegister(callee, X86::ecx, i);
            }

            // Speculatively roll the callframe, assuming argCount will match the arity.
            __ movl_rm(X86::edi, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)), X86::edi);
            __ addl_i32r(registerOffset * static_cast<int>(sizeof(Register)), X86::edi);
            __ movl_i32r(argCount, X86::edx);

            emitNakedCall(i, m_interpreter->m_ctiVirtualCall);

            // Put the return value in dst. In the interpreter, op_ret does this.
            JmpDst storeResult = __ label();
            __ link(wasNotJSFunction, storeResult);
            __ link(storeResultForFirstRun, storeResult);
            emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
            __ movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
            ++callLinkInfoIndex;

            i += (opcodeID == op_construct ? 7 : 5);
            break;
        }
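        // For reference: the call slow path above has two entry points. On the first pass
        // through, control reaches the naked call to m_ctiVirtualCallPreLink, which can
        // rewrite the hot path to call the callee's compiled code directly; thereafter a
        // call site whose inline check fails lands on coldPathOther instead and dispatches
        // through m_ctiVirtualCall. In outline:
        //
        //     first run:  set up args -> JSFunction checks -> ctiVirtualCallPreLink (tries to link)
        //     afterwards: coldPathOther -> set up args -> JSFunction checks -> ctiVirtualCall
        //     non-JSFunction callees from either path take cti_op_call_NotJSFunction (or
        //     cti_op_construct_NotJSConstruct); all paths merge at storeResult.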
        case op_to_jsnumber: {
            if (linkSlowCaseIfNotJSCell(iter, instruction[i + 2].u.operand))
                ++iter;
            __ link(iter->from, __ label());

            emitPutCTIArg(X86::eax, 0);
            emitCTICall(i, Interpreter::cti_op_to_jsnumber);

            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }

        default:
            ASSERT_NOT_REACHED();
            break;
        }

        ASSERT_WITH_MESSAGE((iter + 1) == m_slowCases.end() || firstTo != (iter + 1)->to, "Not enough jumps linked in slow case codegen.");
        ASSERT_WITH_MESSAGE(firstTo == iter->to, "Too many jumps linked in slow case codegen.");

        __ link(__ jmp(), m_labels[i]);
    }

    ASSERT(propertyAccessInstructionIndex == m_codeBlock->propertyAccessInstructions.size());
    ASSERT(callLinkInfoIndex == m_codeBlock->callLinkInfos.size());
}
void JIT::privateCompile()
{
#if ENABLE(CODEBLOCK_SAMPLING)
    __ movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
#if ENABLE(OPCODE_SAMPLING)
    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin()), m_interpreter->sampler()->sampleSlot());
#endif

    // Could use a popl_m, but would need to offset the following instruction if so.
    __ popl_r(X86::ecx);
    emitPutToCallFrameHeader(X86::ecx, RegisterFile::ReturnPC);

    JmpSrc slowRegisterFileCheck;
    JmpDst afterRegisterFileCheck;
    if (m_codeBlock->codeType == FunctionCode) {
        // In the case of a fast linked call, we do not set this up in the caller.
        __ movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), RegisterFile::CodeBlock * static_cast<int>(sizeof(Register)), X86::edi);

        emitGetCTIParam(CTI_ARGS_registerFile, X86::eax);
        __ leal_mr(m_codeBlock->numCalleeRegisters * sizeof(Register), X86::edi, X86::edx);
        __ cmpl_mr(FIELD_OFFSET(RegisterFile, m_end), X86::eax, X86::edx);
        slowRegisterFileCheck = __ jg();
        afterRegisterFileCheck = __ label();
    }
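    // For reference: the block above is the JS register-file overflow guard. It computes
    // the new frame end and takes an out-of-line path if that passes the register file's
    // current end. Sketched in C++ (helper name invented):
    //
    //     bool registerFileFits(Register* frameBase, int calleeRegisters, Register* end)
    //     {
    //         return frameBase + calleeRegisters <= end;   // else cti_register_file_check
    //     }
    //
    // The out-of-line stub (linked after the main passes below) calls
    // cti_register_file_check and then jumps back to afterRegisterFileCheck.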
    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_codeBlock->codeType == FunctionCode) {
        __ link(slowRegisterFileCheck, __ label());
        emitCTICall(0, Interpreter::cti_register_file_check);
        JmpSrc backToBody = __ jmp();
        __ link(backToBody, afterRegisterFileCheck);
    }

    ASSERT(m_jmpTable.isEmpty());

    void* code = __ executableCopy();

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeIndex = record.bytecodeIndex;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = __ getRelocatedAddress(code, m_labels[bytecodeIndex + 3 + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? __ getRelocatedAddress(code, m_labels[bytecodeIndex + 3 + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = __ getRelocatedAddress(code, m_labels[bytecodeIndex + 3 + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->second.branchOffset;
                it->second.ctiOffset = offset ? __ getRelocatedAddress(code, m_labels[bytecodeIndex + 3 + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (Vector<HandlerInfo>::iterator iter = m_codeBlock->exceptionHandlers.begin(); iter != m_codeBlock->exceptionHandlers.end(); ++iter)
        iter->nativeCode = __ getRelocatedAddress(code, m_labels[iter->target]);

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            X86Assembler::link(code, iter->from, iter->to);
        m_codeBlock->ctiReturnAddressVPCMap.add(__ getRelocatedAddress(code, iter->from), iter->bytecodeIndex);
    }

    // Link absolute addresses for jsr.
    for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
        X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);

    for (unsigned i = 0; i < m_codeBlock->propertyAccessInstructions.size(); ++i) {
        StructureStubInfo& info = m_codeBlock->propertyAccessInstructions[i];
        info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_propertyAccessCompilationInfo[i].callReturnLocation);
        info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_propertyAccessCompilationInfo[i].hotPathBegin);
    }
    for (unsigned i = 0; i < m_codeBlock->callLinkInfos.size(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfos[i];
        info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_callStructureStubCompilationInfo[i].callReturnLocation);
        info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_callStructureStubCompilationInfo[i].hotPathBegin);
        info.hotPathOther = X86Assembler::getRelocatedAddress(code, m_callStructureStubCompilationInfo[i].hotPathOther);
        info.coldPathOther = X86Assembler::getRelocatedAddress(code, m_callStructureStubCompilationInfo[i].coldPathOther);
    }

    m_codeBlock->ctiCode = code;
}

void JIT::privateCompileGetByIdSelf(Structure* structure, size_t cachedOffset, void* returnAddress)
{
    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    JmpSrc failureCases1 = __ jne();
    JmpSrc failureCases2 = checkStructure(X86::eax, structure);

    // Checks out okay! - getDirectOffset
    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
    __ ret();

    void* code = __ executableCopy();

    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));

    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
}
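// For reference: this and the stubs below are the monomorphic inline-cache path for
// get_by_id. Each stub is specialized for a single Structure, and
// ctiRepatchCallByReturnAddress swaps which target the original call site reaches.
// In outline:
//
//     stub body:  structure check -> load m_propertyStorage -> load the cached slot -> ret
//     on install: the call site that reached cti_op_get_by_id now calls the stub instead
//     on a miss:  the stub's failure jumps land in cti_op_get_by_id_self_fail, which can
//                 build a replacement (e.g. the polymorphic lists below) and repatch again.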
void JIT::privateCompileGetByIdSelf(Structure* structure, size_t cachedOffset, void* returnAddress)
{
    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    JmpSrc failureCases1 = __ jne();
    JmpSrc failureCases2 = checkStructure(X86::eax, structure);

    // Checks out okay! - getDirectOffset
    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
    __ ret();

    void* code = __ executableCopy();

    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));

    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
}

void JIT::privateCompileGetByIdProto(Structure* structure, Structure* prototypeStructure, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
{
#if USE(CTI_REPATCH_PIC)
    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));

    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its property storage nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);

    // Check eax is an object of the right Structure.
    JmpSrc failureCases1 = checkStructure(X86::eax, structure);

    // Check the prototype object's Structure has not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
    __ cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
    JmpSrc failureCases2 = __ jne();

    // Checks out okay! - getDirectOffset
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);

    JmpSrc success = __ jmp();

    void* code = __ executableCopy();

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    X86Assembler::link(code, failureCases1, slowCaseBegin);
    X86Assembler::link(code, failureCases2, slowCaseBegin);

    // On success return back to the hot path code, at a point where it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    info.stubRoutine = code;

    // Finally repatch the jump to the slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
#else
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its property storage nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);

    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    JmpSrc failureCases1 = __ jne();
    JmpSrc failureCases2 = checkStructure(X86::eax, structure);

    // Check the prototype object's Structure has not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
    __ cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
    JmpSrc failureCases3 = __ jne();

    // Checks out okay! - getDirectOffset
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);

    __ ret();

    void* code = __ executableCopy();

    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
    X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));

    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
#endif
}
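Written out as plain C++, the fast path these two stubs implement is a pointer comparison or two followed by an indexed load; everything else is plumbing to route mismatches back to the slow case. The sketch below uses simplified stand-in types, not the real layouts from JSObject.h and Structure.h.

    #include <cstddef>

    struct MockStructure;
    struct MockValue;
    struct MockObject {
        MockStructure* m_structure;
        MockValue** m_propertyStorage; // out-of-line property slots
    };

    // Self access: one Structure check, then a direct slot load.
    static MockValue* cachedGetSelf(MockObject* base, MockStructure* cached, size_t offset, bool& slow)
    {
        slow = base->m_structure != cached;
        return slow ? 0 : base->m_propertyStorage[offset];
    }

    // Proto access: additionally pin the prototype's Structure, then load from
    // the prototype's storage (which the stub captured at compile time).
    static MockValue* cachedGetProto(MockObject* base, MockStructure* cached, MockObject* proto,
                                     MockStructure* cachedProto, size_t offset, bool& slow)
    {
        slow = base->m_structure != cached || proto->m_structure != cachedProto;
        return slow ? 0 : proto->m_propertyStorage[offset];
    }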
#if USE(CTI_REPATCH_PIC)
void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
{
    JmpSrc failureCase = checkStructure(X86::eax, structure);
    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy();
    ASSERT(code);

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
    if (!lastProtoBegin)
        lastProtoBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;

    X86Assembler::link(code, failureCase, lastProtoBegin);

    // On success return back to the hot path code, at a point where it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    structure->ref();
    polymorphicStructures->list[currentIndex].set(cachedOffset, code, structure);

    // Finally repatch the jump to the slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}

void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its property storage nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);

    // Check eax is an object of the right Structure.
    JmpSrc failureCases1 = checkStructure(X86::eax, structure);

    // Check the prototype object's Structure has not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
    __ cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
    JmpSrc failureCases2 = __ jne();

    // Checks out okay! - getDirectOffset
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);

    JmpSrc success = __ jmp();

    void* code = __ executableCopy();

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    X86Assembler::link(code, failureCases1, lastProtoBegin);
    X86Assembler::link(code, failureCases2, lastProtoBegin);

    // On success return back to the hot path code, at a point where it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    structure->ref();
    prototypeStructure->ref();
    prototypeStructures->list[currentIndex].set(cachedOffset, code, structure, prototypeStructure);

    // Finally repatch the jump to the slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}

void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
{
    ASSERT(count);

    Vector<JmpSrc> bucketsOfFail;

    // Check eax is an object of the right Structure.
    bucketsOfFail.append(checkStructure(X86::eax, structure));

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_i32m(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy();

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;

    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], lastProtoBegin);

    // On success return back to the hot path code, at a point where it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    structure->ref();
    chain->ref();
    prototypeStructures->list[currentIndex].set(cachedOffset, code, structure, chain);

    // Finally repatch the jump to the slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}
#endif
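The chain variants generalize the proto case: one cmpl per prototype hop, all funneled into the same bucketsOfFail vector. The equivalent check in C++, with simplified types and a plain pointer standing in for prototypeForLookup():

    #include <cstddef>
    #include <vector>

    struct MockStructure;
    struct MockObject {
        MockStructure* m_structure;
        MockObject* prototype; // stand-in for structure->prototypeForLookup()
    };

    // Returns true if every cached Structure along the prototype chain still
    // matches, i.e. none of the emitted cmpl checks would branch to failure.
    static bool chainStillValid(MockObject* base, MockStructure* baseStructure,
                                const std::vector<MockStructure*>& cachedChain)
    {
        if (base->m_structure != baseStructure)
            return false;
        MockObject* current = base;
        for (size_t i = 0; i < cachedChain.size(); ++i) {
            current = current->prototype;
            if (current->m_structure != cachedChain[i])
                return false;
        }
        return true; // safe to load from the cached offset in the final prototype
    }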
void JIT::privateCompileGetByIdChain(Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
{
#if USE(CTI_REPATCH_PIC)
    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));

    ASSERT(count);

    Vector<JmpSrc> bucketsOfFail;

    // Check eax is an object of the right Structure.
    bucketsOfFail.append(checkStructure(X86::eax, structure));

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_i32m(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy();

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;

    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], slowCaseBegin);

    // On success return back to the hot path code, at a point where it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    info.stubRoutine = code;

    // Finally repatch the jump to the slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
#else
    ASSERT(count);

    Vector<JmpSrc> bucketsOfFail;

    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    bucketsOfFail.append(__ jne());
    bucketsOfFail.append(checkStructure(X86::eax, structure));

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_i32m(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    __ ret();

    void* code = __ executableCopy();

    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));

    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
#endif
}

void JIT::privateCompilePutByIdReplace(Structure* structure, size_t cachedOffset, void* returnAddress)
{
    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    JmpSrc failureCases1 = __ jne();
    JmpSrc failureCases2 = checkStructure(X86::eax, structure);

    // Checks out okay! - putDirectOffset
    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
    __ ret();

    void* code = __ executableCopy();

    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));

    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
}

extern "C" {

    static JSObject* resizePropertyStorage(JSObject* baseObject, size_t oldSize, size_t newSize)
    {
        baseObject->allocatePropertyStorageInline(oldSize, newSize);
        return baseObject;
    }

}

static inline bool transitionWillNeedStorageRealloc(Structure* oldStructure, Structure* newStructure)
{
    return oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
}
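A transition only needs the out-of-line storage grown when the new Structure crosses a propertyStorageCapacity() boundary; same-capacity transitions skip the resizePropertyStorage call entirely. A compilable illustration with a mock type and hypothetical capacity numbers:

    #include <cstddef>

    struct MockStructure {
        size_t capacity;
        size_t propertyStorageCapacity() const { return capacity; }
    };

    // Same test as transitionWillNeedStorageRealloc() above.
    static bool wouldNeedRealloc(const MockStructure& oldS, const MockStructure& newS)
    {
        return oldS.propertyStorageCapacity() != newS.propertyStorageCapacity();
    }

    // MockStructure a = { 8 }, b = { 16 };
    // wouldNeedRealloc(a, b) == true:  crossing a capacity boundary forces the call
    // wouldNeedRealloc(a, a) == false: the new property fits in existing headroom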
void JIT::privateCompilePutByIdTransition(Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, void* returnAddress)
{
    Vector<JmpSrc, 16> failureCases;
    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    failureCases.append(__ jne());
    __ cmpl_i32m(reinterpret_cast<uint32_t>(oldStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
    failureCases.append(__ jne());
    Vector<JmpSrc> successCases;

    // ecx = baseObject
    __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::ecx);
    // proto(ecx) = baseObject->structure()->prototype()
    __ cmpl_i32m(ObjectType, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type), X86::ecx);
    failureCases.append(__ jne());
    __ movl_mr(FIELD_OFFSET(Structure, m_prototype), X86::ecx, X86::ecx);

    // ecx = baseObject->m_structure
    for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
        // Null check the prototype.
        __ cmpl_i32r(asInteger(jsNull()), X86::ecx);
        successCases.append(__ je());

        // Check the structure id.
        __ cmpl_i32m(reinterpret_cast<uint32_t>(it->get()), FIELD_OFFSET(JSCell, m_structure), X86::ecx);
        failureCases.append(__ jne());

        __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::ecx, X86::ecx);
        __ cmpl_i32m(ObjectType, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type), X86::ecx);
        failureCases.append(__ jne());
        __ movl_mr(FIELD_OFFSET(Structure, m_prototype), X86::ecx, X86::ecx);
    }

    failureCases.append(__ jne());
    for (unsigned i = 0; i < successCases.size(); ++i)
        __ link(successCases[i], __ label());

    JmpSrc callTarget;

    // Emit a call only if storage realloc is needed.
    if (transitionWillNeedStorageRealloc(oldStructure, newStructure)) {
        __ pushl_r(X86::edx);
        __ pushl_i32(newStructure->propertyStorageCapacity());
        __ pushl_i32(oldStructure->propertyStorageCapacity());
        __ pushl_r(X86::eax);
        callTarget = __ call();
        __ addl_i32r(3 * sizeof(void*), X86::esp);
        __ popl_r(X86::edx);
    }

    // Assumes m_refCount can be decremented easily; the decrement is safe because the
    // CodeBlock should ensure oldStructure->m_refCount > 0.
    __ subl_i8m(1, reinterpret_cast<void*>(oldStructure));
    __ addl_i8m(1, reinterpret_cast<void*>(newStructure));
    __ movl_i32m(reinterpret_cast<uint32_t>(newStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);

    // Write the value.
    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);

    __ ret();

    JmpSrc failureJump;
    if (failureCases.size()) {
        for (unsigned i = 0; i < failureCases.size(); ++i)
            __ link(failureCases[i], __ label());
        __ restoreArgumentReferenceForTrampoline();
        failureJump = __ jmp();
    }

    void* code = __ executableCopy();

    if (failureCases.size())
        X86Assembler::link(code, failureJump, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));

    if (transitionWillNeedStorageRealloc(oldStructure, newStructure))
        X86Assembler::link(code, callTarget, reinterpret_cast<void*>(resizePropertyStorage));

    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
}
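Stripped of the assembler plumbing, the success path of the transition stub performs the state change below. This sketch is illustrative only: real refcounting goes through Structure's ref/deref, and the types are simplified stand-ins.

    #include <cstddef>

    struct MockValue;
    struct MockStructureRC { unsigned m_refCount; };
    struct MockObject {
        MockStructureRC* m_structure;
        MockValue** m_propertyStorage;
    };

    // What the subl_i8m/addl_i8m/movl_i32m sequence plus the final stores amount
    // to: move one reference from the old Structure to the new one, switch the
    // object's Structure pointer, then write the incoming value into its slot.
    static void commitTransition(MockObject* base, MockStructureRC* oldStructure, MockStructureRC* newStructure,
                                 size_t cachedOffset, MockValue* value)
    {
        --oldStructure->m_refCount; // safe: the owning CodeBlock keeps oldStructure alive
        ++newStructure->m_refCount;
        base->m_structure = newStructure;
        base->m_propertyStorage[cachedOffset] = value;
    }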
void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
{
    // When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
    // (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
    // match). Reset the check so it no longer matches.
    reinterpret_cast<void**>(callLinkInfo->hotPathBegin)[-1] = asPointer(JSImmediate::impossibleValue());
}

void JIT::linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, void* ctiCode, CallLinkInfo* callLinkInfo, int callerArgCount)
{
    // Currently we only link calls with the exact number of arguments.
    if (callerArgCount == calleeCodeBlock->numParameters) {
        ASSERT(!callLinkInfo->isLinked());

        calleeCodeBlock->addCaller(callLinkInfo);

        reinterpret_cast<void**>(callLinkInfo->hotPathBegin)[-1] = callee;
        ctiRepatchCallByReturnAddress(callLinkInfo->hotPathOther, ctiCode);
    }

    // Repatch the instruction that jumps out to the cold path, so that we only try to link once.
    void* repatchCheck = reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(callLinkInfo->hotPathBegin) + repatchOffsetOpCallCall);
    ctiRepatchCallByReturnAddress(repatchCheck, callLinkInfo->coldPathOther);
}
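linkCall and unlinkCall are two uses of the same idiom: the hot path's callee comparison embeds a 32-bit pointer in the instruction stream just before hotPathBegin, and slot [-1] addresses that operand directly. A sketch of the idiom, assuming a writable code page; impossibleValueSentinel is a hypothetical name for the value of JSImmediate::impossibleValue():

    // Rewrite the pointer operand baked in immediately before hotPathBegin.
    static void setEmbeddedCallee(void* hotPathBegin, void* calleeOrSentinel)
    {
        reinterpret_cast<void**>(hotPathBegin)[-1] = calleeOrSentinel;
    }

    // linkCall:   setEmbeddedCallee(info->hotPathBegin, callee);
    // unlinkCall: setEmbeddedCallee(info->hotPathBegin, impossibleValueSentinel);
    // The sentinel guarantees the check never matches again, even if a new
    // JSFunction is later constructed at the freed callee's address.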
void JIT::privateCompileCTIMachineTrampolines()
{
    // (1) The first function provides fast property access for array length.

    // Check eax is an array.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    JmpSrc array_failureCases1 = __ jne();
    __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsArrayVptr), X86::eax);
    JmpSrc array_failureCases2 = __ jne();

    // Checks out okay! - get the length from the storage.
    __ movl_mr(FIELD_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
    __ movl_mr(FIELD_OFFSET(ArrayStorage, m_length), X86::eax, X86::eax);

    __ cmpl_i32r(JSImmediate::maxImmediateInt, X86::eax);
    JmpSrc array_failureCases3 = __ ja();

    __ addl_rr(X86::eax, X86::eax);
    __ addl_i8r(1, X86::eax);

    __ ret();

    // (2) The second function provides fast property access for string length.

    JmpDst stringLengthBegin = __ align(16);

    // Check eax is a string.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    JmpSrc string_failureCases1 = __ jne();
    __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsStringVptr), X86::eax);
    JmpSrc string_failureCases2 = __ jne();

    // Checks out okay! - get the length from the UString.
    __ movl_mr(FIELD_OFFSET(JSString, m_value) + FIELD_OFFSET(UString, m_rep), X86::eax, X86::eax);
    __ movl_mr(FIELD_OFFSET(UString::Rep, len), X86::eax, X86::eax);

    __ cmpl_i32r(JSImmediate::maxImmediateInt, X86::eax);
    JmpSrc string_failureCases3 = __ ja();

    __ addl_rr(X86::eax, X86::eax);
    __ addl_i8r(1, X86::eax);

    __ ret();

    // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.

    JmpDst virtualCallPreLinkBegin = __ align(16);

    // Load the callee CodeBlock* into eax.
    __ movl_mr(FIELD_OFFSET(JSFunction, m_body), X86::ecx, X86::eax);
    __ movl_mr(FIELD_OFFSET(FunctionBodyNode, m_code), X86::eax, X86::eax);
    __ testl_rr(X86::eax, X86::eax);
    JmpSrc hasCodeBlock1 = __ jne();
    __ popl_r(X86::ebx);
    __ restoreArgumentReference();
    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
    JmpSrc callJSFunction1 = __ call();
    emitGetCTIArg(0, X86::ecx);
    emitGetCTIArg(8, X86::edx);
    __ pushl_r(X86::ebx);
    __ link(hasCodeBlock1, __ label());

    // Check argCount matches callee arity.
    __ cmpl_rm(X86::edx, FIELD_OFFSET(CodeBlock, numParameters), X86::eax);
    JmpSrc arityCheckOkay1 = __ je();
    __ popl_r(X86::ebx);
    emitPutCTIArg(X86::ebx, 4);
    emitPutCTIArg(X86::eax, 12);
    __ restoreArgumentReference();
    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
    JmpSrc callArityCheck1 = __ call();
    __ movl_rr(X86::edx, X86::edi);
    emitGetCTIArg(0, X86::ecx);
    emitGetCTIArg(8, X86::edx);
    __ pushl_r(X86::ebx);
    __ link(arityCheckOkay1, __ label());

    compileOpCallInitializeCallFrame();

    __ popl_r(X86::ebx);
    emitPutCTIArg(X86::ebx, 4);
    __ restoreArgumentReference();
    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
    JmpSrc callDontLazyLinkCall = __ call();
    __ pushl_r(X86::ebx);

    __ jmp_r(X86::eax);

    JmpDst virtualCallLinkBegin = __ align(16);

    // Load the callee CodeBlock* into eax.
    __ movl_mr(FIELD_OFFSET(JSFunction, m_body), X86::ecx, X86::eax);
    __ movl_mr(FIELD_OFFSET(FunctionBodyNode, m_code), X86::eax, X86::eax);
    __ testl_rr(X86::eax, X86::eax);
    JmpSrc hasCodeBlock2 = __ jne();
    __ popl_r(X86::ebx);
    __ restoreArgumentReference();
    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
    JmpSrc callJSFunction2 = __ call();
    emitGetCTIArg(0, X86::ecx);
    emitGetCTIArg(8, X86::edx);
    __ pushl_r(X86::ebx);
    __ link(hasCodeBlock2, __ label());

    // Check argCount matches callee arity.
    __ cmpl_rm(X86::edx, FIELD_OFFSET(CodeBlock, numParameters), X86::eax);
    JmpSrc arityCheckOkay2 = __ je();
    __ popl_r(X86::ebx);
    emitPutCTIArg(X86::ebx, 4);
    emitPutCTIArg(X86::eax, 12);
    __ restoreArgumentReference();
    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
    JmpSrc callArityCheck2 = __ call();
    __ movl_rr(X86::edx, X86::edi);
    emitGetCTIArg(0, X86::ecx);
    emitGetCTIArg(8, X86::edx);
    __ pushl_r(X86::ebx);
    __ link(arityCheckOkay2, __ label());

    compileOpCallInitializeCallFrame();

    __ popl_r(X86::ebx);
    emitPutCTIArg(X86::ebx, 4);
    __ restoreArgumentReference();
    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
    JmpSrc callLazyLinkCall = __ call();
    __ pushl_r(X86::ebx);

    __ jmp_r(X86::eax);

    JmpDst virtualCallBegin = __ align(16);

    // Load the callee CodeBlock* into eax.
    __ movl_mr(FIELD_OFFSET(JSFunction, m_body), X86::ecx, X86::eax);
    __ movl_mr(FIELD_OFFSET(FunctionBodyNode, m_code), X86::eax, X86::eax);
    __ testl_rr(X86::eax, X86::eax);
    JmpSrc hasCodeBlock3 = __ jne();
    __ popl_r(X86::ebx);
    __ restoreArgumentReference();
    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
    JmpSrc callJSFunction3 = __ call();
    emitGetCTIArg(0, X86::ecx);
    emitGetCTIArg(8, X86::edx);
    __ pushl_r(X86::ebx);
    __ link(hasCodeBlock3, __ label());

    // Check argCount matches callee arity.
    __ cmpl_rm(X86::edx, FIELD_OFFSET(CodeBlock, numParameters), X86::eax);
    JmpSrc arityCheckOkay3 = __ je();
    __ popl_r(X86::ebx);
    emitPutCTIArg(X86::ebx, 4);
    emitPutCTIArg(X86::eax, 12);
    __ restoreArgumentReference();
    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
    JmpSrc callArityCheck3 = __ call();
    __ movl_rr(X86::edx, X86::edi);
    emitGetCTIArg(0, X86::ecx);
    emitGetCTIArg(8, X86::edx);
    __ pushl_r(X86::ebx);
    __ link(arityCheckOkay3, __ label());

    compileOpCallInitializeCallFrame();

    // Load ctiCode from the new codeBlock.
    __ movl_mr(FIELD_OFFSET(CodeBlock, ctiCode), X86::eax, X86::eax);

    __ jmp_r(X86::eax);

    // All trampolines constructed! Copy the code, link up calls, and set the pointers on the Interpreter object.

    void* code = __ executableCopy();

    X86Assembler::link(code, array_failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
    X86Assembler::link(code, array_failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
    X86Assembler::link(code, array_failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
    X86Assembler::link(code, string_failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail));
    X86Assembler::link(code, string_failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail));
    X86Assembler::link(code, string_failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail));
    X86Assembler::link(code, callArityCheck1, reinterpret_cast<void*>(Interpreter::cti_op_call_arityCheck));
    X86Assembler::link(code, callArityCheck2, reinterpret_cast<void*>(Interpreter::cti_op_call_arityCheck));
    X86Assembler::link(code, callArityCheck3, reinterpret_cast<void*>(Interpreter::cti_op_call_arityCheck));
    X86Assembler::link(code, callJSFunction1, reinterpret_cast<void*>(Interpreter::cti_op_call_JSFunction));
    X86Assembler::link(code, callJSFunction2, reinterpret_cast<void*>(Interpreter::cti_op_call_JSFunction));
    X86Assembler::link(code, callJSFunction3, reinterpret_cast<void*>(Interpreter::cti_op_call_JSFunction));
    X86Assembler::link(code, callDontLazyLinkCall, reinterpret_cast<void*>(Interpreter::cti_vm_dontLazyLinkCall));
    X86Assembler::link(code, callLazyLinkCall, reinterpret_cast<void*>(Interpreter::cti_vm_lazyLinkCall));

    m_interpreter->m_ctiArrayLengthTrampoline = code;
    m_interpreter->m_ctiStringLengthTrampoline = X86Assembler::getRelocatedAddress(code, stringLengthBegin);
    m_interpreter->m_ctiVirtualCallPreLink = X86Assembler::getRelocatedAddress(code, virtualCallPreLinkBegin);
    m_interpreter->m_ctiVirtualCallLink = X86Assembler::getRelocatedAddress(code, virtualCallLinkBegin);
    m_interpreter->m_ctiVirtualCall = X86Assembler::getRelocatedAddress(code, virtualCallBegin);
}

void JIT::freeCTIMachineTrampolines(Interpreter* interpreter)
{
    WTF::fastFreeExecutable(interpreter->m_ctiArrayLengthTrampoline);
}
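The addl_rr/addl_i8r pair that ends both length trampolines above is the immediate-number encoding: a 31-bit integer n is boxed as (n << 1) | 1, and the preceding maxImmediateInt/ja guard sends values that would not fit to the slow case. In C++:

    #include <stdint.h>

    // Boxing as emitted: `addl eax, eax` doubles the value (a left shift by
    // one), and `addl $1, eax` sets the integer tag bit.
    static int32_t boxImmediateInt(int32_t n)
    {
        return (n << 1) | 1;
    }

    static int32_t unboxImmediateInt(int32_t boxed)
    {
        return boxed >> 1; // arithmetic shift discards the tag bit
    }

    // e.g. an array length of 5 leaves 11 (binary 1011) in eax.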
void JIT::patchGetByIdSelf(CodeBlock* codeBlock, Structure* structure, size_t cachedOffset, void* returnAddress)
{
    StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to Interpreter::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));

    // Repatch the offset into the property map to load from, then repatch the Structure to look for.
    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructure, reinterpret_cast<uint32_t>(structure));
}

void JIT::patchPutByIdReplace(CodeBlock* codeBlock, Structure* structure, size_t cachedOffset, void* returnAddress)
{
    StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to Interpreter::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_generic));

    // Repatch the offset into the property map to store to, then repatch the Structure to look for.
    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructure, reinterpret_cast<uint32_t>(structure));
}

void JIT::privateCompilePatchGetArrayLength(void* returnAddress)
{
    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));

    // Check eax is an array.
    __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsArrayVptr), X86::eax);
    JmpSrc failureCases1 = __ jne();

    // Checks out okay! - get the length from the storage.
    __ movl_mr(FIELD_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
    __ movl_mr(FIELD_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);

    __ cmpl_i32r(JSImmediate::maxImmediateInt, X86::ecx);
    JmpSrc failureCases2 = __ ja();

    __ addl_rr(X86::ecx, X86::ecx);
    __ addl_i8r(1, X86::ecx);
    __ movl_rr(X86::ecx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy();

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    X86Assembler::link(code, failureCases1, slowCaseBegin);
    X86Assembler::link(code, failureCases2, slowCaseBegin);

    // On success return back to the hot path code, at a point where it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    // Finally repatch the jump to the slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}
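Both patch routines above rewrite operands of already-emitted instructions in place rather than generating any new code. In spirit this is a single imm32 store at a known offset from the patch anchor; the sketch below uses a hypothetical helper and assumes a writable code page, whereas the real entry points are X86Assembler::repatchDisplacement/repatchImmediate, which know the exact operand encodings:

    #include <stdint.h>

    // On x86, both a memory operand's displacement and a cmp's immediate are a
    // plain little-endian imm32 at a fixed offset from the patch location.
    static void repatchInt32(intptr_t where, int32_t value)
    {
        *reinterpret_cast<int32_t*>(where) = value;
    }

    // Usage, mirroring patchGetByIdSelf:
    //   repatchInt32(hotPathBegin + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
    //   repatchInt32(hotPathBegin + repatchOffsetGetByIdStructure, reinterpret_cast<int32_t>(structure));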
void JIT::emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst)
{
    __ movl_mr(FIELD_OFFSET(JSVariableObject, d), variableObject, dst);
    __ movl_mr(FIELD_OFFSET(JSVariableObject::JSVariableObjectData, registers), dst, dst);
    __ movl_mr(index * sizeof(Register), dst, dst);
}

void JIT::emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index)
{
    __ movl_mr(FIELD_OFFSET(JSVariableObject, d), variableObject, variableObject);
    __ movl_mr(FIELD_OFFSET(JSVariableObject::JSVariableObjectData, registers), variableObject, variableObject);
    __ movl_rm(src, index * sizeof(Register), variableObject);
}

} // namespace JSC
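As a closing note on the two emit*VariableObjectRegister helpers: they compile to a double indirection, object->d->registers[index], one movl per hop. The same access written directly in C++, with the layout simplified from JSVariableObject:

    struct MockRegister;
    struct MockVariableObjectData { MockRegister* registers; };
    struct MockVariableObject { MockVariableObjectData* d; };

    // movl d(object) -> movl registers(d) -> base + index * sizeof(Register).
    static MockRegister* variableObjectRegister(MockVariableObject* object, int index)
    {
        return &object->d->registers[index];
    }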