Changeset 43543 in webkit for trunk/JavaScriptCore/jit/JITArithmetic.cpp
- Timestamp: May 11, 2009, 9:20:29 PM
- File: 1 edited
Legend:
- Unmodified (context) lines are indented by two spaces
- Added lines are prefixed with "+"
- Removed lines are prefixed with "-"
trunk/JavaScriptCore/jit/JITArithmetic.cpp
--- trunk/JavaScriptCore/jit/JITArithmetic.cpp (r43534)
+++ trunk/JavaScriptCore/jit/JITArithmetic.cpp (r43543)

  using namespace std;

- // All X86 Macs are guaranteed to support at least SSE2
- #if PLATFORM(X86_64) || (PLATFORM(X86) && PLATFORM(MAC))
-
- static inline bool isSSE2Present()
- {
-     return true;
- }
-
- #else
-
- static bool isSSE2Present()
- {
-     static const int SSE2FeatureBit = 1 << 26;
-     struct SSE2Check {
-         SSE2Check()
-         {
-             int flags;
- #if COMPILER(MSVC)
-             _asm {
-                 mov eax, 1 // cpuid function 1 gives us the standard feature set
-                 cpuid;
-                 mov flags, edx;
-             }
- #elif COMPILER(GCC)
-             asm (
-                 "movl $0x1, %%eax;"
-                 "pushl %%ebx;"
-                 "cpuid;"
-                 "popl %%ebx;"
-                 "movl %%edx, %0;"
-                 : "=g" (flags)
-                 :
-                 : "%eax", "%ecx", "%edx"
-             );
- #else
-             flags = 0;
- #endif
-             present = (flags & SSE2FeatureBit) != 0;
-         }
-         bool present;
-     };
-     static SSE2Check check;
-     return check.present;
- }
-
- #endif
-
  namespace JSC {

…
      emitPutVirtualRegister(result);
  }
+
  void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
  {
…
      }
  }
+
  void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
  {
…
      }
  }
+
  void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
  {
…
      emitPutVirtualRegister(result);
  }
+
  void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
  {
…
  }

+ void JIT::emit_op_post_inc(Instruction* currentInstruction)
+ {
+     unsigned result = currentInstruction[1].u.operand;
+     unsigned srcDst = currentInstruction[2].u.operand;
+
+     emitGetVirtualRegister(srcDst, regT0);
+     move(regT0, regT1);
+     emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ #if USE(ALTERNATE_JSIMMEDIATE)
+     addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
+     emitFastArithIntToImmNoCheck(regT1, regT1);
+ #else
+     addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
+     signExtend32ToPtr(regT1, regT1);
+ #endif
+     emitPutVirtualRegister(srcDst, regT1);
+     emitPutVirtualRegister(result);
+ }
+
+ void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+ {
+     unsigned result = currentInstruction[1].u.operand;
+     unsigned srcDst = currentInstruction[2].u.operand;
+
+     linkSlowCase(iter);
+     linkSlowCase(iter);
+     JITStubCall stubCall(this, JITStubs::cti_op_post_inc);
+     stubCall.addArgument(regT0);
+     stubCall.addArgument(Imm32(srcDst));
+     stubCall.call(result);
+ }
+
+ void JIT::emit_op_post_dec(Instruction* currentInstruction)
+ {
+     unsigned result = currentInstruction[1].u.operand;
+     unsigned srcDst = currentInstruction[2].u.operand;
+
+     emitGetVirtualRegister(srcDst, regT0);
+     move(regT0, regT1);
+     emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ #if USE(ALTERNATE_JSIMMEDIATE)
+     addSlowCase(branchSub32(Zero, Imm32(1), regT1));
+     emitFastArithIntToImmNoCheck(regT1, regT1);
+ #else
+     addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
+     signExtend32ToPtr(regT1, regT1);
+ #endif
+     emitPutVirtualRegister(srcDst, regT1);
+     emitPutVirtualRegister(result);
+ }
+
+ void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+ {
+     unsigned result = currentInstruction[1].u.operand;
+     unsigned srcDst = currentInstruction[2].u.operand;
+
+     linkSlowCase(iter);
+     linkSlowCase(iter);
+     JITStubCall stubCall(this, JITStubs::cti_op_post_dec);
+     stubCall.addArgument(regT0);
+     stubCall.addArgument(Imm32(srcDst));
+     stubCall.call(result);
+ }
+
+ void JIT::emit_op_pre_inc(Instruction* currentInstruction)
+ {
+     unsigned srcDst = currentInstruction[1].u.operand;
+
+     emitGetVirtualRegister(srcDst, regT0);
+     emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ #if USE(ALTERNATE_JSIMMEDIATE)
+     addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
+     emitFastArithIntToImmNoCheck(regT0, regT0);
+ #else
+     addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
+     signExtend32ToPtr(regT0, regT0);
+ #endif
+     emitPutVirtualRegister(srcDst);
+ }
+
+ void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+ {
+     unsigned srcDst = currentInstruction[1].u.operand;
+
+     Jump notImm = getSlowCase(iter);
+     linkSlowCase(iter);
+     emitGetVirtualRegister(srcDst, regT0);
+     notImm.link(this);
+     JITStubCall stubCall(this, JITStubs::cti_op_pre_inc);
+     stubCall.addArgument(regT0);
+     stubCall.call(srcDst);
+ }
+
+ void JIT::emit_op_pre_dec(Instruction* currentInstruction)
+ {
+     unsigned srcDst = currentInstruction[1].u.operand;
+
+     emitGetVirtualRegister(srcDst, regT0);
+     emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ #if USE(ALTERNATE_JSIMMEDIATE)
+     addSlowCase(branchSub32(Zero, Imm32(1), regT0));
+     emitFastArithIntToImmNoCheck(regT0, regT0);
+ #else
+     addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
+     signExtend32ToPtr(regT0, regT0);
+ #endif
+     emitPutVirtualRegister(srcDst);
+ }
+
+ void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+ {
+     unsigned srcDst = currentInstruction[1].u.operand;
+
+     Jump notImm = getSlowCase(iter);
+     linkSlowCase(iter);
+     emitGetVirtualRegister(srcDst, regT0);
+     notImm.link(this);
+     JITStubCall stubCall(this, JITStubs::cti_op_pre_dec);
+     stubCall.addArgument(regT0);
+     stubCall.call(srcDst);
+ }
+
+ /* ------------------------------ BEGIN: OP_MOD ------------------------------ */
+
  #if PLATFORM(X86) || PLATFORM(X86_64)
+
  void JIT::emit_op_mod(Instruction* currentInstruction)
  {
…
      emitPutVirtualRegister(result);
  }
+
  void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
  {
…
      stubCall.call(result);
  }
- #else
+
+ #else // PLATFORM(X86) || PLATFORM(X86_64)
+
  void JIT::emit_op_mod(Instruction* currentInstruction)
  {
…
      stubCall.call(result);
  }
+
  void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
  {
      ASSERT_NOT_REACHED();
  }
- #endif
-
- void JIT::emit_op_post_inc(Instruction* currentInstruction)
- {
-     unsigned result = currentInstruction[1].u.operand;
-     unsigned srcDst = currentInstruction[2].u.operand;
-
-     emitGetVirtualRegister(srcDst, regT0);
-     move(regT0, regT1);
-     emitJumpSlowCaseIfNotImmediateInteger(regT0);
- #if USE(ALTERNATE_JSIMMEDIATE)
-     addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
-     emitFastArithIntToImmNoCheck(regT1, regT1);
- #else
-     addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
-     signExtend32ToPtr(regT1, regT1);
- #endif
-     emitPutVirtualRegister(srcDst, regT1);
-     emitPutVirtualRegister(result);
- }
-
- void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
- {
-     unsigned result = currentInstruction[1].u.operand;
-     unsigned srcDst = currentInstruction[2].u.operand;
-
-     linkSlowCase(iter);
-     linkSlowCase(iter);
-     JITStubCall stubCall(this, JITStubs::cti_op_post_inc);
-     stubCall.addArgument(regT0);
-     stubCall.addArgument(Imm32(srcDst));
-     stubCall.call(result);
- }
-
- void JIT::emit_op_post_dec(Instruction* currentInstruction)
- {
-     unsigned result = currentInstruction[1].u.operand;
-     unsigned srcDst = currentInstruction[2].u.operand;
-
-     emitGetVirtualRegister(srcDst, regT0);
-     move(regT0, regT1);
-     emitJumpSlowCaseIfNotImmediateInteger(regT0);
- #if USE(ALTERNATE_JSIMMEDIATE)
-     addSlowCase(branchSub32(Zero, Imm32(1), regT1));
-     emitFastArithIntToImmNoCheck(regT1, regT1);
- #else
-     addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
-     signExtend32ToPtr(regT1, regT1);
- #endif
-     emitPutVirtualRegister(srcDst, regT1);
-     emitPutVirtualRegister(result);
- }
- void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
- {
-     unsigned result = currentInstruction[1].u.operand;
-     unsigned srcDst = currentInstruction[2].u.operand;
-
-     linkSlowCase(iter);
-     linkSlowCase(iter);
-     JITStubCall stubCall(this, JITStubs::cti_op_post_dec);
-     stubCall.addArgument(regT0);
-     stubCall.addArgument(Imm32(srcDst));
-     stubCall.call(result);
- }
-
- void JIT::emit_op_pre_inc(Instruction* currentInstruction)
- {
-     unsigned srcDst = currentInstruction[1].u.operand;
-
-     emitGetVirtualRegister(srcDst, regT0);
-     emitJumpSlowCaseIfNotImmediateInteger(regT0);
- #if USE(ALTERNATE_JSIMMEDIATE)
-     addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
-     emitFastArithIntToImmNoCheck(regT0, regT0);
- #else
-     addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
-     signExtend32ToPtr(regT0, regT0);
- #endif
-     emitPutVirtualRegister(srcDst);
- }
- void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
- {
-     unsigned srcDst = currentInstruction[1].u.operand;
-
-     Jump notImm = getSlowCase(iter);
-     linkSlowCase(iter);
-     emitGetVirtualRegister(srcDst, regT0);
-     notImm.link(this);
-     JITStubCall stubCall(this, JITStubs::cti_op_pre_inc);
-     stubCall.addArgument(regT0);
-     stubCall.call(srcDst);
- }
-
- void JIT::emit_op_pre_dec(Instruction* currentInstruction)
- {
-     unsigned srcDst = currentInstruction[1].u.operand;
-
-     emitGetVirtualRegister(srcDst, regT0);
-     emitJumpSlowCaseIfNotImmediateInteger(regT0);
- #if USE(ALTERNATE_JSIMMEDIATE)
-     addSlowCase(branchSub32(Zero, Imm32(1), regT0));
-     emitFastArithIntToImmNoCheck(regT0, regT0);
- #else
-     addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
-     signExtend32ToPtr(regT0, regT0);
- #endif
-     emitPutVirtualRegister(srcDst);
- }
- void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
- {
-     unsigned srcDst = currentInstruction[1].u.operand;
-
-     Jump notImm = getSlowCase(iter);
-     linkSlowCase(iter);
-     emitGetVirtualRegister(srcDst, regT0);
-     notImm.link(this);
-     JITStubCall stubCall(this, JITStubs::cti_op_pre_dec);
-     stubCall.addArgument(regT0);
-     stubCall.call(srcDst);
- }

+ #endif // PLATFORM(X86) || PLATFORM(X86_64)
+
+ /* ------------------------------ END: OP_MOD ------------------------------ */

  #if !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+
+ /* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_ARITHMETIC) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */

  void JIT::emit_op_add(Instruction* currentInstruction)
…

  #elif USE(ALTERNATE_JSIMMEDIATE) // *AND* ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+
+ /* ------------------------------ BEGIN: USE(ALTERNATE_JSIMMEDIATE) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */

  void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
…
      emitPutVirtualRegister(result);
  }
+
  void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
  {
…
      emitPutVirtualRegister(result);
  }
+
  void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
  {
…
      emitPutVirtualRegister(result);
  }
+
  void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
  {
…
  }

- #else
+ #else // !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+
+ /* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_ARITHMETIC) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */

  typedef X86Assembler::JmpSrc JmpSrc;
…
      compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
  }
+
  void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
  {
…
  }

- #endif
+ #endif // !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+
+ /* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */

  } // namespace JSC
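The first hunk deletes the CPUID-based isSSE2Present() helper from this file. For readers unfamiliar with the check it performed: CPUID leaf 1 reports SSE2 support in bit 26 of EDX, which is exactly the SSE2FeatureBit the removed constructor tested on x86 targets that are not guaranteed to have SSE2. Below is a minimal stand-alone sketch of the same check using GCC/Clang's <cpuid.h> helper instead of the hand-written inline assembly; the header and __get_cpuid are not part of this changeset, just a convenient way to illustrate what the deleted code did.

    // Rough equivalent of the deleted isSSE2Present() helper, using <cpuid.h>
    // rather than inline assembly. CPUID leaf 1 reports SSE2 in bit 26 of EDX.
    #include <cpuid.h>
    #include <cstdio>

    static bool isSSE2Present()
    {
        unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return false;               // CPUID leaf 1 not available
        return (edx & (1u << 26)) != 0; // same bit as the removed SSE2FeatureBit check
    }

    int main()
    {
        std::printf("SSE2: %s\n", isSSE2Present() ? "yes" : "no");
        return 0;
    }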
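The moved emit_op_post_inc/emit_op_pre_inc fast paths increment a JavaScript value without un-boxing it. Under USE(ALTERNATE_JSIMMEDIATE) the integer payload occupies the low 32 bits of the register, so the code adds Imm32(1) and re-tags with emitFastArithIntToImmNoCheck; otherwise the integer is stored shifted left by JSImmediate::IntegerPayloadShift with a tag in the low bits, so adding Imm32(1 << JSImmediate::IntegerPayloadShift) bumps the payload while leaving the tag untouched, and the Overflow condition from branchAdd32 diverts to the slow case. The sketch below illustrates only the shifted-payload arithmetic; the constants are hypothetical stand-ins, not the real JSImmediate encoding.

    // Illustrative sketch (not the actual JSImmediate layout): an integer n is
    // encoded as (n << kPayloadShift) | kIntegerTag, so the encoded word can be
    // incremented without decoding by adding (1 << kPayloadShift), mirroring the
    // non-ALTERNATE_JSIMMEDIATE fast path above.
    #include <cstdint>
    #include <cstdio>

    static const intptr_t kPayloadShift = 1; // hypothetical stand-in for JSImmediate::IntegerPayloadShift
    static const intptr_t kIntegerTag = 1;   // hypothetical low tag bit marking an immediate integer

    static intptr_t encode(int32_t n) { return (static_cast<intptr_t>(n) << kPayloadShift) | kIntegerTag; }
    static int32_t decode(intptr_t imm) { return static_cast<int32_t>(imm >> kPayloadShift); }

    int main()
    {
        intptr_t imm = encode(41);
        imm += static_cast<intptr_t>(1) << kPayloadShift; // the JIT's branchAdd32(..., Imm32(1 << shift), ...)
        std::printf("%d\n", decode(imm));                 // prints 42; the real JIT also branches to a slow case on overflow
        return 0;
    }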
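Each fast path registers its bail-out branches with addSlowCase(), and the matching emitSlow_op_* function links them in the same order with linkSlowCase(iter) before falling through to a JITStubCall that redoes the operation in C++ (cti_op_post_inc and friends). That is why emitSlow_op_post_inc links two slow cases: one jump comes from the not-an-immediate-integer check and one from the add-overflow check, and both must land on the stub call. A rough sketch of that control-flow shape, with hypothetical types standing in for JSValue and the stub machinery (not JavaScriptCore API):

    #include <climits>
    #include <cstdio>

    // Hypothetical boxed value; stands in for a tagged JSValue.
    struct Value {
        bool isInt;
        int payload;
    };

    // Stand-in for a C++ slow-path stub such as cti_op_post_inc: it handles every
    // case the generated fast path refuses (wrong type, overflow, ...).
    static Value slowCaseStub(Value v)
    {
        // A real stub would apply full JavaScript numeric semantics here.
        return Value{ true, v.isInt ? v.payload : 0 };
    }

    static Value increment(Value v)
    {
        if (!v.isInt)                 // slow case #1: emitJumpSlowCaseIfNotImmediateInteger
            return slowCaseStub(v);
        if (v.payload == INT_MAX)     // slow case #2: branchAdd32(Overflow, ...)
            return slowCaseStub(v);
        return Value{ true, v.payload + 1 }; // the inline fast path
    }

    int main()
    {
        std::printf("%d\n", increment(Value{ true, 41 }).payload); // prints 42 via the fast path
        return 0;
    }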