@@ -596,6 +596,7 @@ public static class AMD64MIOp extends AMD64ImmOp {
         // @formatter:off
         public static final AMD64MIOp BT   = new AMD64MIOp("BT",   true, P_0F, 0xBA, 4, true, OpAssertion.WordOrLargerAssertion);
         public static final AMD64MIOp BTR  = new AMD64MIOp("BTR",  true, P_0F, 0xBA, 6, true, OpAssertion.WordOrLargerAssertion);
+        public static final AMD64MIOp BTS  = new AMD64MIOp("BTS",  true, P_0F, 0xBA, 5, true, OpAssertion.WordOrLargerAssertion);
         public static final AMD64MIOp MOVB = new AMD64MIOp("MOVB", true, 0xC6, 0, false, OpAssertion.ByteAssertion);
         public static final AMD64MIOp MOV  = new AMD64MIOp("MOV",  false, 0xC7, 0, false, OpAssertion.WordOrLargerAssertion);
         public static final AMD64MIOp SAR  = new AMD64MIOp("SAR",  true, 0xC1, 7, true, OpAssertion.WordOrLargerAssertion);
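The immediate forms of BT, BTS, and BTR all share opcode 0x0F 0xBA and are distinguished by the ModRM reg field (/4 for BT, /5 for BTS, /6 for BTR), which is the fourth constructor argument above. A minimal usage sketch, assuming an AMD64Assembler instance `asm` and the jdk.vm.ci.amd64.AMD64 register definitions:

    // Set bit 5 of rax; CF receives the bit's previous value (encodes as 0F BA /5 ib).
    AMD64MIOp.BTS.emit(asm, OperandSize.QWORD, AMD64.rax, 5);
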
@@ -679,8 +680,9 @@ public boolean isMemRead() {
      * <p>
      * Note that when {@code src} is a memory address, we will choose {@code dst} as {@code nds}
      * even if {@link PreferredNDS#SRC} is specified, which implies an implicit dependency to
-     * {@code dst}. In {@link jdk.graal.compiler.lir.amd64.vector.AMD64VectorUnary.AVXConvertOp}, we
-     * manually insert an {@code XOR} instruction for {@code dst}.
+     * {@code dst}. In
+     * {@link jdk.graal.compiler.lir.amd64.vector.AMD64VectorUnary.AVXConvertToFloatOp}, we manually
+     * insert an {@code XOR} instruction for {@code dst}.
      */
     private enum PreferredNDS {
         NONE,
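The XOR mentioned in the javadoc exists to break the false output dependency that arises when dst is reused as nds: zeroing dst first keeps the convert from serializing against dst's previous writer. A hedged sketch of the pattern, assuming xmm register `dst`, a general-purpose `src`, and the usual emit signatures on these ops:

    VexRVMOp.VPXOR.emit(asm, AVXSize.XMM, dst, dst, dst);              // clear dst to cut the dependency chain
    VexRVMConvertOp.EVCVTUSI2SS.emit(asm, AVXSize.XMM, dst, dst, src); // dst doubles as nds
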
@@ -1316,9 +1318,11 @@ private enum VEXOpAssertion {
         XMM_CPU_AVX1_AVX512BW_128ONLY(VEXFeatureAssertion.AVX1_128, EVEXFeatureAssertion.AVX512BW_128, XMM, null, CPU),
         XMM_CPU_AVX1_AVX512DQ_128ONLY(VEXFeatureAssertion.AVX1_128, EVEXFeatureAssertion.AVX512DQ_128, XMM, null, CPU),
         CPU_XMM_AVX1_AVX512F_128ONLY(VEXFeatureAssertion.AVX1_128, EVEXFeatureAssertion.AVX512F_128, CPU, null, XMM),
+        CPU_XMM_AVX512F_128ONLY(null, EVEXFeatureAssertion.AVX512F_128, CPU, null, XMM),
         XMM_XMM_CPU_AVX1_AVX512F_128ONLY(VEXFeatureAssertion.AVX1_128, EVEXFeatureAssertion.AVX512F_128, XMM, XMM, CPU),
         XMM_XMM_CPU_AVX1_AVX512BW_128ONLY(VEXFeatureAssertion.AVX1_128, EVEXFeatureAssertion.AVX512BW_128, XMM, XMM, CPU),
         XMM_XMM_CPU_AVX1_AVX512DQ_128ONLY(VEXFeatureAssertion.AVX1_128, EVEXFeatureAssertion.AVX512DQ_128, XMM, XMM, CPU),
+        XMM_XMM_CPU_AVX512F_128ONLY(null, EVEXFeatureAssertion.AVX512F_128, XMM, XMM, CPU),
         XMM_CPU_AVX512BW_VL(null, EVEXFeatureAssertion.AVX512F_BW_VL, XMM, null, CPU),
         XMM_CPU_AVX512F_VL(null, EVEXFeatureAssertion.AVX512F_VL, XMM, null, CPU),
         AVX1_AVX512F_VL(VEXFeatureAssertion.AVX1, EVEXFeatureAssertion.AVX512F_VL, XMM, XMM, XMM),
@@ -1680,8 +1684,12 @@ public static class VexRMOp extends VexRROp {
         // EVEX encoded instructions
         public static final VexRMOp EVCVTTSS2SI = new VexRMOp("EVCVTTSS2SI", VCVTTSS2SI);
         public static final VexRMOp EVCVTTSS2SQ = new VexRMOp("EVCVTTSS2SQ", VCVTTSS2SQ);
+        public static final VexRMOp EVCVTTSS2USI = new VexRMOp("EVCVTTSS2USI", VEXPrefixConfig.P_F3, VEXPrefixConfig.M_0F, VEXPrefixConfig.W0, 0x78, VEXOpAssertion.CPU_XMM_AVX512F_128ONLY, EVEXTuple.T1F_32BIT, VEXPrefixConfig.W0, true);
+        public static final VexRMOp EVCVTTSS2USQ = new VexRMOp("EVCVTTSS2USQ", VEXPrefixConfig.P_F3, VEXPrefixConfig.M_0F, VEXPrefixConfig.W1, 0x78, VEXOpAssertion.CPU_XMM_AVX512F_128ONLY, EVEXTuple.T1F_32BIT, VEXPrefixConfig.W1, true);
         public static final VexRMOp EVCVTTSD2SI = new VexRMOp("EVCVTTSD2SI", VCVTTSD2SI);
         public static final VexRMOp EVCVTTSD2SQ = new VexRMOp("EVCVTTSD2SQ", VCVTTSD2SQ);
+        public static final VexRMOp EVCVTTSD2USI = new VexRMOp("EVCVTTSD2USI", VEXPrefixConfig.P_F2, VEXPrefixConfig.M_0F, VEXPrefixConfig.W0, 0x78, VEXOpAssertion.CPU_XMM_AVX512F_128ONLY, EVEXTuple.T1F_64BIT, VEXPrefixConfig.W0, true);
+        public static final VexRMOp EVCVTTSD2USQ = new VexRMOp("EVCVTTSD2USQ", VEXPrefixConfig.P_F2, VEXPrefixConfig.M_0F, VEXPrefixConfig.W1, 0x78, VEXOpAssertion.CPU_XMM_AVX512F_128ONLY, EVEXTuple.T1F_64BIT, VEXPrefixConfig.W1, true);
         public static final VexRMOp EVCVTPS2PD = new VexRMOp("EVCVTPS2PD", VCVTPS2PD);
         public static final VexRMOp EVCVTPD2PS = new VexRMOp("EVCVTPD2PS", VCVTPD2PS);
         public static final VexRMOp EVCVTDQ2PS = new VexRMOp("EVCVTDQ2PS", VCVTDQ2PS);
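The four new ops are the AVX-512F truncating float-to-unsigned conversions: opcode 0x78 in the 0F map, F3 prefix for the SS forms and F2 for the SD forms, with W selecting the 32- vs 64-bit integer width. They pass no VEX feature assertion (the new CPU_XMM_AVX512F_128ONLY assertion above carries null for VEX) because these instructions have no VEX encoding; the trailing `true` forces EVEX. A hedged usage sketch, assuming AVX-512F support and an AMD64Assembler `asm`:

    // rax := (unsigned long) truncate(xmm0[31:0] interpreted as float)
    VexRMOp.EVCVTTSS2USQ.emit(asm, AVXSize.XMM, AMD64.rax, AMD64.xmm0);
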
@@ -2588,12 +2596,21 @@ public static final class VexRVMConvertOp extends VexRVMOp {
         public static final VexRVMConvertOp EVCVTSQ2SD = new VexRVMConvertOp("EVCVTSQ2SD", VCVTSQ2SD);
         public static final VexRVMConvertOp EVCVTSI2SS = new VexRVMConvertOp("EVCVTSI2SS", VCVTSI2SS);
         public static final VexRVMConvertOp EVCVTSQ2SS = new VexRVMConvertOp("EVCVTSQ2SS", VCVTSQ2SS);
+
+        public static final VexRVMConvertOp EVCVTUSI2SD = new VexRVMConvertOp("EVCVTUSI2SD", VEXPrefixConfig.P_F2, VEXPrefixConfig.M_0F, VEXPrefixConfig.W0, 0x7B, VEXOpAssertion.XMM_XMM_CPU_AVX512F_128ONLY, EVEXTuple.T1S_32BIT, VEXPrefixConfig.W0, true);
+        public static final VexRVMConvertOp EVCVTUSQ2SD = new VexRVMConvertOp("EVCVTUSQ2SD", VEXPrefixConfig.P_F2, VEXPrefixConfig.M_0F, VEXPrefixConfig.W0, 0x7B, VEXOpAssertion.XMM_XMM_CPU_AVX512F_128ONLY, EVEXTuple.T1S_64BIT, VEXPrefixConfig.W1, true);
+        public static final VexRVMConvertOp EVCVTUSI2SS = new VexRVMConvertOp("EVCVTUSI2SS", VEXPrefixConfig.P_F3, VEXPrefixConfig.M_0F, VEXPrefixConfig.W0, 0x7B, VEXOpAssertion.XMM_XMM_CPU_AVX512F_128ONLY, EVEXTuple.T1S_32BIT, VEXPrefixConfig.W0, true);
+        public static final VexRVMConvertOp EVCVTUSQ2SS = new VexRVMConvertOp("EVCVTUSQ2SS", VEXPrefixConfig.P_F3, VEXPrefixConfig.M_0F, VEXPrefixConfig.W0, 0x7B, VEXOpAssertion.XMM_XMM_CPU_AVX512F_128ONLY, EVEXTuple.T1S_64BIT, VEXPrefixConfig.W1, true);
         // @formatter:on

         private VexRVMConvertOp(String opcode, int pp, int mmmmm, int w, int op, VEXOpAssertion assertion, EVEXTuple evexTuple, int wEvex) {
             super(opcode, pp, mmmmm, w, op, assertion, evexTuple, wEvex);
         }

+        private VexRVMConvertOp(String opcode, int pp, int mmmmm, int w, int op, VEXOpAssertion assertion, EVEXTuple evexTuple, int wEvex, boolean isEvex) {
+            super(opcode, pp, mmmmm, w, op, assertion, evexTuple, wEvex, isEvex);
+        }
+
         /**
          * Build the EVEX variant of a given vexOp.
          */
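The new constructor threads an `isEvex` flag through to the VexRVMOp superclass so these unsigned-convert ops (opcode 0x7B, AVX-512F only) are emitted with an EVEX prefix even though they have no VEX counterpart. A hedged sketch of emitting one of them, assuming `asm`, an xmm `dst`, and a general-purpose `src` holding an unsigned 64-bit value:

    // dst[63:0] := (double) (unsigned long) src; dst is reused as nds (see the PreferredNDS note above).
    VexRVMConvertOp.EVCVTUSQ2SD.emit(asm, AVXSize.XMM, dst, dst, src);
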
@@ -4863,6 +4880,10 @@ public final void btrq(Register src, int imm8) {
         AMD64MIOp.BTR.emit(this, OperandSize.QWORD, src, imm8);
     }

+    public final void btsq(Register src, int imm8) {
+        AMD64MIOp.BTS.emit(this, OperandSize.QWORD, src, imm8);
+    }
+
     public final void cmpb(Register dst, Register src) {
         AMD64BinaryArithmetic.CMP.byteRmOp.emit(this, OperandSize.BYTE, dst, src);
     }
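The btsq helper mirrors the existing btrq wrapper directly above it. A hedged usage note, assuming `asm` is an AMD64Assembler:

    asm.btsq(AMD64.rax, 63); // set the sign bit of rax; CF := the bit's previous value
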
@@ -5874,6 +5895,14 @@ public final void subsd(Register dst, AMD64Address src) {
         SSEOp.SUB.emit(this, OperandSize.SD, dst, src);
     }

+    public final void subss(Register dst, Register src) {
+        SSEOp.SUB.emit(this, OperandSize.SS, dst, src);
+    }
+
+    public final void subss(Register dst, AMD64Address src) {
+        SSEOp.SUB.emit(this, OperandSize.SS, dst, src);
+    }
+
     public final void testl(Register dst, Register src) {
         AMD64RMOp.TEST.emit(this, OperandSize.DWORD, dst, src);
     }
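The subss pair mirrors the existing subsd helpers one hunk up, selecting the scalar-single operand size (SS) instead of scalar-double (SD). A hedged usage sketch, assuming `asm` and the AMD64 register definitions:

    asm.subss(AMD64.xmm0, AMD64.xmm1);                      // xmm0[31:0] := xmm0[31:0] - xmm1[31:0]
    asm.subss(AMD64.xmm0, new AMD64Address(AMD64.rsp, 16)); // memory-operand form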