@@ -34,7 +34,7 @@ use cranelift_codegen::{
     isa::{
         unwind::UnwindInst,
         x64::{
-            args::{Avx512Opcode, AvxOpcode, FenceKind, RegMemImm, XmmMemImm, CC},
+            args::{Avx512Opcode, AvxOpcode, FenceKind, CC},
             settings as x64_settings, AtomicRmwSeqOp,
         },
     },
@@ -1655,7 +1655,7 @@ impl Masm for MacroAssembler {
                     .xmm_vpcmpeq_rrr(writable!(lhs), lhs, rhs, kind.lane_size());
                 self.asm
                     .xmm_vpcmpeq_rrr(writable!(rhs), rhs, rhs, kind.lane_size());
-                self.asm.xmm_rmi_rvex(AvxOpcode::Vpxor, lhs, rhs, dst);
+                self.asm.xmm_vex_rr(AvxOpcode::Vpxor, lhs, rhs, dst);
             }
             VectorEqualityKind::F32x4 | VectorEqualityKind::F64x2 => {
                 self.asm
@@ -1694,7 +1694,7 @@ impl Masm for MacroAssembler {
                     .xmm_vpcmpeq_rrr(writable!(lhs), lhs, rhs, kind.lane_size());
                 self.asm
                     .xmm_vpcmpeq_rrr(writable!(rhs), rhs, rhs, kind.lane_size());
-                self.asm.xmm_rmi_rvex(AvxOpcode::Vpxor, lhs, rhs, dst);
+                self.asm.xmm_vex_rr(AvxOpcode::Vpxor, lhs, rhs, dst);
             }
             VectorCompareKind::F32x4 | VectorCompareKind::F64x2 => {
                 self.asm
@@ -1727,7 +1727,7 @@ impl Masm for MacroAssembler {
                     .xmm_vpcmpgt_rrr(writable!(lhs), lhs, rhs, kind.lane_size());
                 self.asm
                     .xmm_vpcmpeq_rrr(writable!(rhs), rhs, rhs, kind.lane_size());
-                self.asm.xmm_rmi_rvex(AvxOpcode::Vpxor, lhs, rhs, dst);
+                self.asm.xmm_vex_rr(AvxOpcode::Vpxor, lhs, rhs, dst);
             }
             VectorCompareKind::I8x16U | VectorCompareKind::I16x8U | VectorCompareKind::I32x4U => {
                 // Set the `rhs` vector to the signed minimum values and then
@@ -1772,7 +1772,7 @@ impl Masm for MacroAssembler {
                     .xmm_vpcmpeq_rrr(writable!(lhs), lhs, rhs, kind.lane_size());
                 self.asm
                     .xmm_vpcmpeq_rrr(writable!(rhs), rhs, rhs, kind.lane_size());
-                self.asm.xmm_rmi_rvex(AvxOpcode::Vpxor, lhs, rhs, dst);
+                self.asm.xmm_vex_rr(AvxOpcode::Vpxor, lhs, rhs, dst);
             }
             VectorCompareKind::F32x4 | VectorCompareKind::F64x2 => {
                 // Do a less than comparison with the operands swapped.
@@ -1806,7 +1806,7 @@ impl Masm for MacroAssembler {
                     .xmm_vpcmpgt_rrr(writable!(rhs), rhs, lhs, kind.lane_size());
                 self.asm.xmm_vpcmpeq_rrr(dst, lhs, lhs, kind.lane_size());
                 self.asm
-                    .xmm_rmi_rvex(AvxOpcode::Vpxor, dst.to_reg(), rhs, dst);
+                    .xmm_vex_rr(AvxOpcode::Vpxor, dst.to_reg(), rhs, dst);
             }
             VectorCompareKind::I8x16U | VectorCompareKind::I16x8U | VectorCompareKind::I32x4U => {
                 // Set lanes to maximum values and compare them for equality.
@@ -1835,34 +1835,34 @@ impl Masm for MacroAssembler {
         let tmp = regs::scratch_xmm();
         // First, we initialize `tmp` with all ones, by comparing it with itself.
         self.asm
-            .xmm_rmi_rvex(AvxOpcode::Vpcmpeqd, tmp, tmp, writable!(tmp));
+            .xmm_vex_rr(AvxOpcode::Vpcmpeqd, tmp, tmp, writable!(tmp));
         // then we `xor` tmp and `dst` together, yielding `!dst`.
         self.asm
-            .xmm_rmi_rvex(AvxOpcode::Vpxor, tmp, dst.to_reg(), dst);
+            .xmm_vex_rr(AvxOpcode::Vpxor, tmp, dst.to_reg(), dst);
         Ok(())
     }

     fn v128_and(&mut self, src1: Reg, src2: Reg, dst: WritableReg) -> Result<()> {
         self.ensure_has_avx()?;
-        self.asm.xmm_rmi_rvex(AvxOpcode::Vpand, src1, src2, dst);
+        self.asm.xmm_vex_rr(AvxOpcode::Vpand, src1, src2, dst);
         Ok(())
     }

     fn v128_and_not(&mut self, src1: Reg, src2: Reg, dst: WritableReg) -> Result<()> {
         self.ensure_has_avx()?;
-        self.asm.xmm_rmi_rvex(AvxOpcode::Vpandn, src1, src2, dst);
+        self.asm.xmm_vex_rr(AvxOpcode::Vpandn, src1, src2, dst);
         Ok(())
     }

     fn v128_or(&mut self, src1: Reg, src2: Reg, dst: WritableReg) -> Result<()> {
         self.ensure_has_avx()?;
-        self.asm.xmm_rmi_rvex(AvxOpcode::Vpor, src1, src2, dst);
+        self.asm.xmm_vex_rr(AvxOpcode::Vpor, src1, src2, dst);
         Ok(())
     }

     fn v128_xor(&mut self, src1: Reg, src2: Reg, dst: WritableReg) -> Result<()> {
         self.ensure_has_avx()?;
-        self.asm.xmm_rmi_rvex(AvxOpcode::Vpxor, src1, src2, dst);
+        self.asm.xmm_vex_rr(AvxOpcode::Vpxor, src1, src2, dst);
         Ok(())
     }

@@ -1913,7 +1913,7 @@ impl Masm for MacroAssembler {
             },
         };

-        self.asm.xmm_rmi_rvex(op, lhs, rhs, dst);
+        self.asm.xmm_vex_rr(op, lhs, rhs, dst);

        Ok(())
    }
@@ -1948,7 +1948,7 @@ impl Masm for MacroAssembler {
             },
         };

-        self.asm.xmm_rmi_rvex(op, lhs, rhs, dst);
+        self.asm.xmm_vex_rr(op, lhs, rhs, dst);

        Ok(())
    }
@@ -1965,7 +1965,7 @@ impl Masm for MacroAssembler {

         let mul_avx = |this: &mut Self, op| {
             this.asm
-                .xmm_rmi_rvex(op, lhs.reg, rhs.reg, writable!(lhs.reg));
+                .xmm_vex_rr(op, lhs.reg, rhs.reg, writable!(lhs.reg));
         };

         let mul_i64x2_avx512 = |this: &mut Self| {
@@ -2004,48 +2004,36 @@ impl Masm for MacroAssembler {
             let tmp2 = context.any_fpr(this)?;

             // tmp1 = lhs_hi = (lhs >> 32)
-            this.asm.xmm_rmi_rvex(
-                AvxOpcode::Vpsrlq,
-                lhs.reg,
-                XmmMemImm::unwrap_new(RegMemImm::imm(32)),
-                writable!(tmp1),
-            );
+            this.asm
+                .xmm_vex_ri(AvxOpcode::Vpsrlq, lhs.reg, 32, writable!(tmp1));
             // tmp2 = lhs_hi * rhs_low = tmp1 * rhs
             this.asm
-                .xmm_rmi_rvex(AvxOpcode::Vpmuldq, tmp1, rhs.reg, writable!(tmp2));
+                .xmm_vex_rr(AvxOpcode::Vpmuldq, tmp1, rhs.reg, writable!(tmp2));

             // tmp1 = rhs_hi = rhs >> 32
-            this.asm.xmm_rmi_rvex(
-                AvxOpcode::Vpsrlq,
-                rhs.reg,
-                XmmMemImm::unwrap_new(RegMemImm::imm(32)),
-                writable!(tmp1),
-            );
+            this.asm
+                .xmm_vex_ri(AvxOpcode::Vpsrlq, rhs.reg, 32, writable!(tmp1));

             // tmp1 = lhs_low * rhs_high = tmp1 * lhs
             this.asm
-                .xmm_rmi_rvex(AvxOpcode::Vpmuludq, tmp1, lhs.reg, writable!(tmp1));
+                .xmm_vex_rr(AvxOpcode::Vpmuludq, tmp1, lhs.reg, writable!(tmp1));

             // tmp1 = ((lhs_hi * rhs_low) + (lhs_lo * rhs_hi)) = tmp1 + tmp2
             this.asm
-                .xmm_rmi_rvex(AvxOpcode::Vpaddq, tmp1, tmp2, writable!(tmp1));
+                .xmm_vex_rr(AvxOpcode::Vpaddq, tmp1, tmp2, writable!(tmp1));

             //tmp1 = tmp1 << 32
-            this.asm.xmm_rmi_rvex(
-                AvxOpcode::Vpsllq,
-                tmp1,
-                XmmMemImm::unwrap_new(RegMemImm::imm(32)),
-                writable!(tmp1),
-            );
+            this.asm
+                .xmm_vex_ri(AvxOpcode::Vpsllq, tmp1, 32, writable!(tmp1));

             // tmp2 = lhs_lo + rhs_lo
             this.asm
-                .xmm_rmi_rvex(AvxOpcode::Vpmuludq, lhs.reg, rhs.reg, writable!(tmp2));
+                .xmm_vex_rr(AvxOpcode::Vpmuludq, lhs.reg, rhs.reg, writable!(tmp2));

             // finally, with `lhs` as destination:
             // lhs = (lhs_low * rhs_low) + ((lhs_hi * rhs_low) + (lhs_lo * rhs_hi)) = tmp1 + tmp2
             this.asm
-                .xmm_rmi_rvex(AvxOpcode::Vpaddq, tmp1, tmp2, writable!(lhs.reg));
+                .xmm_vex_rr(AvxOpcode::Vpaddq, tmp1, tmp2, writable!(lhs.reg));

             context.free_reg(tmp2);

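Note on the i64x2 fallback in the last hunk: per 64-bit lane it computes lhs * rhs mod 2^64 by splitting each operand into 32-bit halves, so lhs * rhs = lhs_lo * rhs_lo + ((lhs_hi * rhs_lo + lhs_lo * rhs_hi) << 32), the high-by-high term vanishing mod 2^64. A minimal scalar sketch of that decomposition, in plain Rust; the function name here is illustrative only and is not part of this change:

// Scalar model of the per-lane multiply performed by the AVX fallback above.
// Signedness does not matter because the result is truncated to 64 bits.
fn mul_via_u32_halves(lhs: u64, rhs: u64) -> u64 {
    let (lhs_lo, lhs_hi) = (lhs & 0xffff_ffff, lhs >> 32);
    let (rhs_lo, rhs_hi) = (rhs & 0xffff_ffff, rhs >> 32);
    // The cross terms only contribute to the upper 32 bits of the truncated product.
    let cross = lhs_hi
        .wrapping_mul(rhs_lo)
        .wrapping_add(lhs_lo.wrapping_mul(rhs_hi))
        .wrapping_shl(32);
    lhs_lo.wrapping_mul(rhs_lo).wrapping_add(cross)
}

This is the same algebra that the shift/multiply/add sequence in the diff implements per lane, with xmm_vex_ri now carrying the 32-bit shift immediates that previously went through XmmMemImm::unwrap_new(RegMemImm::imm(32)).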