diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 2d2213b420f5a..1b29f9bdc0d25 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5325,6 +5325,21 @@ static SDValue lowerDisjointIndicesShuffle(ShuffleVectorSDNode *SVN,
   return DAG.getVectorShuffle(VT, DL, Select, DAG.getUNDEF(VT), NewMask);
 }
 
+/// Is this mask local (i.e. elements only move within their local span), and
+/// repeating (that is, the same rearrangement is being done within each span)?
+static bool isLocalRepeatingShuffle(ArrayRef<int> Mask, int Span) {
+  // TODO: Could improve the case where undef elements exist in the first span.
+  for (auto [I, M] : enumerate(Mask)) {
+    if (M == -1)
+      continue;
+    int ChunkLo = I - (I % Span);
+    int ChunkHi = ChunkLo + Span;
+    if (M < ChunkLo || M >= ChunkHi || M - ChunkLo != Mask[I % Span])
+      return false;
+  }
+  return true;
+}
+
 /// Try to widen element type to get a new mask value for a better permutation
 /// sequence. This doesn't try to inspect the widened mask for profitability;
 /// we speculate the widened form is equal or better. This has the effect of
@@ -5686,10 +5701,43 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
                            : DAG.getUNDEF(XLenVT));
     }
     SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
-    LHSIndices = convertToScalableVector(IndexContainerVT, LHSIndices, DAG,
-                                         Subtarget);
-    SDValue Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
-                                 DAG.getUNDEF(ContainerVT), TrueMask, VL);
+    LHSIndices =
+        convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
+
+    SDValue Gather;
+    // If we have a locally repeating mask, then we can reuse the first
+    // register in the index register group for all registers within the
+    // source register group. TODO: This generalizes to m2 and m4. Also, this
+    // is currently picking up cases with a fully undef tail which could be
+    // more directly handled with fewer redundant vrgathers.
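+    // For example, with VLEN=128 an m1 register holds 8 e16 lanes, so a
+    // v16i16 mask of <1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14> repeats the
+    // same adjacent-pair swap in both 8-lane spans; a single m1 index
+    // vector can then feed the vrgather.vv for each of the two source
+    // registers.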
+    const MVT M1VT = getLMUL1VT(ContainerVT);
+    auto VLMAX = RISCVTargetLowering::computeVLMAXBounds(M1VT, Subtarget).first;
+    if (ContainerVT.bitsGT(M1VT) && isLocalRepeatingShuffle(Mask, VLMAX)) {
+      EVT SubIndexVT = M1VT.changeVectorElementType(IndexVT.getScalarType());
+      SDValue SubIndex =
+          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubIndexVT, LHSIndices,
+                      DAG.getVectorIdxConstant(0, DL));
+      auto [InnerTrueMask, InnerVL] =
+          getDefaultScalableVLOps(M1VT, DL, DAG, Subtarget);
+      int N = ContainerVT.getVectorMinNumElements() /
+              M1VT.getVectorMinNumElements();
+      assert(isPowerOf2_32(N) && N <= 8);
+      Gather = DAG.getUNDEF(ContainerVT);
+      for (int i = 0; i < N; i++) {
+        SDValue SubIdx =
+            DAG.getVectorIdxConstant(M1VT.getVectorMinNumElements() * i, DL);
+        SDValue SubV1 =
+            DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, M1VT, V1, SubIdx);
+        SDValue SubVec =
+            DAG.getNode(GatherVVOpc, DL, M1VT, SubV1, SubIndex,
+                        DAG.getUNDEF(M1VT), InnerTrueMask, InnerVL);
+        Gather = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT, Gather,
+                             SubVec, SubIdx);
+      }
+    } else {
+      Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
+                           DAG.getUNDEF(ContainerVT), TrueMask, VL);
+    }
     return convertFromScalableVector(VT, Gather, DAG, Subtarget);
   }
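
For reference, the predicate in the first hunk can be exercised on its own. The sketch below is a plain C++ stand-in (std::vector and an explicit index loop replace LLVM's ArrayRef<int> and llvm::enumerate; it is not part of the patch) showing which masks are treated as locally repeating:

#include <cassert>
#include <vector>

// -1 encodes an undef lane, matching the ShuffleVectorSDNode mask encoding.
static bool isLocalRepeatingShuffle(const std::vector<int> &Mask, int Span) {
  for (int I = 0, E = static_cast<int>(Mask.size()); I != E; ++I) {
    int M = Mask[I];
    if (M == -1)
      continue;
    // The [ChunkLo, ChunkHi) span this destination lane lives in.
    int ChunkLo = I - (I % Span);
    int ChunkHi = ChunkLo + Span;
    // Reject lanes that read across a span boundary, or that disagree with
    // the rearrangement done by the first span.
    if (M < ChunkLo || M >= ChunkHi || M - ChunkLo != Mask[I % Span])
      return false;
  }
  return true;
}

int main() {
  // Same adjacent-pair swap in every span of 2: locally repeating.
  assert(isLocalRepeatingShuffle({1, 0, 3, 2, 5, 4, 7, 6}, 2));
  // The second span does not repeat the first span's swap.
  assert(!isLocalRepeatingShuffle({1, 0, 2, 3}, 2));
  // Lanes cross span boundaries: not local.
  assert(!isLocalRepeatingShuffle({2, 3, 0, 1}, 2));
  // Undef lanes outside the first span are tolerated.
  assert(isLocalRepeatingShuffle({1, 0, -1, 2}, 2));
}

Note that a defined lane whose first-span counterpart is undef fails the comparison (M - ChunkLo can never equal -1); that is the case the in-code TODO proposes to improve.
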
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
index 5fd7e47507f71..71a15077be6eb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
@@ -874,27 +874,30 @@ define <16 x i8> @reverse_v16i8_2(<8 x i8> %a, <8 x i8> %b) {
 define <32 x i8> @reverse_v32i8_2(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-LABEL: reverse_v32i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    vid.v v12
-; CHECK-NEXT:    addi a1, a0, -1
-; CHECK-NEXT:    vrsub.vx v12, v12, a1
-; CHECK-NEXT:    lui a1, 16
-; CHECK-NEXT:    addi a1, a1, -1
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vid.v v10
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    addi a2, a0, -1
+; CHECK-NEXT:    vrsub.vx v10, v10, a2
+; CHECK-NEXT:    lui a2, 16
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vrgatherei16.vv v15, v8, v12
-; CHECK-NEXT:    vrgatherei16.vv v14, v9, v12
+; CHECK-NEXT:    vrgatherei16.vv v15, v8, v10
+; CHECK-NEXT:    vrgatherei16.vv v14, v12, v10
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vid.v v10
+; CHECK-NEXT:    addi a2, a2, -1
+; CHECK-NEXT:    vrsub.vi v10, v10, 15
+; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vrgather.vv v17, v13, v10
+; CHECK-NEXT:    vrgather.vv v16, v9, v10
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vmv.s.x v0, a1
-; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vmv.s.x v0, a2
 ; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    addi a0, a0, -32
-; CHECK-NEXT:    vrsub.vi v12, v8, 15
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v14, a0
-; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
 ; CHECK-NEXT:    ret
   %res = shufflevector <16 x i8> %a, <16 x i8> %b, <32 x i32> 
   ret <32 x i8> %res
@@ -943,23 +946,25 @@ define <8 x i16> @reverse_v8i16_2(<4 x i16> %a, <4 x i16> %b) {
 define <16 x i16> @reverse_v16i16_2(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK-LABEL: reverse_v16i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vid.v v10
 ; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    vrsub.vi v10, v10, 7
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vrgather.vv v13, v12, v10
+; CHECK-NEXT:    vrgather.vv v12, v9, v10
 ; CHECK-NEXT:    vid.v v9
 ; CHECK-NEXT:    srli a1, a0, 1
 ; CHECK-NEXT:    addi a1, a1, -1
 ; CHECK-NEXT:    vrsub.vx v9, v9, a1
-; CHECK-NEXT:    vrgather.vv v13, v8, v9
-; CHECK-NEXT:    vrgather.vv v12, v11, v9
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
-; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    li a1, 255
 ; CHECK-NEXT:    addi a0, a0, -16
-; CHECK-NEXT:    vrsub.vi v14, v8, 7
+; CHECK-NEXT:    vrgather.vv v15, v8, v9
+; CHECK-NEXT:    vrgather.vv v14, v10, v9
 ; CHECK-NEXT:    vmv.s.x v0, a1
-; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    vrgather.vv v8, v10, v14, v0.t
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v14, a0
+; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
 ; CHECK-NEXT:    ret
   %res = shufflevector <8 x i16> %a, <8 x i16> %b, <16 x i32> 
   ret <16 x i16> %res
@@ -1024,24 +1029,27 @@ define <4 x i32> @reverse_v4i32_2(<2 x i32> %a, <2 x i32> %b) {
 define <8 x i32> @reverse_v8i32_2(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: reverse_v8i32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vid.v v10
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    vid.v v9
-; CHECK-NEXT:    srli a1, a0, 2
-; CHECK-NEXT:    addi a1, a1, -1
-; CHECK-NEXT:    vrsub.vx v9, v9, a1
-; CHECK-NEXT:    vrgather.vv v13, v8, v9
-; CHECK-NEXT:    vrgather.vv v12, v11, v9
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vid.v v12
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    vmv.v.i v0, 15
+; CHECK-NEXT:    vrsub.vi v10, v10, 3
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v15, v11, v10
+; CHECK-NEXT:    vrgatherei16.vv v14, v9, v10
+; CHECK-NEXT:    srli a1, a0, 2
 ; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    vrsub.vi v14, v8, 3
+; CHECK-NEXT:    addi a1, a1, -1
 ; CHECK-NEXT:    addi a0, a0, -8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vrsub.vx v10, v12, a1
+; CHECK-NEXT:    vrgather.vv v13, v8, v10
+; CHECK-NEXT:    vrgather.vv v12, v9, v10
+; CHECK-NEXT:    vmv.v.i v0, 15
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    vrgatherei16.vv v8, v10, v14, v0.t
+; CHECK-NEXT:    vmerge.vvm v8, v8, v14, v0
 ; CHECK-NEXT:    ret
   %res = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> 
   ret <8 x i32> %res
@@ -1197,23 +1205,25 @@ define <8 x half> @reverse_v8f16_2(<4 x half> %a, <4 x half> %b) {
 define <16 x half> @reverse_v16f16_2(<8 x half> %a, <8 x half> %b) {
 ; CHECK-LABEL: reverse_v16f16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vid.v v10
 ; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    vrsub.vi v10, v10, 7
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vrgather.vv v13, v12, v10
+; CHECK-NEXT:    vrgather.vv v12, v9, v10
 ; CHECK-NEXT:    vid.v v9
 ; CHECK-NEXT:    srli a1, a0, 1
 ; CHECK-NEXT:    addi a1, a1, -1
 ; CHECK-NEXT:    vrsub.vx v9, v9, a1
-; CHECK-NEXT:    vrgather.vv v13, v8, v9
-; CHECK-NEXT:    vrgather.vv v12, v11, v9
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
-; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    li a1, 255
 ; CHECK-NEXT:    addi a0, a0, -16
-; CHECK-NEXT:    vrsub.vi v14, v8, 7
+; CHECK-NEXT:    vrgather.vv v15, v8, v9
+; CHECK-NEXT:    vrgather.vv v14, v10, v9
 ; CHECK-NEXT:    vmv.s.x v0, a1
-; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    vrgather.vv v8, v10, v14, v0.t
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v14, a0
+; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
 ; CHECK-NEXT:    ret
   %res = shufflevector <8 x half> %a, <8 x half> %b, <16 x i32> 
   ret <16 x half> %res
@@ -1269,24 +1279,27 @@ define <4 x float> @reverse_v4f32_2(<2 x float> %a, <2 x float> %b) {
 define <8 x float> @reverse_v8f32_2(<4 x float> %a, <4 x float> %b) {
 ; CHECK-LABEL: reverse_v8f32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vid.v v10
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    vid.v v9
-; CHECK-NEXT:    srli a1, a0, 2
-; CHECK-NEXT:    addi a1, a1, -1
-; CHECK-NEXT:    vrsub.vx v9, v9, a1
-; CHECK-NEXT:    vrgather.vv v13, v8, v9
-; CHECK-NEXT:    vrgather.vv v12, v11, v9
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vid.v v12
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    vmv.v.i v0, 15
+; CHECK-NEXT:    vrsub.vi v10, v10, 3
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v15, v11, v10
+; CHECK-NEXT:    vrgatherei16.vv v14, v9, v10
+; CHECK-NEXT:    srli a1, a0, 2
 ; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    vrsub.vi v14, v8, 3
+; CHECK-NEXT:    addi a1, a1, -1
 ; CHECK-NEXT:    addi a0, a0, -8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vrsub.vx v10, v12, a1
+; CHECK-NEXT:    vrgather.vv v13, v8, v10
+; CHECK-NEXT:    vrgather.vv v12, v9, v10
+; CHECK-NEXT:    vmv.v.i v0, 15
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    vrgatherei16.vv v8, v10, v14, v0.t
+; CHECK-NEXT:    vmerge.vvm v8, v8, v14, v0
 ; CHECK-NEXT:    ret
   %res = shufflevector <4 x float> %a, <4 x float> %b, <8 x i32> 
   ret <8 x float> %res
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll
index 464b4eca35aba..86d8a275a9055 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll
@@ -515,8 +515,10 @@ define <8 x i16> @shuffle_v8i16_as_i64_16(<8 x i16> %v) {
 ; ZVKB-ZVE32X-NEXT:    vsetivli zero, 8, e16, m2, ta, ma
 ; ZVKB-ZVE32X-NEXT:    vle8.v v10, (a0)
 ; ZVKB-ZVE32X-NEXT:    vsext.vf2 v12, v10
+; ZVKB-ZVE32X-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVKB-ZVE32X-NEXT:    vrgather.vv v11, v9, v12
 ; ZVKB-ZVE32X-NEXT:    vrgather.vv v10, v8, v12
-; ZVKB-ZVE32X-NEXT:    vmv.v.v v8, v10
+; ZVKB-ZVE32X-NEXT:    vmv2r.v v8, v10
 ; ZVKB-ZVE32X-NEXT:    ret
   %shuffle = shufflevector <8 x i16> %v, <8 x i16> poison, <8 x i32> 
   ret <8 x i16> %shuffle
@@ -562,9 +564,10 @@ define <8 x i16> @shuffle_v8i16_as_i64_32(<8 x i16> %v) {
 ; ZVKB-ZVE32X-NEXT:    vmv.s.x v10, a0
 ; ZVKB-ZVE32X-NEXT:    vsetivli zero, 4, e16, m1, ta, ma
 ; ZVKB-ZVE32X-NEXT:    vsext.vf2 v12, v10
-; ZVKB-ZVE32X-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVKB-ZVE32X-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v11, v9, v12
 ; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v10, v8, v12
-; ZVKB-ZVE32X-NEXT:    vmv.v.v v8, v10
+; ZVKB-ZVE32X-NEXT:    vmv2r.v v8, v10
 ; ZVKB-ZVE32X-NEXT:    ret
   %shuffle = shufflevector <8 x i16> %v, <8 x i16> poison, <8 x i32> 
   ret <8 x i16> %shuffle
@@ -609,8 +612,10 @@ define <8 x i16> @shuffle_v8i16_as_i64_48(<8 x i16> %v) {
 ; ZVKB-ZVE32X-NEXT:    vsetivli zero, 8, e16, m2, ta, ma
 ; ZVKB-ZVE32X-NEXT:    vle8.v v10, (a0)
 ; ZVKB-ZVE32X-NEXT:    vsext.vf2 v12, v10
+; ZVKB-ZVE32X-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVKB-ZVE32X-NEXT:    vrgather.vv v11, v9, v12
 ; ZVKB-ZVE32X-NEXT:    vrgather.vv v10, v8, v12
-; ZVKB-ZVE32X-NEXT:    vmv.v.v v8, v10
+; ZVKB-ZVE32X-NEXT:    vmv2r.v v8, v10
 ; ZVKB-ZVE32X-NEXT:    ret
   %shuffle = shufflevector <8 x i16> %v, <8 x i16> poison, <8 x i32> 
   ret <8 x i16> %shuffle
@@ -655,9 +660,12 @@ define <8 x i32> @shuffle_v8i32_as_i64(<8 x i32> %v) {
 ; ZVKB-ZVE32X-NEXT:    vsetivli zero, 8, e16, m2, ta, ma
 ; ZVKB-ZVE32X-NEXT:    vle8.v v12, (a0)
 ; ZVKB-ZVE32X-NEXT:    vsext.vf2 v16, v12
-; ZVKB-ZVE32X-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVKB-ZVE32X-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v13, v9, v16
 ; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v12, v8, v16
-; ZVKB-ZVE32X-NEXT:    vmv.v.v v8, v12
+; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v14, v10, v16
+; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v15, v11, v16
+; ZVKB-ZVE32X-NEXT:    vmv4r.v v8, v12
 ; ZVKB-ZVE32X-NEXT:    ret
   %shuffle = shufflevector <8 x i32> %v, <8 x i32> poison, <8 x i32> 
   ret <8 x i32> %shuffle
@@ -726,8 +734,10 @@ define <8 x half> @shuffle_v8f16_as_i64_16(<8 x half> %v) {
 ; ZVKB-ZVE32X-NEXT:    vsetivli zero, 8, e16, m2, ta, ma
 ; ZVKB-ZVE32X-NEXT:    vle8.v v10, (a0)
 ; ZVKB-ZVE32X-NEXT:    vsext.vf2 v12, v10
+; ZVKB-ZVE32X-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVKB-ZVE32X-NEXT:    vrgather.vv v11, v9, v12
 ; ZVKB-ZVE32X-NEXT:    vrgather.vv v10, v8, v12
-; ZVKB-ZVE32X-NEXT:    vmv.v.v v8, v10
+; ZVKB-ZVE32X-NEXT:    vmv2r.v v8, v10
 ; ZVKB-ZVE32X-NEXT:    ret
   %shuffle = shufflevector <8 x half> %v, <8 x half> poison, <8 x i32> 
   ret <8 x half> %shuffle
@@ -773,9 +783,10 @@ define <8 x half> @shuffle_v8f16_as_i64_32(<8 x half> %v) {
 ; ZVKB-ZVE32X-NEXT:    vmv.s.x v10, a0
 ; ZVKB-ZVE32X-NEXT:    vsetivli zero, 4, e16, m1, ta, ma
 ; ZVKB-ZVE32X-NEXT:    vsext.vf2 v12, v10
-; ZVKB-ZVE32X-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVKB-ZVE32X-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v11, v9, v12
 ; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v10, v8, v12
-; ZVKB-ZVE32X-NEXT:    vmv.v.v v8, v10
+; ZVKB-ZVE32X-NEXT:    vmv2r.v v8, v10
 ; ZVKB-ZVE32X-NEXT:    ret
   %shuffle = shufflevector <8 x half> %v, <8 x half> poison, <8 x i32> 
   ret <8 x half> %shuffle
@@ -820,8 +831,10 @@ define <8 x half> @shuffle_v8f16_as_i64_48(<8 x half> %v) {
 ; ZVKB-ZVE32X-NEXT:    vsetivli zero, 8, e16, m2, ta, ma
 ; ZVKB-ZVE32X-NEXT:    vle8.v v10, (a0)
 ; ZVKB-ZVE32X-NEXT:    vsext.vf2 v12, v10
+; ZVKB-ZVE32X-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVKB-ZVE32X-NEXT:    vrgather.vv v11, v9, v12
 ; ZVKB-ZVE32X-NEXT:    vrgather.vv v10, v8, v12
-; ZVKB-ZVE32X-NEXT:    vmv.v.v v8, v10
+; ZVKB-ZVE32X-NEXT:    vmv2r.v v8, v10
 ; ZVKB-ZVE32X-NEXT:    ret
   %shuffle = shufflevector <8 x half> %v, <8 x half> poison, <8 x i32> 
   ret <8 x half> %shuffle
@@ -866,9 +879,12 @@ define <8 x float> @shuffle_v8f32_as_i64(<8 x float> %v) {
 ; ZVKB-ZVE32X-NEXT:    vsetivli zero, 8, e16, m2, ta, ma
 ; ZVKB-ZVE32X-NEXT:    vle8.v v12, (a0)
 ; ZVKB-ZVE32X-NEXT:    vsext.vf2 v16, v12
-; ZVKB-ZVE32X-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVKB-ZVE32X-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v13, v9, v16
 ; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v12, v8, v16
-; ZVKB-ZVE32X-NEXT:    vmv.v.v v8, v12
+; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v14, v10, v16
+; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v15, v11, v16
+; ZVKB-ZVE32X-NEXT:    vmv4r.v v8, v12
 ; ZVKB-ZVE32X-NEXT:    ret
   %shuffle = shufflevector <8 x float> %v, <8 x float> poison, <8 x i32> 
   ret <8 x float> %shuffle
@@ -920,3 +936,87 @@ define <8 x float> @shuffle_v8f32_as_i64_exact(<8 x float> %v) vscale_range(2,2)
   %shuffle = shufflevector <8 x float> %v, <8 x float> poison, <8 x i32> 
   ret <8 x float> %shuffle
 }
+
+define <8 x i64> @shuffle_v8i64_as_i128(<8 x i64> %v) {
+; CHECK-LABEL: shuffle_v8i64_as_i128:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI29_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI29_0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v16, (a0)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v13, v9, v16
+; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
+; CHECK-NEXT:    vrgatherei16.vv v14, v10, v16
+; CHECK-NEXT:    vrgatherei16.vv v15, v11, v16
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
+;
+; ZVKB-V-LABEL: shuffle_v8i64_as_i128:
+; ZVKB-V:       # %bb.0:
+; ZVKB-V-NEXT:    lui a0, %hi(.LCPI29_0)
+; ZVKB-V-NEXT:    addi a0, a0, %lo(.LCPI29_0)
+; ZVKB-V-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVKB-V-NEXT:    vle16.v v16, (a0)
+; ZVKB-V-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; ZVKB-V-NEXT:    vrgatherei16.vv v13, v9, v16
+; ZVKB-V-NEXT:    vrgatherei16.vv v12, v8, v16
+; ZVKB-V-NEXT:    vrgatherei16.vv v14, v10, v16
+; ZVKB-V-NEXT:    vrgatherei16.vv v15, v11, v16
+; ZVKB-V-NEXT:    vmv4r.v v8, v12
+; ZVKB-V-NEXT:    ret
+  %shuffle = shufflevector <8 x i64> %v, <8 x i64> poison, <8 x i32> 
+  ret <8 x i64> %shuffle
+}
+
+define <8 x i64> @shuffle_v8i64_as_i256(<8 x i64> %v) {
+; CHECK-LABEL: shuffle_v8i64_as_i256:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI30_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI30_0)
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vle16.v v16, (a0)
+; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+;
+; ZVKB-V-LABEL: shuffle_v8i64_as_i256:
+; ZVKB-V:       # %bb.0:
+; ZVKB-V-NEXT:    lui a0, %hi(.LCPI30_0)
+; ZVKB-V-NEXT:    addi a0, a0, %lo(.LCPI30_0)
+; ZVKB-V-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; ZVKB-V-NEXT:    vle16.v v16, (a0)
+; ZVKB-V-NEXT:    vrgatherei16.vv v12, v8, v16
+; ZVKB-V-NEXT:    vmv.v.v v8, v12
+; ZVKB-V-NEXT:    ret
+  %shuffle = shufflevector <8 x i64> %v, <8 x i64> poison, <8 x i32> 
+  ret <8 x i64> %shuffle
+}
+
+define <8 x i64> @shuffle_v8i64_as_i256_zvl256b(<8 x i64> %v) vscale_range(4,0) {
+; CHECK-LABEL: shuffle_v8i64_as_i256_zvl256b:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI31_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI31_0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; CHECK-NEXT:    vle16.v v12, (a0)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v11, v9, v12
+; CHECK-NEXT:    vrgatherei16.vv v10, v8, v12
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+;
+; ZVKB-V-LABEL: shuffle_v8i64_as_i256_zvl256b:
+; ZVKB-V:       # %bb.0:
+; ZVKB-V-NEXT:    lui a0, %hi(.LCPI31_0)
+; ZVKB-V-NEXT:    addi a0, a0, %lo(.LCPI31_0)
+; ZVKB-V-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVKB-V-NEXT:    vle16.v v12, (a0)
+; ZVKB-V-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; ZVKB-V-NEXT:    vrgatherei16.vv v11, v9, v12
+; ZVKB-V-NEXT:    vrgatherei16.vv v10, v8, v12
+; ZVKB-V-NEXT:    vmv2r.v v8, v10
+; ZVKB-V-NEXT:    ret
+  %shuffle = shufflevector <8 x i64> %v, <8 x i64> poison, <8 x i32> 
+  ret <8 x i64> %shuffle
+}
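
Finally, a scalar model of the transformation itself may help: for a locally repeating mask, the lowering applies one span-sized index table (the single m1 index register) to every span of the source (every m1 register of the group) instead of issuing one full-width gather. The sketch below is illustrative only; gatherPerSpan is not an LLVM API:

#include <cassert>
#include <vector>

// Gather within each Span-sized chunk of Src, reusing the same span-local
// indices for every chunk -- the scalar analogue of driving N vrgather.vv
// ops with a single m1 index register.
static std::vector<int> gatherPerSpan(const std::vector<int> &Src,
                                      const std::vector<int> &SpanIdx) {
  const int Span = static_cast<int>(SpanIdx.size());
  std::vector<int> Dst(Src.size());
  for (int Lo = 0; Lo + Span <= static_cast<int>(Src.size()); Lo += Span)
    for (int I = 0; I < Span; ++I)
      Dst[Lo + I] = Src[Lo + SpanIdx[I]];
  return Dst;
}

int main() {
  // Mask <3,0,1,2,7,4,5,6> with a span of 4 rotates each 4-element chunk;
  // only the first chunk's indices <3,0,1,2> are needed.
  std::vector<int> Src = {10, 11, 12, 13, 14, 15, 16, 17};
  assert(gatherPerSpan(Src, {3, 0, 1, 2}) ==
         (std::vector<int>{13, 10, 11, 12, 17, 14, 15, 16}));
}

This per-span structure is what the new shuffle_v8i64_as_i128 and shuffle_v8i64_as_i256_zvl256b tests exercise: once an m1 register covers a whole repeating chunk, each destination register is produced by one m1 vrgatherei16.vv reusing the shared index vector.
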