Skip to content

Commit c4d96dc

Browse files
committed
[RISCV] Deprecate riscv.segN.load/store in favor of their mask variants
RISCVVectorPeepholePass would replace instructions that have an all-ones mask with their unmasked variants, so there isn't really a point in keeping separate versions of these intrinsics.
1 parent 4a8852f commit c4d96dc

File tree

7 files changed

+84
-243
lines changed

7 files changed

+84
-243
lines changed

llvm/include/llvm/IR/IntrinsicsRISCV.td

+4-16
Original file line numberDiff line numberDiff line change
@@ -1704,14 +1704,10 @@ let TargetPrefix = "riscv" in {
17041704
}
17051705

17061706
// Segment loads/stores for fixed vectors.
1707+
// Note: we only have the masked variants because RISCVVectorPeephole
1708+
// would lower any instructions with all-ones mask into unmasked version
1709+
// anyway.
17071710
foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
1708-
// Input: (pointer, vl)
1709-
def int_riscv_seg # nf # _load
1710-
: DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
1711-
!listsplat(LLVMMatchType<0>,
1712-
!add(nf, -1))),
1713-
[llvm_anyptr_ty, llvm_anyint_ty],
1714-
[NoCapture<ArgIndex<0>>, IntrReadMem]>;
17151711
// Input: (pointer, mask, vl)
17161712
def int_riscv_seg # nf # _load_mask
17171713
: DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
@@ -1721,15 +1717,7 @@ let TargetPrefix = "riscv" in {
17211717
llvm_anyint_ty],
17221718
[NoCapture<ArgIndex<0>>, IntrReadMem]>;
17231719

1724-
// Input: (<stored values>, pointer, vl)
1725-
def int_riscv_seg # nf # _store
1726-
: DefaultAttrsIntrinsic<[],
1727-
!listconcat([llvm_anyvector_ty],
1728-
!listsplat(LLVMMatchType<0>,
1729-
!add(nf, -1)),
1730-
[llvm_anyptr_ty, llvm_anyint_ty]),
1731-
[NoCapture<ArgIndex<nf>>, IntrWriteMem]>;
1732-
// Input: (<stored values>, pointer, mask, vl)
1720+
// Input: (<stored values>..., pointer, mask, vl)
17331721
def int_riscv_seg # nf # _store_mask
17341722
: DefaultAttrsIntrinsic<[],
17351723
!listconcat([llvm_anyvector_ty],

llvm/lib/Target/RISCV/RISCVISelLowering.cpp

+37-94
Original file line numberDiff line numberDiff line change
@@ -1724,13 +1724,6 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
17241724
Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
17251725
MachineMemOperand::MOVolatile;
17261726
return true;
1727-
case Intrinsic::riscv_seg2_load:
1728-
case Intrinsic::riscv_seg3_load:
1729-
case Intrinsic::riscv_seg4_load:
1730-
case Intrinsic::riscv_seg5_load:
1731-
case Intrinsic::riscv_seg6_load:
1732-
case Intrinsic::riscv_seg7_load:
1733-
case Intrinsic::riscv_seg8_load:
17341727
case Intrinsic::riscv_seg2_load_mask:
17351728
case Intrinsic::riscv_seg3_load_mask:
17361729
case Intrinsic::riscv_seg4_load_mask:
@@ -1740,17 +1733,6 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
17401733
case Intrinsic::riscv_seg8_load_mask:
17411734
return SetRVVLoadStoreInfo(/*PtrOp*/ 0, /*IsStore*/ false,
17421735
/*IsUnitStrided*/ false, /*UsePtrVal*/ true);
1743-
case Intrinsic::riscv_seg2_store:
1744-
case Intrinsic::riscv_seg3_store:
1745-
case Intrinsic::riscv_seg4_store:
1746-
case Intrinsic::riscv_seg5_store:
1747-
case Intrinsic::riscv_seg6_store:
1748-
case Intrinsic::riscv_seg7_store:
1749-
case Intrinsic::riscv_seg8_store:
1750-
// Operands are (vec, ..., vec, ptr, vl)
1751-
return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 2,
1752-
/*IsStore*/ true,
1753-
/*IsUnitStrided*/ false, /*UsePtrVal*/ true);
17541736
case Intrinsic::riscv_seg2_store_mask:
17551737
case Intrinsic::riscv_seg3_store_mask:
17561738
case Intrinsic::riscv_seg4_store_mask:
@@ -10462,13 +10444,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
1046210444
switch (IntNo) {
1046310445
default:
1046410446
break;
10465-
case Intrinsic::riscv_seg2_load:
10466-
case Intrinsic::riscv_seg3_load:
10467-
case Intrinsic::riscv_seg4_load:
10468-
case Intrinsic::riscv_seg5_load:
10469-
case Intrinsic::riscv_seg6_load:
10470-
case Intrinsic::riscv_seg7_load:
10471-
case Intrinsic::riscv_seg8_load:
1047210447
case Intrinsic::riscv_seg2_load_mask:
1047310448
case Intrinsic::riscv_seg3_load_mask:
1047410449
case Intrinsic::riscv_seg4_load_mask:
@@ -10491,12 +10466,9 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
1049110466
ContainerVT.getScalarSizeInBits();
1049210467
EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF);
1049310468

10494-
// Masked: (pointer, mask, vl)
10495-
// Non-masked: (pointer, vl)
10496-
bool IsMasked = Op.getNumOperands() > 4;
10469+
// Operands: (chain, int_id, pointer, mask, vl)
1049710470
SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
10498-
SDValue Mask =
10499-
IsMasked ? Op.getOperand(3) : getAllOnesMask(ContainerVT, VL, DL, DAG);
10471+
SDValue Mask = Op.getOperand(3);
1050010472
MVT MaskVT = Mask.getSimpleValueType();
1050110473
if (MaskVT.isFixedLengthVector()) {
1050210474
MVT MaskContainerVT =
@@ -10570,13 +10542,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
1057010542
switch (IntNo) {
1057110543
default:
1057210544
break;
10573-
case Intrinsic::riscv_seg2_store:
10574-
case Intrinsic::riscv_seg3_store:
10575-
case Intrinsic::riscv_seg4_store:
10576-
case Intrinsic::riscv_seg5_store:
10577-
case Intrinsic::riscv_seg6_store:
10578-
case Intrinsic::riscv_seg7_store:
10579-
case Intrinsic::riscv_seg8_store:
1058010545
case Intrinsic::riscv_seg2_store_mask:
1058110546
case Intrinsic::riscv_seg3_store_mask:
1058210547
case Intrinsic::riscv_seg4_store_mask:
@@ -10591,24 +10556,8 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
1059110556
Intrinsic::riscv_vsseg6_mask, Intrinsic::riscv_vsseg7_mask,
1059210557
Intrinsic::riscv_vsseg8_mask};
1059310558

10594-
bool IsMasked = false;
10595-
switch (IntNo) {
10596-
case Intrinsic::riscv_seg2_store_mask:
10597-
case Intrinsic::riscv_seg3_store_mask:
10598-
case Intrinsic::riscv_seg4_store_mask:
10599-
case Intrinsic::riscv_seg5_store_mask:
10600-
case Intrinsic::riscv_seg6_store_mask:
10601-
case Intrinsic::riscv_seg7_store_mask:
10602-
case Intrinsic::riscv_seg8_store_mask:
10603-
IsMasked = true;
10604-
break;
10605-
default:
10606-
break;
10607-
}
10608-
10609-
// Non-masked: (chain, int_id, vec*, ptr, vl)
10610-
// Masked: (chain, int_id, vec*, ptr, mask, vl)
10611-
unsigned NF = Op->getNumOperands() - (IsMasked ? 5 : 4);
10559+
// Operands: (chain, int_id, vec*, ptr, mask, vl)
10560+
unsigned NF = Op->getNumOperands() - 5;
1061210561
assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
1061310562
MVT XLenVT = Subtarget.getXLenVT();
1061410563
MVT VT = Op->getOperand(2).getSimpleValueType();
@@ -10618,8 +10567,7 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
1061810567
EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF);
1061910568

1062010569
SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
10621-
SDValue Mask = IsMasked ? Op.getOperand(Op.getNumOperands() - 2)
10622-
: getAllOnesMask(ContainerVT, VL, DL, DAG);
10570+
SDValue Mask = Op.getOperand(Op.getNumOperands() - 2);
1062310571
MVT MaskVT = Mask.getSimpleValueType();
1062410572
if (MaskVT.isFixedLengthVector()) {
1062510573
MVT MaskContainerVT =
@@ -23432,10 +23380,10 @@ bool RISCVTargetLowering::isLegalStridedLoadStore(EVT DataType,
2343223380
}
2343323381

2343423382
static const Intrinsic::ID FixedVlsegIntrIds[] = {
23435-
Intrinsic::riscv_seg2_load, Intrinsic::riscv_seg3_load,
23436-
Intrinsic::riscv_seg4_load, Intrinsic::riscv_seg5_load,
23437-
Intrinsic::riscv_seg6_load, Intrinsic::riscv_seg7_load,
23438-
Intrinsic::riscv_seg8_load};
23383+
Intrinsic::riscv_seg2_load_mask, Intrinsic::riscv_seg3_load_mask,
23384+
Intrinsic::riscv_seg4_load_mask, Intrinsic::riscv_seg5_load_mask,
23385+
Intrinsic::riscv_seg6_load_mask, Intrinsic::riscv_seg7_load_mask,
23386+
Intrinsic::riscv_seg8_load_mask};
2343923387

2344023388
/// Lower an interleaved load into a vlsegN intrinsic.
2344123389
///
@@ -23486,10 +23434,11 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
2348623434
};
2348723435

2348823436
Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
23489-
23490-
CallInst *VlsegN = Builder.CreateIntrinsic(
23491-
FixedVlsegIntrIds[Factor - 2], {VTy, LI->getPointerOperandType(), XLenTy},
23492-
{LI->getPointerOperand(), VL});
23437+
// All-ones mask.
23438+
Value *Mask = Builder.getAllOnesMask(VTy->getElementCount());
23439+
CallInst *VlsegN =
23440+
Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {VTy, XLenTy},
23441+
{LI->getPointerOperand(), Mask, VL});
2349323442

2349423443
for (unsigned i = 0; i < Shuffles.size(); i++) {
2349523444
Value *SubVec = Builder.CreateExtractValue(VlsegN, Indices[i]);
@@ -23500,10 +23449,10 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
2350023449
}
2350123450

2350223451
static const Intrinsic::ID FixedVssegIntrIds[] = {
23503-
Intrinsic::riscv_seg2_store, Intrinsic::riscv_seg3_store,
23504-
Intrinsic::riscv_seg4_store, Intrinsic::riscv_seg5_store,
23505-
Intrinsic::riscv_seg6_store, Intrinsic::riscv_seg7_store,
23506-
Intrinsic::riscv_seg8_store};
23452+
Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
23453+
Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
23454+
Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
23455+
Intrinsic::riscv_seg8_store_mask};
2350723456

2350823457
/// Lower an interleaved store into a vssegN intrinsic.
2350923458
///
@@ -23563,8 +23512,7 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
2356323512
}
2356423513

2356523514
Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
23566-
SI->getModule(), FixedVssegIntrIds[Factor - 2],
23567-
{VTy, SI->getPointerOperandType(), XLenTy});
23515+
SI->getModule(), FixedVssegIntrIds[Factor - 2], {VTy, XLenTy});
2356823516

2356923517
SmallVector<Value *, 10> Ops;
2357023518
SmallVector<int, 16> NewShuffleMask;
@@ -23584,7 +23532,10 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
2358423532
// potentially under larger LMULs) because we checked that the fixed vector
2358523533
// type fits in isLegalInterleavedAccessType
2358623534
Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
23587-
Ops.append({SI->getPointerOperand(), VL});
23535+
// All-ones mask.
23536+
Value *StoreMask = ConstantVector::getSplat(
23537+
VTy->getElementCount(), ConstantInt::getTrue(SVI->getContext()));
23538+
Ops.append({SI->getPointerOperand(), StoreMask, VL});
2358823539

2358923540
Builder.CreateCall(VssegNFunc, Ops);
2359023541

@@ -23613,10 +23564,12 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
2361323564

2361423565
if (auto *FVTy = dyn_cast<FixedVectorType>(ResVTy)) {
2361523566
Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
23567+
// All-ones mask.
23568+
Value *Mask = ConstantVector::getSplat(
23569+
FVTy->getElementCount(), ConstantInt::getTrue(LI->getContext()));
2361623570
Return =
23617-
Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2],
23618-
{ResVTy, LI->getPointerOperandType(), XLenTy},
23619-
{LI->getPointerOperand(), VL});
23571+
Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {ResVTy, XLenTy},
23572+
{LI->getPointerOperand(), Mask, VL});
2362023573
} else {
2362123574
static const Intrinsic::ID IntrIds[] = {
2362223575
Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
@@ -23680,12 +23633,14 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(
2368023633

2368123634
if (auto *FVTy = dyn_cast<FixedVectorType>(InVTy)) {
2368223635
Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
23683-
SI->getModule(), FixedVssegIntrIds[Factor - 2],
23684-
{InVTy, SI->getPointerOperandType(), XLenTy});
23636+
SI->getModule(), FixedVssegIntrIds[Factor - 2], {InVTy, XLenTy});
2368523637

2368623638
SmallVector<Value *, 10> Ops(InterleaveValues);
2368723639
Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
23688-
Ops.append({SI->getPointerOperand(), VL});
23640+
// All-ones mask.
23641+
Value *Mask = ConstantVector::getSplat(
23642+
FVTy->getElementCount(), ConstantInt::getTrue(SI->getContext()));
23643+
Ops.append({SI->getPointerOperand(), Mask, VL});
2368923644

2369023645
Builder.CreateCall(VssegNFunc, Ops);
2369123646
} else {
@@ -23807,15 +23762,9 @@ bool RISCVTargetLowering::lowerDeinterleavedVPLoad(
2380723762

2380823763
Value *Return = nullptr;
2380923764
if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
23810-
static const Intrinsic::ID FixedMaskedVlsegIntrIds[] = {
23811-
Intrinsic::riscv_seg2_load_mask, Intrinsic::riscv_seg3_load_mask,
23812-
Intrinsic::riscv_seg4_load_mask, Intrinsic::riscv_seg5_load_mask,
23813-
Intrinsic::riscv_seg6_load_mask, Intrinsic::riscv_seg7_load_mask,
23814-
Intrinsic::riscv_seg8_load_mask};
23815-
23816-
Return = Builder.CreateIntrinsic(FixedMaskedVlsegIntrIds[Factor - 2],
23817-
{FVTy, XLenTy},
23818-
{Load->getArgOperand(0), Mask, EVL});
23765+
Return =
23766+
Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {FVTy, XLenTy},
23767+
{Load->getArgOperand(0), Mask, EVL});
2381923768
} else {
2382023769
static const Intrinsic::ID IntrMaskIds[] = {
2382123770
Intrinsic::riscv_vlseg2_mask, Intrinsic::riscv_vlseg3_mask,
@@ -23927,16 +23876,10 @@ bool RISCVTargetLowering::lowerInterleavedVPStore(
2392723876
XLenTy);
2392823877

2392923878
if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
23930-
static const Intrinsic::ID FixedMaskedVssegIntrIds[] = {
23931-
Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
23932-
Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
23933-
Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
23934-
Intrinsic::riscv_seg8_store_mask};
23935-
2393623879
SmallVector<Value *, 8> Operands(InterleaveOperands.begin(),
2393723880
InterleaveOperands.end());
2393823881
Operands.append({Store->getArgOperand(1), Mask, EVL});
23939-
Builder.CreateIntrinsic(FixedMaskedVssegIntrIds[Factor - 2], {FVTy, XLenTy},
23882+
Builder.CreateIntrinsic(FixedVssegIntrIds[Factor - 2], {FVTy, XLenTy},
2394023883
Operands);
2394123884
return true;
2394223885
}

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-segN-load.ll

+7-14
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ define <8 x i8> @load_factor2(ptr %ptr) {
77
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
88
; CHECK-NEXT: vlseg2e8.v v7, (a0)
99
; CHECK-NEXT: ret
10-
%1 = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.v8i8.p0.i64(ptr %ptr, i64 8)
10+
%1 = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
1111
%2 = extractvalue { <8 x i8>, <8 x i8> } %1, 0
1212
%3 = extractvalue { <8 x i8>, <8 x i8> } %1, 1
1313
ret <8 x i8> %3
@@ -19,7 +19,7 @@ define <8 x i8> @load_factor3(ptr %ptr) {
1919
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
2020
; CHECK-NEXT: vlseg3e8.v v6, (a0)
2121
; CHECK-NEXT: ret
22-
%1 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.v8i8.p0.i64(ptr %ptr, i64 8)
22+
%1 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
2323
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
2424
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
2525
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -32,7 +32,7 @@ define <8 x i8> @load_factor4(ptr %ptr) {
3232
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
3333
; CHECK-NEXT: vlseg4e8.v v5, (a0)
3434
; CHECK-NEXT: ret
35-
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg4.load.v8i8.p0.i64(ptr %ptr, i64 8)
35+
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg4.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
3636
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
3737
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
3838
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -46,7 +46,7 @@ define <8 x i8> @load_factor5(ptr %ptr) {
4646
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
4747
; CHECK-NEXT: vlseg5e8.v v4, (a0)
4848
; CHECK-NEXT: ret
49-
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg5.load.v8i8.p0.i64(ptr %ptr, i64 8)
49+
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg5.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
5050
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
5151
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
5252
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -61,7 +61,7 @@ define <8 x i8> @load_factor6(ptr %ptr) {
6161
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
6262
; CHECK-NEXT: vlseg6e8.v v3, (a0)
6363
; CHECK-NEXT: ret
64-
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg6.load.v8i8.p0.i64(ptr %ptr, i64 8)
64+
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg6.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
6565
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
6666
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
6767
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -77,7 +77,7 @@ define <8 x i8> @load_factor7(ptr %ptr) {
7777
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
7878
; CHECK-NEXT: vlseg7e8.v v2, (a0)
7979
; CHECK-NEXT: ret
80-
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg7.load.v8i8.p0.i64(ptr %ptr, i64 8)
80+
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg7.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
8181
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
8282
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
8383
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -94,7 +94,7 @@ define <8 x i8> @load_factor8(ptr %ptr) {
9494
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
9595
; CHECK-NEXT: vlseg8e8.v v1, (a0)
9696
; CHECK-NEXT: ret
97-
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg8.load.v8i8.p0.i64(ptr %ptr, i64 8)
97+
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg8.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
9898
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
9999
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
100100
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -105,10 +105,3 @@ define <8 x i8> @load_factor8(ptr %ptr) {
105105
%9 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 7
106106
ret <8 x i8> %9
107107
}
108-
declare { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.v8i8.p0.i64(ptr, i64)
109-
declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.v8i8.p0.i64(ptr, i64)
110-
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg4.load.v8i8.p0.i64(ptr, i64)
111-
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg5.load.v8i8.p0.i64(ptr, i64)
112-
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg6.load.v8i8.p0.i64(ptr, i64)
113-
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg7.load.v8i8.p0.i64(ptr, i64)
114-
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg8.load.v8i8.p0.i64(ptr, i64)

0 commit comments

Comments (0)