@@ -1724,13 +1724,6 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                  MachineMemOperand::MOVolatile;
     return true;
-  case Intrinsic::riscv_seg2_load:
-  case Intrinsic::riscv_seg3_load:
-  case Intrinsic::riscv_seg4_load:
-  case Intrinsic::riscv_seg5_load:
-  case Intrinsic::riscv_seg6_load:
-  case Intrinsic::riscv_seg7_load:
-  case Intrinsic::riscv_seg8_load:
   case Intrinsic::riscv_seg2_load_mask:
   case Intrinsic::riscv_seg3_load_mask:
   case Intrinsic::riscv_seg4_load_mask:
@@ -1740,17 +1733,6 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
   case Intrinsic::riscv_seg8_load_mask:
     return SetRVVLoadStoreInfo(/*PtrOp*/ 0, /*IsStore*/ false,
                                /*IsUnitStrided*/ false, /*UsePtrVal*/ true);
-  case Intrinsic::riscv_seg2_store:
-  case Intrinsic::riscv_seg3_store:
-  case Intrinsic::riscv_seg4_store:
-  case Intrinsic::riscv_seg5_store:
-  case Intrinsic::riscv_seg6_store:
-  case Intrinsic::riscv_seg7_store:
-  case Intrinsic::riscv_seg8_store:
-    // Operands are (vec, ..., vec, ptr, vl)
-    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 2,
-                               /*IsStore*/ true,
-                               /*IsUnitStrided*/ false, /*UsePtrVal*/ true);
   case Intrinsic::riscv_seg2_store_mask:
   case Intrinsic::riscv_seg3_store_mask:
   case Intrinsic::riscv_seg4_store_mask:
@@ -10462,13 +10444,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
   switch (IntNo) {
   default:
     break;
-  case Intrinsic::riscv_seg2_load:
-  case Intrinsic::riscv_seg3_load:
-  case Intrinsic::riscv_seg4_load:
-  case Intrinsic::riscv_seg5_load:
-  case Intrinsic::riscv_seg6_load:
-  case Intrinsic::riscv_seg7_load:
-  case Intrinsic::riscv_seg8_load:
   case Intrinsic::riscv_seg2_load_mask:
   case Intrinsic::riscv_seg3_load_mask:
   case Intrinsic::riscv_seg4_load_mask:
@@ -10491,12 +10466,9 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
                   ContainerVT.getScalarSizeInBits();
     EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF);

-    // Masked: (pointer, mask, vl)
-    // Non-masked: (pointer, vl)
-    bool IsMasked = Op.getNumOperands() > 4;
+    // Operands: (chain, int_id, pointer, mask, vl)
     SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
-    SDValue Mask =
-        IsMasked ? Op.getOperand(3) : getAllOnesMask(ContainerVT, VL, DL, DAG);
+    SDValue Mask = Op.getOperand(3);
     MVT MaskVT = Mask.getSimpleValueType();
     if (MaskVT.isFixedLengthVector()) {
       MVT MaskContainerVT =
@@ -10570,13 +10542,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
   switch (IntNo) {
   default:
     break;
-  case Intrinsic::riscv_seg2_store:
-  case Intrinsic::riscv_seg3_store:
-  case Intrinsic::riscv_seg4_store:
-  case Intrinsic::riscv_seg5_store:
-  case Intrinsic::riscv_seg6_store:
-  case Intrinsic::riscv_seg7_store:
-  case Intrinsic::riscv_seg8_store:
   case Intrinsic::riscv_seg2_store_mask:
   case Intrinsic::riscv_seg3_store_mask:
   case Intrinsic::riscv_seg4_store_mask:
@@ -10591,24 +10556,8 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
         Intrinsic::riscv_vsseg6_mask, Intrinsic::riscv_vsseg7_mask,
         Intrinsic::riscv_vsseg8_mask};

-    bool IsMasked = false;
-    switch (IntNo) {
-    case Intrinsic::riscv_seg2_store_mask:
-    case Intrinsic::riscv_seg3_store_mask:
-    case Intrinsic::riscv_seg4_store_mask:
-    case Intrinsic::riscv_seg5_store_mask:
-    case Intrinsic::riscv_seg6_store_mask:
-    case Intrinsic::riscv_seg7_store_mask:
-    case Intrinsic::riscv_seg8_store_mask:
-      IsMasked = true;
-      break;
-    default:
-      break;
-    }
-
-    // Non-masked: (chain, int_id, vec*, ptr, vl)
-    // Masked: (chain, int_id, vec*, ptr, mask, vl)
-    unsigned NF = Op->getNumOperands() - (IsMasked ? 5 : 4);
+    // Operands: (chain, int_id, vec*, ptr, mask, vl)
+    unsigned NF = Op->getNumOperands() - 5;
     assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
     MVT XLenVT = Subtarget.getXLenVT();
     MVT VT = Op->getOperand(2).getSimpleValueType();
@@ -10618,8 +10567,7 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
     EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF);

     SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
-    SDValue Mask = IsMasked ? Op.getOperand(Op.getNumOperands() - 2)
-                            : getAllOnesMask(ContainerVT, VL, DL, DAG);
+    SDValue Mask = Op.getOperand(Op.getNumOperands() - 2);
     MVT MaskVT = Mask.getSimpleValueType();
     if (MaskVT.isFixedLengthVector()) {
       MVT MaskContainerVT =
@@ -23432,10 +23380,10 @@ bool RISCVTargetLowering::isLegalStridedLoadStore(EVT DataType,
 }

 static const Intrinsic::ID FixedVlsegIntrIds[] = {
-    Intrinsic::riscv_seg2_load, Intrinsic::riscv_seg3_load,
-    Intrinsic::riscv_seg4_load, Intrinsic::riscv_seg5_load,
-    Intrinsic::riscv_seg6_load, Intrinsic::riscv_seg7_load,
-    Intrinsic::riscv_seg8_load};
+    Intrinsic::riscv_seg2_load_mask, Intrinsic::riscv_seg3_load_mask,
+    Intrinsic::riscv_seg4_load_mask, Intrinsic::riscv_seg5_load_mask,
+    Intrinsic::riscv_seg6_load_mask, Intrinsic::riscv_seg7_load_mask,
+    Intrinsic::riscv_seg8_load_mask};

 /// Lower an interleaved load into a vlsegN intrinsic.
 ///
@@ -23486,10 +23434,11 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
   };

   Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
-
-  CallInst *VlsegN = Builder.CreateIntrinsic(
-      FixedVlsegIntrIds[Factor - 2], {VTy, LI->getPointerOperandType(), XLenTy},
-      {LI->getPointerOperand(), VL});
+  // All-ones mask.
+  Value *Mask = Builder.getAllOnesMask(VTy->getElementCount());
+  CallInst *VlsegN =
+      Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {VTy, XLenTy},
+                              {LI->getPointerOperand(), Mask, VL});

   for (unsigned i = 0; i < Shuffles.size(); i++) {
     Value *SubVec = Builder.CreateExtractValue(VlsegN, Indices[i]);
@@ -23500,10 +23449,10 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
 }

 static const Intrinsic::ID FixedVssegIntrIds[] = {
-    Intrinsic::riscv_seg2_store, Intrinsic::riscv_seg3_store,
-    Intrinsic::riscv_seg4_store, Intrinsic::riscv_seg5_store,
-    Intrinsic::riscv_seg6_store, Intrinsic::riscv_seg7_store,
-    Intrinsic::riscv_seg8_store};
+    Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
+    Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
+    Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
+    Intrinsic::riscv_seg8_store_mask};

 /// Lower an interleaved store into a vssegN intrinsic.
 ///
@@ -23563,8 +23512,7 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
   }

   Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
-      SI->getModule(), FixedVssegIntrIds[Factor - 2],
-      {VTy, SI->getPointerOperandType(), XLenTy});
+      SI->getModule(), FixedVssegIntrIds[Factor - 2], {VTy, XLenTy});

   SmallVector<Value *, 10> Ops;
   SmallVector<int, 16> NewShuffleMask;
@@ -23584,7 +23532,10 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
   // potentially under larger LMULs) because we checked that the fixed vector
   // type fits in isLegalInterleavedAccessType
   Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
-  Ops.append({SI->getPointerOperand(), VL});
+  // All-ones mask.
+  Value *StoreMask = ConstantVector::getSplat(
+      VTy->getElementCount(), ConstantInt::getTrue(SVI->getContext()));
+  Ops.append({SI->getPointerOperand(), StoreMask, VL});

   Builder.CreateCall(VssegNFunc, Ops);

@@ -23613,10 +23564,12 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(

   if (auto *FVTy = dyn_cast<FixedVectorType>(ResVTy)) {
     Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
+    // All-ones mask.
+    Value *Mask = ConstantVector::getSplat(
+        FVTy->getElementCount(), ConstantInt::getTrue(LI->getContext()));
     Return =
-        Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2],
-                                {ResVTy, LI->getPointerOperandType(), XLenTy},
-                                {LI->getPointerOperand(), VL});
+        Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {ResVTy, XLenTy},
+                                {LI->getPointerOperand(), Mask, VL});
   } else {
     static const Intrinsic::ID IntrIds[] = {
         Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
@@ -23680,12 +23633,14 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(

   if (auto *FVTy = dyn_cast<FixedVectorType>(InVTy)) {
     Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
-        SI->getModule(), FixedVssegIntrIds[Factor - 2],
-        {InVTy, SI->getPointerOperandType(), XLenTy});
+        SI->getModule(), FixedVssegIntrIds[Factor - 2], {InVTy, XLenTy});

     SmallVector<Value *, 10> Ops(InterleaveValues);
     Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
-    Ops.append({SI->getPointerOperand(), VL});
+    // All-ones mask.
+    Value *Mask = ConstantVector::getSplat(
+        FVTy->getElementCount(), ConstantInt::getTrue(SI->getContext()));
+    Ops.append({SI->getPointerOperand(), Mask, VL});

     Builder.CreateCall(VssegNFunc, Ops);
   } else {
@@ -23807,15 +23762,9 @@ bool RISCVTargetLowering::lowerDeinterleavedVPLoad(

   Value *Return = nullptr;
   if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
-    static const Intrinsic::ID FixedMaskedVlsegIntrIds[] = {
-        Intrinsic::riscv_seg2_load_mask, Intrinsic::riscv_seg3_load_mask,
-        Intrinsic::riscv_seg4_load_mask, Intrinsic::riscv_seg5_load_mask,
-        Intrinsic::riscv_seg6_load_mask, Intrinsic::riscv_seg7_load_mask,
-        Intrinsic::riscv_seg8_load_mask};
-
-    Return = Builder.CreateIntrinsic(FixedMaskedVlsegIntrIds[Factor - 2],
-                                     {FVTy, XLenTy},
-                                     {Load->getArgOperand(0), Mask, EVL});
+    Return =
+        Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {FVTy, XLenTy},
+                                {Load->getArgOperand(0), Mask, EVL});
   } else {
     static const Intrinsic::ID IntrMaskIds[] = {
         Intrinsic::riscv_vlseg2_mask, Intrinsic::riscv_vlseg3_mask,
@@ -23927,16 +23876,10 @@ bool RISCVTargetLowering::lowerInterleavedVPStore(
                           XLenTy);

   if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
-    static const Intrinsic::ID FixedMaskedVssegIntrIds[] = {
-        Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
-        Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
-        Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
-        Intrinsic::riscv_seg8_store_mask};
-
     SmallVector<Value *, 8> Operands(InterleaveOperands.begin(),
                                      InterleaveOperands.end());
     Operands.append({Store->getArgOperand(1), Mask, EVL});
-    Builder.CreateIntrinsic(FixedMaskedVssegIntrIds[Factor - 2], {FVTy, XLenTy},
+    Builder.CreateIntrinsic(FixedVssegIntrIds[Factor - 2], {FVTy, XLenTy},
                             Operands);
     return true;
   }
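Note: after this change every fixed-vector call site in the file converges on one shape — materialize an all-ones mask and pass (ptr, mask, vl), or (vec..., ptr, mask, vl) for stores, to the masked segment intrinsic, which with all lanes active behaves exactly like the removed unmasked riscv_segN_load/store. The helper below is a minimal sketch of that shared pattern, not code from the patch; the name emitFixedSegLoad is hypothetical, while FixedVlsegIntrIds, getAllOnesMask, and CreateIntrinsic are the APIs used in the hunks above.

// Hypothetical illustration of the shared call shape, assuming the
// FixedVlsegIntrIds table defined earlier in this file.
static CallInst *emitFixedSegLoad(IRBuilder<> &Builder, FixedVectorType *VTy,
                                  Type *XLenTy, Value *Ptr, unsigned Factor) {
  // VL covers every element of the fixed vector.
  Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
  // All-ones mask: with no lanes disabled, riscv_segN_load_mask is
  // equivalent to the removed riscv_segN_load.
  Value *Mask = Builder.getAllOnesMask(VTy->getElementCount());
  // The intrinsic is now mangled only on {VTy, XLenTy}; the pointer type
  // is no longer an overloaded parameter.
  return Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {VTy, XLenTy},
                                 {Ptr, Mask, VL});
}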