forked from llvm/llvm-project
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathRewriteStatepointsForGC.cpp
3382 lines (2973 loc) · 134 KB
/
RewriteStatepointsForGC.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
//===- RewriteStatepointsForGC.cpp - Make GC relocations explicit ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Rewrite call/invoke instructions so as to make potential relocations
// performed by the garbage collector explicit in the IR.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/RewriteStatepointsForGC.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GCStrategy.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <set>
#include <string>
#include <utility>
#include <vector>
#define DEBUG_TYPE "rewrite-statepoints-for-gc"

using namespace llvm;

// Print the liveset found at the insert location
static cl::opt<bool> PrintLiveSet("spp-print-liveset", cl::Hidden,
                                  cl::init(false));
// Print only the size of the liveset at each safepoint.
static cl::opt<bool> PrintLiveSetSize("spp-print-liveset-size", cl::Hidden,
                                      cl::init(false));
// Print out the base pointers for debugging
static cl::opt<bool> PrintBasePointers("spp-print-base-pointers", cl::Hidden,
                                       cl::init(false));

// Cost threshold measuring when it is profitable to rematerialize value instead
// of relocating it
static cl::opt<unsigned>
    RematerializationThreshold("spp-rematerialization-threshold", cl::Hidden,
                               cl::init(6));

// Default to clobbering non-live pointers in expensive-checks builds so that
// stale uses of unrelocated pointers fail loudly.
#ifdef EXPENSIVE_CHECKS
static bool ClobberNonLive = true;
#else
static bool ClobberNonLive = false;
#endif

// Command-line override for the ClobberNonLive default above.
static cl::opt<bool, true> ClobberNonLiveOverride("rs4gc-clobber-non-live",
                                                  cl::location(ClobberNonLive),
                                                  cl::Hidden);

// Whether a call without a "deopt" operand bundle may still be turned into a
// statepoint (see GetDeoptBundleOperands).
static cl::opt<bool>
    AllowStatepointWithNoDeoptInfo("rs4gc-allow-statepoint-with-no-deopt-info",
                                   cl::Hidden, cl::init(true));

// Whether derived pointers may be rematerialized at their uses rather than
// relocated across the statepoint.
static cl::opt<bool> RematDerivedAtUses("rs4gc-remat-derived-at-uses",
                                        cl::Hidden, cl::init(true));
/// The IR fed into RewriteStatepointsForGC may have had attributes and
/// metadata implying dereferenceability that are no longer valid/correct after
/// RewriteStatepointsForGC has run. This is because semantically, after
/// RewriteStatepointsForGC runs, all calls to gc.statepoint "free" the entire
/// heap. stripNonValidData (conservatively) restores
/// correctness by erasing all attributes in the module that externally imply
/// dereferenceability. Similar reasoning also applies to the noalias
/// attributes and metadata. gc.statepoint can touch the entire heap including
/// noalias objects.
/// Apart from attributes and metadata, we also remove instructions that imply
/// constant physical memory: llvm.invariant.start.
static void stripNonValidData(Module &M);
// Find the GC strategy for a function, or null if it doesn't have one.
static std::unique_ptr<GCStrategy> findGCStrategy(Function &F);
static bool shouldRewriteStatepointsIn(Function &F);
/// Module entry point: rewrite statepoints in every eligible function, then
/// strip attributes/metadata invalidated by the rewrite from the module.
PreservedAnalyses RewriteStatepointsForGC::run(Module &M,
                                               ModuleAnalysisManager &AM) {
  auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();

  bool MadeChange = false;
  for (Function &F : M) {
    // Declarations and empty bodies have nothing to rewrite.
    if (F.isDeclaration() || F.empty())
      continue;

    // Respect the per-function policy; the most common reason to skip is
    // compiling code without a GCStrategy.
    if (!shouldRewriteStatepointsIn(F))
      continue;

    MadeChange |= runOnFunction(F, FAM.getResult<DominatorTreeAnalysis>(F),
                                FAM.getResult<TargetIRAnalysis>(F),
                                FAM.getResult<TargetLibraryAnalysis>(F));
  }

  if (!MadeChange)
    return PreservedAnalyses::all();

  // stripNonValidData asserts that shouldRewriteStatepointsIn returned true
  // for at least one function in the module; a change above guarantees that
  // precondition holds.
  stripNonValidData(M);

  PreservedAnalyses PA;
  PA.preserve<TargetIRAnalysis>();
  PA.preserve<TargetLibraryAnalysis>();
  return PA;
}
namespace {

/// Per-block dataflow facts produced by the liveness analysis over GC
/// pointer values.
struct GCPtrLivenessData {
  /// Values defined in this block.
  MapVector<BasicBlock *, SetVector<Value *>> KillSet;

  /// Values used in this block (and thus live); does not include values
  /// killed within this block.
  MapVector<BasicBlock *, SetVector<Value *>> LiveSet;

  /// Values live into this basic block (i.e. used by any
  /// instruction in this basic block or ones reachable from here)
  MapVector<BasicBlock *, SetVector<Value *>> LiveIn;

  /// Values live out of this basic block (i.e. live into
  /// any successor block)
  MapVector<BasicBlock *, SetVector<Value *>> LiveOut;
};

// The type of the internal cache used inside the findBasePointers family
// of functions. From the callers perspective, this is an opaque type and
// should not be inspected.
//
// In the actual implementation this caches two relations:
// - The base relation itself (i.e. this pointer is based on that one)
// - The base defining value relation (i.e. before base_phi insertion)
// Generally, after the execution of a full findBasePointer call, only the
// base relation will remain. Internally, we add a mixture of the two
// types, then update all the second type to the first type
using DefiningValueMapTy = MapVector<Value *, Value *>;
// Maps a value to whether it is already known to be a base pointer.
using IsKnownBaseMapTy = MapVector<Value *, bool>;
// Final derived-pointer -> base-pointer relation.
using PointerToBaseTy = MapVector<Value *, Value *>;
// Set of values live across a particular statepoint.
using StatepointLiveSetTy = SetVector<Value *>;
// Maps a rematerialized copy back to the original live value it replaces.
using RematerializedValueMapTy =
    MapVector<AssertingVH<Instruction>, AssertingVH<Value>>;

/// Everything gathered while rewriting a single call site into a statepoint.
struct PartiallyConstructedSafepointRecord {
  /// The set of values known to be live across this safepoint
  StatepointLiveSetTy LiveSet;

  /// The *new* gc.statepoint instruction itself.  This produces the token
  /// that normal path gc.relocates and the gc.result are tied to.
  GCStatepointInst *StatepointToken;

  /// Instruction to which exceptional gc relocates are attached
  /// Makes it easier to iterate through them during relocationViaAlloca.
  Instruction *UnwindToken;

  /// Record live values we are rematerialized instead of relocating.
  /// They are not included into 'LiveSet' field.
  /// Maps rematerialized copy to it's original value.
  RematerializedValueMapTy RematerializedValues;
};

// NOTE(review): the misspelling "Rematerizlization" is long-standing; kept
// as-is since the name may be referenced elsewhere in the file.
struct RematerizlizationCandidateRecord {
  // Chain from derived pointer to base.
  SmallVector<Instruction *, 3> ChainToBase;
  // Original base.
  Value *RootOfChain;
  // Cost of chain.
  InstructionCost Cost;
};
// Maps a derived pointer to its rematerialization candidate record.
using RematCandTy = MapVector<Value *, RematerizlizationCandidateRecord>;

} // end anonymous namespace
/// Return the inputs of the call's "deopt" operand bundle, which become the
/// deopt arguments of the statepoint.  Calls without deopt state are only
/// legal when -rs4gc-allow-statepoint-with-no-deopt-info is set.
static ArrayRef<Use> GetDeoptBundleOperands(const CallBase *Call) {
  if (std::optional<OperandBundleUse> Bundle =
          Call->getOperandBundle(LLVMContext::OB_deopt))
    return Bundle->Inputs;

  assert(AllowStatepointWithNoDeoptInfo &&
         "Found non-leaf call without deopt info!");
  return {};
}
/// Compute the live-in set for every basic block in the function
static void computeLiveInValues(DominatorTree &DT, Function &F,
GCPtrLivenessData &Data, GCStrategy *GC);
/// Given results from the dataflow liveness computation, find the set of live
/// Values at a particular instruction.
static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data,
StatepointLiveSetTy &out, GCStrategy *GC);
/// Return true if T is a pointer type the collector may manage.  Non-pointer
/// types are never GC references; for pointers we defer to the strategy and,
/// if it has no opinion, conservatively assume GC-managed (this matches
/// StatepointLowering).
static bool isGCPointerType(Type *T, GCStrategy *GC) {
  assert(GC && "GC Strategy for isGCPointerType cannot be null");
  return isa<PointerType>(T) && GC->isGCManagedPointer(T).value_or(true);
}
// Return true if this type is one which a) is a gc pointer or contains a GC
// pointer and b) is of a type this code expects to encounter as a live value.
// (The insertion code will assert that a type which matches (a) and not (b)
// is not encountered.)
static bool isHandledGCPointerType(Type *T, GCStrategy *GC) {
  // Scalar gc pointers are fully supported.
  if (isGCPointerType(T, GC))
    return true;

  // Vectors of gc pointers are partially supported; the code asserts if it
  // encounters a form it can't handle.
  auto *VecTy = dyn_cast<VectorType>(T);
  return VecTy && isGCPointerType(VecTy->getElementType(), GC);
}
#ifndef NDEBUG
/// Returns true if this type contains a gc pointer whether we know how to
/// handle that type or not.  Recurses through arrays and structs; for
/// vectors, only the scalar element type is inspected.
static bool containsGCPtrType(Type *Ty, GCStrategy *GC) {
  if (isGCPointerType(Ty, GC))
    return true;
  if (VectorType *VT = dyn_cast<VectorType>(Ty))
    return isGCPointerType(VT->getScalarType(), GC);
  if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
    return containsGCPtrType(AT->getElementType(), GC);
  if (StructType *ST = dyn_cast<StructType>(Ty))
    return llvm::any_of(ST->elements(),
                        [GC](Type *Ty) { return containsGCPtrType(Ty, GC); });
  return false;
}

// Returns true if this is a type which a) is a gc pointer or contains a GC
// pointer and b) is of a type which the code doesn't expect (i.e. first class
// aggregates).  Used to trip assertions.
static bool isUnhandledGCPointerType(Type *Ty, GCStrategy *GC) {
  return containsGCPtrType(Ty, GC) && !isHandledGCPointerType(Ty, GC);
}
#endif
// Return the name of the value suffixed with the provided value, or if the
// value didn't have a name, the default value specified.
static std::string suffixed_name_or(Value *V, StringRef Suffix,
                                    StringRef DefaultName) {
  if (!V->hasName())
    return DefaultName.str();
  return (V->getName() + Suffix).str();
}
// Conservatively identifies any definitions which might be live at the
// given instruction. The analysis is performed immediately before the
// given instruction. Values defined by that instruction are not considered
// live. Values used by that instruction are considered live.
//
// The discovered live set is stored into Result.LiveSet; the optional
// spp-print-liveset* flags dump it for debugging.
static void analyzeParsePointLiveness(
    DominatorTree &DT, GCPtrLivenessData &OriginalLivenessData, CallBase *Call,
    PartiallyConstructedSafepointRecord &Result, GCStrategy *GC) {
  StatepointLiveSetTy LiveSet;
  findLiveSetAtInst(Call, OriginalLivenessData, LiveSet, GC);

  if (PrintLiveSet) {
    dbgs() << "Live Variables:\n";
    for (Value *V : LiveSet)
      dbgs() << " " << V->getName() << " " << *V << "\n";
  }
  if (PrintLiveSetSize) {
    dbgs() << "Safepoint For: " << Call->getCalledOperand()->getName() << "\n";
    dbgs() << "Number live values: " << LiveSet.size() << "\n";
  }
  // Move rather than copy: the live set can be large and is dead after this
  // point.
  Result.LiveSet = std::move(LiveSet);
}
/// Returns true if V is a known base.
static bool isKnownBase(Value *V, const IsKnownBaseMapTy &KnownBases);
/// Caches the IsKnownBase flag for a value and asserts that it wasn't present
/// in the cache before.
static void setKnownBase(Value *V, bool IsKnownBase,
IsKnownBaseMapTy &KnownBases);
static Value *findBaseDefiningValue(Value *I, DefiningValueMapTy &Cache,
IsKnownBaseMapTy &KnownBases);
/// Return a base defining value for the 'Index' element of the given vector
/// instruction 'I'.  If Index is null, returns a BDV for the entire vector
/// 'I'.  As an optimization, this method will try to determine when the
/// element is known to already be a base pointer.  If this can be established,
/// the second value in the returned pair will be true.  Note that either a
/// vector or a pointer typed value can be returned.  For the former, the
/// vector returned is a BDV (and possibly a base) of the entire vector 'I'.
/// If the later, the return pointer is a BDV (or possibly a base) for the
/// particular element in 'I'.
static Value *findBaseDefiningValueOfVector(Value *I, DefiningValueMapTy &Cache,
                                            IsKnownBaseMapTy &KnownBases) {
  // Each case parallels findBaseDefiningValue below, see that code for
  // detailed motivation.

  // Serve repeated queries from the cache.
  auto Cached = Cache.find(I);
  if (Cached != Cache.end())
    return Cached->second;

  if (isa<Argument>(I)) {
    // An incoming argument to the function is a base pointer
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  if (isa<Constant>(I)) {
    // Base of constant vector consists only of constant null pointers.
    // For reasoning see similar case inside 'findBaseDefiningValue' function.
    auto *CAZ = ConstantAggregateZero::get(I->getType());
    Cache[I] = CAZ;
    setKnownBase(CAZ, /* IsKnownBase */true, KnownBases);
    return CAZ;
  }

  if (isa<LoadInst>(I)) {
    // A loaded vector of gc pointers is itself a base.
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  if (isa<InsertElementInst>(I)) {
    // We don't know whether this vector contains entirely base pointers or
    // not.  To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */false, KnownBases);
    return I;
  }

  if (isa<ShuffleVectorInst>(I)) {
    // We don't know whether this vector contains entirely base pointers or
    // not.  To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    // TODO: There a number of local optimizations which could be applied here
    // for particular sufflevector patterns.
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */false, KnownBases);
    return I;
  }

  // The behavior of getelementptr instructions is the same for vector and
  // non-vector data types.
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *BDV =
        findBaseDefiningValue(GEP->getPointerOperand(), Cache, KnownBases);
    Cache[GEP] = BDV;
    return BDV;
  }

  // The behavior of freeze instructions is the same for vector and
  // non-vector data types.
  if (auto *Freeze = dyn_cast<FreezeInst>(I)) {
    auto *BDV = findBaseDefiningValue(Freeze->getOperand(0), Cache, KnownBases);
    Cache[Freeze] = BDV;
    return BDV;
  }

  // If the pointer comes through a bitcast of a vector of pointers to
  // a vector of another type of pointer, then look through the bitcast
  if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *BDV = findBaseDefiningValue(BC->getOperand(0), Cache, KnownBases);
    Cache[BC] = BDV;
    return BDV;
  }

  // We assume that functions in the source language only return base
  // pointers.  This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  // A PHI or Select is a base defining value.  The outer findBasePointer
  // algorithm is responsible for constructing a base value for this BDV.
  assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
         "unknown vector instruction - no base found for vector element");
  Cache[I] = I;
  setKnownBase(I, /* IsKnownBase */false, KnownBases);
  return I;
}
/// Helper function for findBasePointer - Will return a value which either a)
/// defines the base pointer for the input, b) blocks the simple search
/// (i.e. a PHI or Select of two derived pointers), or c) involves a change
/// from pointer to vector type or back.
///
/// Every result is recorded in 'Cache' and its known-base status in
/// 'KnownBases' before returning.
static Value *findBaseDefiningValue(Value *I, DefiningValueMapTy &Cache,
                                    IsKnownBaseMapTy &KnownBases) {
  assert(I->getType()->isPtrOrPtrVectorTy() &&
         "Illegal to ask for the base pointer of a non-pointer type");
  // Serve repeated queries from the cache.
  auto Cached = Cache.find(I);
  if (Cached != Cache.end())
    return Cached->second;

  if (I->getType()->isVectorTy())
    return findBaseDefiningValueOfVector(I, Cache, KnownBases);

  if (isa<Argument>(I)) {
    // An incoming argument to the function is a base pointer
    // We should have never reached here if this argument isn't an gc value
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  if (isa<Constant>(I)) {
    // We assume that objects with a constant base (e.g. a global) can't move
    // and don't need to be reported to the collector because they are always
    // live.  Besides global references, all kinds of constants (e.g. undef,
    // constant expressions, null pointers) can be introduced by the inliner or
    // the optimizer, especially on dynamically dead paths.
    // Here we treat all of them as having single null base.  By doing this we
    // trying to avoid problems reporting various conflicts in a form of
    // "phi (const1, const2)" or "phi (const, regular gc ptr)".
    // See constant.ll file for relevant test cases.
    auto *CPN = ConstantPointerNull::get(cast<PointerType>(I->getType()));
    Cache[I] = CPN;
    setKnownBase(CPN, /* IsKnownBase */true, KnownBases);
    return CPN;
  }

  // inttoptrs in an integral address space are currently ill-defined.  We
  // treat them as defining base pointers here for consistency with the
  // constant rule above and because we don't really have a better semantic
  // to give them.  Note that the optimizer is always free to insert undefined
  // behavior on dynamically dead paths as well.
  if (isa<IntToPtrInst>(I)) {
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    Value *Def = CI->stripPointerCasts();
    // If stripping pointer casts changes the address space there is an
    // addrspacecast in between.
    assert(cast<PointerType>(Def->getType())->getAddressSpace() ==
           cast<PointerType>(CI->getType())->getAddressSpace() &&
           "unsupported addrspacecast");
    // If we find a cast instruction here, it means we've found a cast which is
    // not simply a pointer cast (i.e. an inttoptr).  We don't know how to
    // handle int->ptr conversion.
    assert(!isa<CastInst>(Def) && "shouldn't find another cast here");
    auto *BDV = findBaseDefiningValue(Def, Cache, KnownBases);
    Cache[CI] = BDV;
    return BDV;
  }

  if (isa<LoadInst>(I)) {
    // The value loaded is an gc base itself
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
    // The base of this GEP is the base
    auto *BDV =
        findBaseDefiningValue(GEP->getPointerOperand(), Cache, KnownBases);
    Cache[GEP] = BDV;
    return BDV;
  }

  // Freeze is transparent for base-pointer purposes; look through it.
  if (auto *Freeze = dyn_cast<FreezeInst>(I)) {
    auto *BDV = findBaseDefiningValue(Freeze->getOperand(0), Cache, KnownBases);
    Cache[Freeze] = BDV;
    return BDV;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      // fall through to general call handling
      break;
    case Intrinsic::experimental_gc_statepoint:
      llvm_unreachable("statepoints don't produce pointers");
    case Intrinsic::experimental_gc_relocate:
      // Rerunning safepoint insertion after safepoints are already
      // inserted is not supported.  It could probably be made to work,
      // but why are you doing this?  There's no good reason.
      llvm_unreachable("repeat safepoint insertion is not supported");
    case Intrinsic::gcroot:
      // Currently, this mechanism hasn't been extended to work with gcroot.
      // There's no reason it couldn't be, but I haven't thought about the
      // implications much.
      llvm_unreachable(
          "interaction with the gcroot mechanism is not supported");
    case Intrinsic::experimental_gc_get_pointer_base:
      // The intrinsic is a no-op for BDV purposes; use its operand's BDV.
      auto *BDV = findBaseDefiningValue(II->getOperand(0), Cache, KnownBases);
      Cache[II] = BDV;
      return BDV;
    }
  }
  // We assume that functions in the source language only return base
  // pointers.  This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  // TODO: I have absolutely no idea how to implement this part yet.  It's not
  // necessarily hard, I just haven't really looked at it yet.
  assert(!isa<LandingPadInst>(I) && "Landing Pad is unimplemented");

  if (isa<AtomicCmpXchgInst>(I)) {
    // A CAS is effectively a atomic store and load combined under a
    // predicate.  From the perspective of base pointers, we just treat it
    // like a load.
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  if (isa<AtomicRMWInst>(I)) {
    assert(cast<AtomicRMWInst>(I)->getOperation() == AtomicRMWInst::Xchg &&
           "Only Xchg is allowed for pointer values");
    // A RMW Xchg is a combined atomic load and store, so we can treat the
    // loaded value as a base pointer.
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */ true, KnownBases);
    return I;
  }

  // The aggregate ops.  Aggregates can either be in the heap or on the
  // stack, but in either case, this is simply a field load.  As a result,
  // this is a defining definition of the base just like a load is.
  if (isa<ExtractValueInst>(I)) {
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  // We should never see an insert vector since that would require we be
  // tracing back a struct value not a pointer value.
  assert(!isa<InsertValueInst>(I) &&
         "Base pointer for a struct is meaningless");

  // This value might have been generated by findBasePointer() called when
  // substituting gc.get.pointer.base() intrinsic.
  bool IsKnownBase =
      isa<Instruction>(I) && cast<Instruction>(I)->getMetadata("is_base_value");
  setKnownBase(I, /* IsKnownBase */IsKnownBase, KnownBases);
  Cache[I] = I;

  // An extractelement produces a base result exactly when it's input does.
  // We may need to insert a parallel instruction to extract the appropriate
  // element out of the base vector corresponding to the input. Given this,
  // it's analogous to the phi and select case even though it's not a merge.
  if (isa<ExtractElementInst>(I))
    // Note: There a lot of obvious peephole cases here.  This are deliberately
    // handled after the main base pointer inference algorithm to make writing
    // test cases to exercise that code easier.
    return I;

  // The last two cases here don't return a base pointer.  Instead, they
  // return a value which dynamically selects from among several base
  // derived pointers (each with it's own base potentially).  It's the job of
  // the caller to resolve these.
  assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
         "missing instruction case in findBaseDefiningValue");
  return I;
}
/// Returns the base defining value for this value.
/// Memoizing wrapper around findBaseDefiningValue: computes and caches the
/// BDV on the first query and serves later queries from 'Cache'.
static Value *findBaseDefiningValueCached(Value *I, DefiningValueMapTy &Cache,
                                          IsKnownBaseMapTy &KnownBases) {
  if (!Cache.contains(I)) {
    auto *BDV = findBaseDefiningValue(I, Cache, KnownBases);
    Cache[I] = BDV;
    // NOTE(review): KnownBases[I] below may default-insert 'false' for I in
    // debug builds when only the BDV has been classified — confirm intended.
    LLVM_DEBUG(dbgs() << "fBDV-cached: " << I->getName() << " -> "
                      << Cache[I]->getName() << ", is known base = "
                      << KnownBases[I] << "\n");
  }
  assert(Cache[I] != nullptr);
  // The cached BDV must itself have a recorded known-base status.
  assert(KnownBases.contains(Cache[I]) &&
         "Cached value must be present in known bases map");
  return Cache[I];
}
/// Return a base pointer for this value if known.  Otherwise, return it's
/// base defining value.  (The cache holds both base-of relations and plain
/// BDV self-references; the caller must distinguish the two.)
static Value *findBaseOrBDV(Value *I, DefiningValueMapTy &Cache,
                            IsKnownBaseMapTy &KnownBases) {
  Value *BDV = findBaseDefiningValueCached(I, Cache, KnownBases);
  // Either a base-of relation, or a self reference.  Caller must check.
  if (auto It = Cache.find(BDV); It != Cache.end())
    return It->second;
  // Only a BDV available.
  return BDV;
}
#ifndef NDEBUG
/// This value is a base pointer that is not generated by RS4GC, i.e. it
/// already exists in the code.  None of the instruction kinds RS4GC inserts
/// to materialize bases (phi/select/extractelement/insertelement/
/// shufflevector) match.
static bool isOriginalBaseResult(Value *V) {
  // no recursion possible
  return !(isa<PHINode>(V) || isa<SelectInst>(V) ||
           isa<ExtractElementInst>(V) || isa<InsertElementInst>(V) ||
           isa<ShuffleVectorInst>(V));
}
#endif
/// Returns true if V is a known base.  V must already have been classified
/// via setKnownBase.
static bool isKnownBase(Value *V, const IsKnownBaseMapTy &KnownBases) {
  auto Entry = KnownBases.find(V);
  assert(Entry != KnownBases.end() && "Value not present in the map");
  return Entry->second;
}
/// Caches the IsKnownBase flag for a value.  In asserts builds, re-recording
/// a value is only tolerated when the flag is unchanged.
static void setKnownBase(Value *V, bool IsKnownBase,
                         IsKnownBaseMapTy &KnownBases) {
#ifndef NDEBUG
  auto It = KnownBases.find(V);
  if (It != KnownBases.end())
    assert(It->second == IsKnownBase && "Changing already present value");
#endif
  KnownBases[V] = IsKnownBase;
}
// Returns true if First and Second values are both scalar or both vector.
static bool areBothVectorOrScalar(Value *First, Value *Second) {
  bool FirstIsVector = First->getType()->isVectorTy();
  bool SecondIsVector = Second->getType()->isVectorTy();
  return FirstIsVector == SecondIsVector;
}
namespace {

/// Models the state of a single base defining value in the findBasePointer
/// algorithm for determining where a new instruction is needed to propagate
/// the base of this BDV.
class BDVState {
public:
  enum StatusTy {
    // Starting state of lattice
    Unknown,
    // Some specific base value -- does *not* mean that instruction
    // propagates the base of the object
    // ex: gep %arg, 16 -> %arg is the base value
    Base,
    // Need to insert a node to represent a merge.
    Conflict
  };

  // Default construction is a program error: states are only created with an
  // original value, and looking up a missing state should never happen.
  BDVState() {
    llvm_unreachable("missing state in map");
  }

  explicit BDVState(Value *OriginalValue)
    : OriginalValue(OriginalValue) {}

  explicit BDVState(Value *OriginalValue, StatusTy Status,
                    Value *BaseValue = nullptr)
    : OriginalValue(OriginalValue), Status(Status), BaseValue(BaseValue) {
    // A Base state must carry the base value it names.
    assert(Status != Base || BaseValue);
  }

  StatusTy getStatus() const { return Status; }
  Value *getOriginalValue() const { return OriginalValue; }
  Value *getBaseValue() const { return BaseValue; }

  bool isBase() const { return getStatus() == Base; }
  bool isUnknown() const { return getStatus() == Unknown; }
  bool isConflict() const { return getStatus() == Conflict; }

  // Values of type BDVState form a lattice, and this function implements the
  // meet operation.  Lattice order: Unknown (top) > Base(b) > Conflict
  // (bottom); two different Base values meet to Conflict.
  void meet(const BDVState &Other) {
    auto markConflict = [&]() {
      Status = BDVState::Conflict;
      BaseValue = nullptr;
    };
    // Conflict is a final state.
    if (isConflict())
      return;
    // if we are not known - just take other state.
    if (isUnknown()) {
      Status = Other.getStatus();
      BaseValue = Other.getBaseValue();
      return;
    }
    // We are base.
    assert(isBase() && "Unknown state");
    // If other is unknown - just keep our state.
    if (Other.isUnknown())
      return;
    // If other is conflict - it is a final state.
    if (Other.isConflict())
      return markConflict();
    // Other is base as well.
    assert(Other.isBase() && "Unknown state");
    // If bases are different - Conflict.
    if (getBaseValue() != Other.getBaseValue())
      return markConflict();
    // We are identical, do nothing.
  }

  bool operator==(const BDVState &Other) const {
    return OriginalValue == Other.OriginalValue &&
           BaseValue == Other.BaseValue && Status == Other.Status;
  }

  bool operator!=(const BDVState &other) const { return !(*this == other); }

  LLVM_DUMP_METHOD
  void dump() const {
    print(dbgs());
    dbgs() << '\n';
  }

  // Print a compact "U/B/C (base ...) for <value>" summary for debugging.
  void print(raw_ostream &OS) const {
    switch (getStatus()) {
    case Unknown:
      OS << "U";
      break;
    case Base:
      OS << "B";
      break;
    case Conflict:
      OS << "C";
      break;
    }
    OS << " (base " << getBaseValue() << " - "
       << (getBaseValue() ? getBaseValue()->getName() : "nullptr") << ")"
       << " for " << OriginalValue->getName() << ":";
  }

private:
  AssertingVH<Value> OriginalValue; // instruction this state corresponds to
  StatusTy Status = Unknown;
  AssertingVH<Value> BaseValue = nullptr; // Non-null only if Status == Base.
};

} // end anonymous namespace
#ifndef NDEBUG
/// Stream-insertion support for BDVState; forwards to BDVState::print.
static raw_ostream &operator<<(raw_ostream &OS, const BDVState &State) {
  State.print(OS);
  return OS;
}
#endif
/// For a given value or instruction, figure out what base ptr its derived from.
/// For gc objects, this is simply itself. On success, returns a value which is
/// the base pointer. (This is reliable and can be used for relocation.) On
/// failure, returns nullptr.
static Value *findBasePointer(Value *I, DefiningValueMapTy &Cache,
IsKnownBaseMapTy &KnownBases) {
Value *Def = findBaseOrBDV(I, Cache, KnownBases);
if (isKnownBase(Def, KnownBases) && areBothVectorOrScalar(Def, I))
return Def;
// Here's the rough algorithm:
// - For every SSA value, construct a mapping to either an actual base
// pointer or a PHI which obscures the base pointer.
// - Construct a mapping from PHI to unknown TOP state. Use an
// optimistic algorithm to propagate base pointer information. Lattice
// looks like:
// UNKNOWN
// b1 b2 b3 b4
// CONFLICT
// When algorithm terminates, all PHIs will either have a single concrete
// base or be in a conflict state.
// - For every conflict, insert a dummy PHI node without arguments. Add
// these to the base[Instruction] = BasePtr mapping. For every
// non-conflict, add the actual base.
// - For every conflict, add arguments for the base[a] of each input
// arguments.
//
// Note: A simpler form of this would be to add the conflict form of all
// PHIs without running the optimistic algorithm. This would be
// analogous to pessimistic data flow and would likely lead to an
// overall worse solution.
#ifndef NDEBUG
auto isExpectedBDVType = [](Value *BDV) {
return isa<PHINode>(BDV) || isa<SelectInst>(BDV) ||
isa<ExtractElementInst>(BDV) || isa<InsertElementInst>(BDV) ||
isa<ShuffleVectorInst>(BDV);
};
#endif
// Once populated, will contain a mapping from each potentially non-base BDV
// to a lattice value (described above) which corresponds to that BDV.
// We use the order of insertion (DFS over the def/use graph) to provide a
// stable deterministic ordering for visiting DenseMaps (which are unordered)
// below. This is important for deterministic compilation.
MapVector<Value *, BDVState> States;
#ifndef NDEBUG
auto VerifyStates = [&]() {
for (auto &Entry : States) {
assert(Entry.first == Entry.second.getOriginalValue());
}
};
#endif
auto visitBDVOperands = [](Value *BDV, std::function<void (Value*)> F) {
if (PHINode *PN = dyn_cast<PHINode>(BDV)) {
for (Value *InVal : PN->incoming_values())
F(InVal);
} else if (SelectInst *SI = dyn_cast<SelectInst>(BDV)) {
F(SI->getTrueValue());
F(SI->getFalseValue());
} else if (auto *EE = dyn_cast<ExtractElementInst>(BDV)) {
F(EE->getVectorOperand());
} else if (auto *IE = dyn_cast<InsertElementInst>(BDV)) {
F(IE->getOperand(0));
F(IE->getOperand(1));
} else if (auto *SV = dyn_cast<ShuffleVectorInst>(BDV)) {
// For a canonical broadcast, ignore the undef argument
// (without this, we insert a parallel base shuffle for every broadcast)
F(SV->getOperand(0));
if (!SV->isZeroEltSplat())
F(SV->getOperand(1));
} else {
llvm_unreachable("unexpected BDV type");
}
};
// Recursively fill in all base defining values reachable from the initial
// one for which we don't already know a definite base value for
/* scope */ {
SmallVector<Value*, 16> Worklist;
Worklist.push_back(Def);
States.insert({Def, BDVState(Def)});
while (!Worklist.empty()) {
Value *Current = Worklist.pop_back_val();
assert(!isOriginalBaseResult(Current) && "why did it get added?");
auto visitIncomingValue = [&](Value *InVal) {
Value *Base = findBaseOrBDV(InVal, Cache, KnownBases);
if (isKnownBase(Base, KnownBases) && areBothVectorOrScalar(Base, InVal))
// Known bases won't need new instructions introduced and can be
// ignored safely. However, this can only be done when InVal and Base
// are both scalar or both vector. Otherwise, we need to find a
// correct BDV for InVal, by creating an entry in the lattice
// (States).
return;
assert(isExpectedBDVType(Base) && "the only non-base values "
"we see should be base defining values");
if (States.insert(std::make_pair(Base, BDVState(Base))).second)
Worklist.push_back(Base);
};
visitBDVOperands(Current, visitIncomingValue);
}
}
#ifndef NDEBUG
VerifyStates();
LLVM_DEBUG(dbgs() << "States after initialization:\n");
for (const auto &Pair : States) {
LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
}
#endif
// Iterate forward through the value graph pruning any node from the state
// list where all of the inputs are base pointers. The purpose of this is to
// reuse existing values when the derived pointer we were asked to materialize
// a base pointer for happens to be a base pointer itself. (Or a sub-graph
// feeding it does.)
SmallVector<Value *> ToRemove;
do {
ToRemove.clear();
for (auto Pair : States) {
Value *BDV = Pair.first;
auto canPruneInput = [&](Value *V) {
// If the input of the BDV is the BDV itself we can prune it. This is
// only possible if the BDV is a PHI node.
if (V->stripPointerCasts() == BDV)
return true;
Value *VBDV = findBaseOrBDV(V, Cache, KnownBases);
if (V->stripPointerCasts() != VBDV)
return false;
        // The assumption is that anything not in the state list
        // propagates a base pointer.
return States.count(VBDV) == 0;
};
bool CanPrune = true;
visitBDVOperands(BDV, [&](Value *Op) {
CanPrune = CanPrune && canPruneInput(Op);
});
if (CanPrune)
ToRemove.push_back(BDV);
}
for (Value *V : ToRemove) {
States.erase(V);
    // Cache the fact V is its own base for later usage.
Cache[V] = V;
}
} while (!ToRemove.empty());
// Did we manage to prove that Def itself must be a base pointer?
if (!States.count(Def))
return Def;
// Return a phi state for a base defining value. We'll generate a new
// base state for known bases and expect to find a cached state otherwise.
auto GetStateForBDV = [&](Value *BaseValue, Value *Input) {
auto I = States.find(BaseValue);
if (I != States.end())
return I->second;
assert(areBothVectorOrScalar(BaseValue, Input));
return BDVState(BaseValue, BDVState::Base, BaseValue);
};
// Even though we have identified a concrete base (or a conflict) for all live
// pointers at this point, there are cases where the base is of an
// incompatible type compared to the original instruction. We conservatively
// mark those as conflicts to ensure that corresponding BDVs will be generated
// in the next steps.
// this is a rather explicit check for all cases where we should mark the
// state as a conflict to force the latter stages of the algorithm to emit
// the BDVs.
// TODO: in many cases the instructions emitted for the conflicting states
// will be identical to the I itself (if the I's operate on their BDVs
// themselves). We should exploit this, but can't do it here since it would
// break the invariant about the BDVs not being known to be a base.
// TODO: the code also does not handle constants at all - the algorithm relies
// on all constants having the same BDV and therefore constant-only insns
// will never be in conflict, but this check is ignored here. If the
// constant conflicts will be to BDVs themselves, they will be identical
// instructions and will get optimized away (as in the above TODO)
auto MarkConflict = [&](Instruction *I, Value *BaseValue) {
// II and EE mixes vector & scalar so is always a conflict
if (isa<InsertElementInst>(I) || isa<ExtractElementInst>(I))
return true;
// Shuffle vector is always a conflict as it creates new vector from
// existing ones.
if (isa<ShuffleVectorInst>(I))