
root/gen/passes/GarbageCollect2Stack.cpp

Revision 1571:8d086d552909, 24.6 kB (checked in by Benjamin Kramer <benny.kra@gmail.com>, 15 years ago)

IntegerType is now contextified.

Requires llvm >= 78969. resistor says this will be the last context API change :)

#if USE_METADATA

//===- GarbageCollect2Stack - Optimize calls to the D garbage collector ---===//
//
//                             The LLVM D Compiler
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file attempts to turn allocations on the garbage-collected heap into
// stack allocations.
//
//===----------------------------------------------------------------------===//

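// For example (illustrative, not from this file): for D code like
//
//     auto a = new int[10];
//
// the frontend emits a call to a GC runtime function (_d_newarrayT below).
// When the analysis in this pass can prove the memory never escapes the
// function and no derived pointer stays live across a repeated allocation,
// the call is replaced with a plain alloca such as "alloca i32, i32 10".
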
#include "gen/metadata.h"

#define DEBUG_TYPE "dgc2stack"

#include "Passes.h"

#include "llvm/Pass.h"
#include "llvm/Module.h"
#include "llvm/Constants.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(NumGcToStack, "Number of calls promoted to constant-size allocas");
STATISTIC(NumToDynSize, "Number of calls promoted to dynamically-sized allocas");
STATISTIC(NumDeleted, "Number of GC calls deleted because the return value was unused");


namespace {
    struct Analysis {
        TargetData& TD;
        const Module& M;
        CallGraph* CG;
        CallGraphNode* CGNode;

        const Type* getTypeFor(Value* typeinfo) const;
    };
}

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

void EmitMemSet(IRBuilder<>& B, Value* Dst, Value* Val, Value* Len,
                const Analysis& A) {
    Dst = B.CreateBitCast(Dst, PointerType::getUnqual(B.getInt8Ty()));

    Module *M = B.GetInsertBlock()->getParent()->getParent();
    const Type* Tys[1];
    Tys[0] = Len->getType();
    Function *MemSet = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys, 1);
    Value *Align = ConstantInt::get(B.getInt32Ty(), 1);

    CallSite CS = B.CreateCall4(MemSet, Dst, Val, Len, Align);
    if (A.CGNode)
        A.CGNode->addCalledFunction(CS, A.CG->getOrInsertFunction(MemSet));
}

static void EmitMemZero(IRBuilder<>& B, Value* Dst, Value* Len,
                        const Analysis& A) {
    EmitMemSet(B, Dst, ConstantInt::get(B.getInt8Ty(), 0), Len, A);
}
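
// Note: in this era of LLVM, llvm.memset is overloaded only on the type of
// its length operand (hence Tys[0] = Len->getType() above) and takes four
// arguments: (dst, val, len, align). The constant alignment of 1 is the
// conservative choice, since the alignment of the promoted allocation is
// not tracked here (see the "FIXME: align?" notes below).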


//===----------------------------------------------------------------------===//
// Helpers for specific types of GC calls.
//===----------------------------------------------------------------------===//

namespace {
    class FunctionInfo {
    protected:
        const Type* Ty;

    public:
        unsigned TypeInfoArgNr;
        bool SafeToDelete;

        // Analyze the current call, filling in some fields. Returns true if
        // this is an allocation we can stack-allocate.
        virtual bool analyze(CallSite CS, const Analysis& A) {
            Value* TypeInfo = CS.getArgument(TypeInfoArgNr);
            Ty = A.getTypeFor(TypeInfo);
            return (Ty != NULL);
        }

        // Returns the alloca to replace this call.
        // It will always be inserted before the call.
        virtual AllocaInst* promote(CallSite CS, IRBuilder<>& B, const Analysis& A) {
            NumGcToStack++;

            Instruction* Begin = CS.getCaller()->getEntryBlock().begin();
            return new AllocaInst(Ty, ".nongc_mem", Begin); // FIXME: align?
        }

        FunctionInfo(unsigned typeInfoArgNr, bool safeToDelete)
        : TypeInfoArgNr(typeInfoArgNr), SafeToDelete(safeToDelete) {}
    };

    class ArrayFI : public FunctionInfo {
        Value* arrSize;
        int ArrSizeArgNr;
        bool Initialized;

    public:
        ArrayFI(unsigned tiArgNr, bool safeToDelete, bool initialized,
                unsigned arrSizeArgNr)
        : FunctionInfo(tiArgNr, safeToDelete),
          ArrSizeArgNr(arrSizeArgNr),
          Initialized(initialized)
        {}

        virtual bool analyze(CallSite CS, const Analysis& A) {
            if (!FunctionInfo::analyze(CS, A))
                return false;

            arrSize = CS.getArgument(ArrSizeArgNr);
            const IntegerType* SizeType =
                dyn_cast<IntegerType>(arrSize->getType());
            if (!SizeType)
                return false;
            unsigned bits = SizeType->getBitWidth();
            if (bits > 32) {
                // The array size of an alloca must be an i32, so make sure
                // the conversion is safe.
                APInt Mask = APInt::getHighBitsSet(bits, bits - 32);
                APInt KnownZero(bits, 0), KnownOne(bits, 0);
                ComputeMaskedBits(arrSize, Mask, KnownZero, KnownOne, &A.TD);
                if ((KnownZero & Mask) != Mask)
                    return false;
            }
            // Extract the element type from the array type.
            const StructType* ArrTy = dyn_cast<StructType>(Ty);
            assert(ArrTy && "Dynamic array type not a struct?");
            assert(isa<IntegerType>(ArrTy->getElementType(0)));
            const PointerType* PtrTy =
                cast<PointerType>(ArrTy->getElementType(1));
            Ty = PtrTy->getElementType();
            return true;
        }

        virtual AllocaInst* promote(CallSite CS, IRBuilder<>& B, const Analysis& A) {
            IRBuilder<> Builder = B;
            // If the allocation is of constant size it's best to put it in the
            // entry block, so do so if we're not already there.
            // For dynamically-sized allocations it's best to avoid the overhead
            // of allocating them if possible, so leave those where they are.
            // While we're at it, update statistics too.
            if (isa<Constant>(arrSize)) {
                BasicBlock& Entry = CS.getCaller()->getEntryBlock();
                if (Builder.GetInsertBlock() != &Entry)
                    Builder.SetInsertPoint(&Entry, Entry.begin());
                NumGcToStack++;
            } else {
                NumToDynSize++;
            }

            // Convert array size to 32 bits if necessary
            Value* count = Builder.CreateIntCast(arrSize, Builder.getInt32Ty(), false);
            AllocaInst* alloca = Builder.CreateAlloca(Ty, count, ".nongc_mem"); // FIXME: align?

            if (Initialized) {
                // For now, only zero-init is supported.
                uint64_t size = A.TD.getTypeStoreSize(Ty);
                Value* TypeSize = ConstantInt::get(arrSize->getType(), size);
                // Use the original B to put initialization at the
                // allocation site.
                Value* Size = B.CreateMul(TypeSize, arrSize);
                EmitMemZero(B, alloca, Size, A);
            }

            return alloca;
        }
    };
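
    // A note on the assertions in analyze() above: the type recorded in
    // metadata for a D dynamic array is its runtime representation, a
    // two-field struct of roughly { size_t length; T* ptr; }. analyze()
    // checks for an integer first field and takes the element type T from
    // the pointee of the second field, so the alloca is of T rather than
    // of the array struct itself.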

    // FunctionInfo for _d_allocclass
    class AllocClassFI : public FunctionInfo {
    public:
        virtual bool analyze(CallSite CS, const Analysis& A) {
            // This call contains no TypeInfo parameter, so don't call the
            // base class implementation here...
            if (CS.arg_size() != 1)
                return false;
            Value* arg = CS.getArgument(0)->stripPointerCasts();
            GlobalVariable* ClassInfo = dyn_cast<GlobalVariable>(arg);
            if (!ClassInfo)
                return false;

            std::string metaname = CD_PREFIX;
            metaname += ClassInfo->getName();

            NamedMDNode* meta = A.M.getNamedMetadata(metaname);
            if (!meta)
                return false;

            MDNode* node = static_cast<MDNode*>(meta->getElement(0));
            if (!node || MD_GetNumElements(node) != CD_NumFields)
                return false;

            // Inserting destructor calls is not implemented yet, so classes
            // with destructors are ignored for now.
            Constant* hasDestructor = dyn_cast<Constant>(MD_GetElement(node, CD_Finalize));
            // We can't stack-allocate if the class has a custom deallocator
            // (Custom allocators don't get turned into this runtime call, so
            // those can be ignored)
            Constant* hasCustomDelete = dyn_cast<Constant>(MD_GetElement(node, CD_CustomDelete));
            if (hasDestructor == NULL || hasCustomDelete == NULL)
                return false;

            if (ConstantExpr::getOr(hasDestructor, hasCustomDelete)
                    != ConstantInt::getFalse(A.M.getContext()))
                return false;

            Ty = MD_GetElement(node, CD_BodyType)->getType();
            return true;
        }

        // The default promote() should be fine.

        AllocClassFI() : FunctionInfo(~0u, true) {}
    };
}
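
// A sketch of the class metadata consumed above (the exact node layout is
// defined by the frontend; see gen/metadata.h): for each class, a named
// metadata node CD_PREFIX + <ClassInfo name> carries CD_NumFields elements,
// including an i1 "has destructor" flag (CD_Finalize), an i1 "has custom
// delete" flag (CD_CustomDelete), and a dummy value whose type is the class
// body (CD_BodyType). The ~0u TypeInfo index passed to FunctionInfo is a
// deliberate "no such argument" marker; the overridden analyze() never
// reads it.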


//===----------------------------------------------------------------------===//
// GarbageCollect2Stack Pass Implementation
//===----------------------------------------------------------------------===//

namespace {
    /// This pass replaces GC allocation calls with allocas.
    ///
    class VISIBILITY_HIDDEN GarbageCollect2Stack : public FunctionPass {
        StringMap<FunctionInfo*> KnownFunctions;
        Module* M;

        FunctionInfo AllocMemoryT;
        ArrayFI NewArrayVT;
        ArrayFI NewArrayT;
        AllocClassFI AllocClass;

    public:
        static char ID; // Pass identification
        GarbageCollect2Stack();

        bool doInitialization(Module &M) {
            this->M = &M;
            return false;
        }

        bool runOnFunction(Function &F);

        virtual void getAnalysisUsage(AnalysisUsage &AU) const {
          AU.addRequired<TargetData>();
          AU.addRequired<DominatorTree>();

          AU.addPreserved<CallGraph>();
          AU.addPreserved<DominatorTree>();
        }
    };
    char GarbageCollect2Stack::ID = 0;
} // end anonymous namespace.

static RegisterPass<GarbageCollect2Stack>
X("dgc2stack", "Promote (GC'ed) heap allocations to stack");

// Public interface to the pass.
FunctionPass *createGarbageCollect2Stack() {
  return new GarbageCollect2Stack();
}
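
// createGarbageCollect2Stack() is the hook LDC's own pass setup uses
// (declared in Passes.h); the RegisterPass above also exposes the pass by
// name, so a build of `opt` containing this code could run it directly,
// e.g. `opt -dgc2stack -simplifycfg foo.bc` (illustrative invocation; note
// the comment in RemoveCall below about leaving cleanup to -simplifycfg).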

GarbageCollect2Stack::GarbageCollect2Stack()
: FunctionPass(&ID),
  AllocMemoryT(0, true),
  NewArrayVT(0, true, false, 1),
  NewArrayT(0, true, true, 1)
{
    KnownFunctions["_d_allocmemoryT"] = &AllocMemoryT;
    KnownFunctions["_d_newarrayvT"] = &NewArrayVT;
    KnownFunctions["_d_newarrayT"] = &NewArrayT;
    KnownFunctions["_d_allocclass"] = &AllocClass;
}
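
// The runtime entry points registered here correspond (roughly) to the D
// expressions `new T` for plain memory (_d_allocmemoryT), `new T[n]` without
// and with zero-initialization (_d_newarrayvT and _d_newarrayT), and `new C`
// for class instances (_d_allocclass). The constructor arguments above
// encode: the index of the TypeInfo argument, whether an unused call may
// simply be deleted, and for the array variants whether they
// zero-initialize plus the index of the array-length argument.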

static void RemoveCall(CallSite CS, const Analysis& A) {
    if (CS.isInvoke()) {
        InvokeInst* Invoke = cast<InvokeInst>(CS.getInstruction());
        // If this was an invoke instruction, we need to do some extra
        // work to preserve the control flow.

        // Create a "conditional" branch that -simplifycfg can clean up, so we
        // can keep using the DominatorTree without updating it.
        BranchInst::Create(Invoke->getNormalDest(), Invoke->getUnwindDest(),
            ConstantInt::getTrue(A.M.getContext()), Invoke->getParent());
    }
    // Remove the runtime call.
    if (A.CGNode)
        A.CGNode->removeCallEdgeFor(CS);
    CS.getInstruction()->eraseFromParent();
}

static bool isSafeToStackAllocate(Instruction* Alloc, DominatorTree& DT);

/// runOnFunction - Top level algorithm.
///
bool GarbageCollect2Stack::runOnFunction(Function &F) {
    DEBUG(errs() << "\nRunning -dgc2stack on function " << F.getName() << '\n');

    TargetData& TD = getAnalysis<TargetData>();
    DominatorTree& DT = getAnalysis<DominatorTree>();
    CallGraph* CG = getAnalysisIfAvailable<CallGraph>();
    CallGraphNode* CGNode = CG ? (*CG)[&F] : NULL;

    Analysis A = { TD, *M, CG, CGNode };

    BasicBlock& Entry = F.getEntryBlock();

    IRBuilder<> AllocaBuilder(&Entry, Entry.begin());

    bool Changed = false;
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
        for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
            // Ignore non-calls.
            Instruction* Inst = I++;
            CallSite CS = CallSite::get(Inst);
            if (!CS.getInstruction())
                continue;

            // Ignore indirect calls and calls to non-external functions.
            Function *Callee = CS.getCalledFunction();
            if (Callee == 0 || !Callee->isDeclaration() ||
                    !(Callee->hasExternalLinkage() || Callee->hasDLLImportLinkage()))
                continue;

            // Ignore unknown calls.
            StringMap<FunctionInfo*>::iterator OMI =
                KnownFunctions.find(Callee->getName());
            if (OMI == KnownFunctions.end()) continue;

            assert(isa<PointerType>(Inst->getType())
                && "GC function doesn't return a pointer?");

            FunctionInfo* info = OMI->getValue();

            if (Inst->use_empty() && info->SafeToDelete) {
                Changed = true;
                NumDeleted++;
                RemoveCall(CS, A);
                continue;
            }

            DEBUG(errs() << "GarbageCollect2Stack inspecting: " << *Inst);

            if (!info->analyze(CS, A) || !isSafeToStackAllocate(Inst, DT))
                continue;

            // Let's alloca this!
            Changed = true;

            IRBuilder<> Builder(BB, Inst);
            Value* newVal = info->promote(CS, Builder, A);

            DEBUG(errs() << "Promoted to: " << *newVal);

            // Make sure the type is the same as it was before, and replace all
            // uses of the runtime call with the alloca.
            if (newVal->getType() != Inst->getType())
                newVal = Builder.CreateBitCast(newVal, Inst->getType());
            Inst->replaceAllUsesWith(newVal);

            RemoveCall(CS, A);
        }
    }

    return Changed;
}

const Type* Analysis::getTypeFor(Value* typeinfo) const {
    GlobalVariable* ti_global = dyn_cast<GlobalVariable>(typeinfo->stripPointerCasts());
    if (!ti_global)
        return NULL;

    std::string metaname = TD_PREFIX;
    metaname += ti_global->getName();

    NamedMDNode* meta = M.getNamedMetadata(metaname);
    if (!meta)
        return NULL;

    MDNode* node = static_cast<MDNode*>(meta->getElement(0));
    if (!node)
        return NULL;

    if (MD_GetNumElements(node) != TD_NumFields)
        return NULL;
    if (TD_Confirm >= 0 && (!MD_GetElement(node, TD_Confirm) ||
            MD_GetElement(node, TD_Confirm)->stripPointerCasts() != ti_global))
        return NULL;

    return MD_GetElement(node, TD_Type)->getType();
}
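
// In other words: for every TypeInfo global the frontend emits a named
// metadata node (TD_PREFIX + the global's name) with TD_NumFields elements,
// among them a back-reference to the TypeInfo global itself (TD_Confirm,
// checked above as a consistency test) and a dummy value whose IR type is
// the type being allocated (TD_Type). getTypeFor() returns that type, or
// NULL if the metadata is missing or malformed.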

/// Returns whether Def is used by any instruction that is reachable from Alloc
/// (without executing Def again).
static bool mayBeUsedAfterRealloc(Instruction* Def, Instruction* Alloc, DominatorTree& DT) {
    DEBUG(errs() << "### mayBeUsedAfterRealloc()\n" << *Def << *Alloc);

    // If the definition isn't used it obviously won't be used after the
    // allocation.
    // If it does not dominate the allocation, there's no way for it to be used
    // without going through Def again first, since the definition couldn't
    // dominate the user either.
    if (Def->use_empty() || !DT.dominates(Def, Alloc)) {
        DEBUG(errs() << "### No uses or does not dominate allocation\n");
        return false;
    }

    DEBUG(errs() << "### Def dominates Alloc\n");

    BasicBlock* DefBlock = Def->getParent();
    BasicBlock* AllocBlock = Alloc->getParent();

    // Create a set of users and one of blocks containing users.
    SmallSet<User*, 16> Users;
    SmallSet<BasicBlock*, 16> UserBlocks;
    for (Instruction::use_iterator UI = Def->use_begin(), UE = Def->use_end();
         UI != UE; ++UI) {
        Instruction* User = cast<Instruction>(*UI);
        DEBUG(errs() << "USER: " << *User);
        BasicBlock* UserBlock = User->getParent();

        // This dominance check is not performed if they're in the same block
        // because it will just walk the instruction list to figure it out.
        // We will instead do that ourselves in the first iteration (for all
        // users at once).
        if (AllocBlock != UserBlock && DT.dominates(AllocBlock, UserBlock)) {
            // There's definitely a path from alloc to this user that does not
            // go through Def, namely any path that ends up in that user.
            DEBUG(errs() << "### Alloc dominates user " << *User);
            return true;
        }

        // Phi nodes are checked separately, so no need to enter them here.
        if (!isa<PHINode>(User)) {
            Users.insert(User);
            UserBlocks.insert(UserBlock);
        }
    }

    // Contains first instruction of block to inspect.
    typedef std::pair<BasicBlock*, BasicBlock::iterator> StartPoint;
    SmallVector<StartPoint, 16> Worklist;
    // Keeps track of successors that have been added to the work list.
    SmallSet<BasicBlock*, 16> Visited;

    // Start just after the allocation.
    // Note that we don't insert AllocBlock into the Visited set here so the
    // start of the block will get inspected if it's reachable.
    BasicBlock::iterator Start = Alloc;
    ++Start;
    Worklist.push_back(StartPoint(AllocBlock, Start));

    while (!Worklist.empty()) {
        StartPoint sp = Worklist.pop_back_val();
        BasicBlock* B = sp.first;
        BasicBlock::iterator BBI = sp.second;
        // BBI is either just after the allocation (in the first iteration)
        // or just after the last phi node in B (in subsequent iterations) here.

        // This whole 'if' is just a way to avoid performing the inner 'for'
        // loop when it can be determined not to be necessary, avoiding
        // potentially expensive walks of the instruction list.
        // It should be equivalent to just doing the loop.
        if (UserBlocks.count(B)) {
            if (B != DefBlock && B != AllocBlock) {
                // This block does not contain the definition or the allocation,
                // so any user in this block is definitely reachable without
                // finding either the definition or the allocation.
                DEBUG(errs() << "### Block " << B->getName()
                     << " contains a reachable user\n");
                return true;
            }
            // We need to walk the instructions in the block to see whether we
            // reach a user before we reach the definition or the allocation.
            for (BasicBlock::iterator E = B->end(); BBI != E; ++BBI) {
                if (&*BBI == Alloc || &*BBI == Def)
                    break;
                if (Users.count(BBI)) {
                    DEBUG(errs() << "### Problematic user: " << *BBI);
                    return true;
                }
            }
        } else if (B == DefBlock || (B == AllocBlock && BBI != Start)) {
            // There are no users in the block, so the def or the allocation
            // will be encountered before any users through this path.
            // Skip to the next item on the worklist.
            continue;
        } else {
            // No users and no definition or allocation after the start point,
            // so just keep going.
        }

        // All instructions after the starting point in this block have been
        // accounted for. Look for successors to add to the work list.
        TerminatorInst* Term = B->getTerminator();
        unsigned SuccCount = Term->getNumSuccessors();
        for (unsigned i = 0; i < SuccCount; i++) {
            BasicBlock* Succ = Term->getSuccessor(i);
            BBI = Succ->begin();
            // Check phi nodes here because we only care about the operand
            // coming in from this block.
            bool SeenDef = false;
            while (isa<PHINode>(BBI)) {
                if (Def == cast<PHINode>(BBI)->getIncomingValueForBlock(B)) {
                    DEBUG(errs() << "### Problematic phi user: " << *BBI);
                    return true;
                }
                SeenDef |= (Def == &*BBI);
                ++BBI;
            }
            // If none of the phis we just looked at were the definition, we
            // haven't seen this block yet, and it's dominated by the def
            // (meaning paths through it could lead to users), add the block and
            // the first non-phi to the worklist.
            if (!SeenDef && Visited.insert(Succ) && DT.dominates(DefBlock, Succ))
                Worklist.push_back(StartPoint(Succ, BBI));
        }
    }
    // No users found in any block reachable from Alloc
    // without going through the definition again.
    return false;
}
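
// As an illustration (D-flavored pseudocode, not from this codebase):
//
//     int* prev = null;
//     while (cond) {
//         int* p = <GC allocation being promoted>;   // Alloc
//         if (prev) *prev = 1;   // uses the previous iteration's pointer
//         prev = p;              // Def: a phi/derived value of the Alloc
//     }
//
// Here Def is live across the next execution of the allocation, so folding
// the call into a single stack slot would let iterations clobber each
// other's data; this is exactly the case the walk above reports as unsafe.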


/// isSafeToStackAllocate - Return true if the GC call passed in is safe to turn
/// into a stack allocation. This requires that the return value does not
/// escape from the function and no derived pointers are live at the call site
/// (i.e. if it's in a loop then the function can't use any pointer returned
/// from an earlier call after a new call has been made)
///
/// This is currently conservative where loops are involved: it can handle
/// simple loops, but returns false if any derived pointer is used in a
/// subsequent iteration.
///
/// Based on LLVM's PointerMayBeCaptured(), which only does escape analysis but
/// doesn't care about loops.
bool isSafeToStackAllocate(Instruction* Alloc, DominatorTree& DT) {
  assert(isa<PointerType>(Alloc->getType()) && "Allocation is not a pointer?");
  Value* V = Alloc;

  SmallVector<Use*, 16> Worklist;
  SmallSet<Use*, 16> Visited;

  for (Value::use_iterator UI = V->use_begin(), UE = V->use_end();
       UI != UE; ++UI) {
    Use *U = &UI.getUse();
    Visited.insert(U);
    Worklist.push_back(U);
  }

  while (!Worklist.empty()) {
    Use *U = Worklist.pop_back_val();
    Instruction *I = cast<Instruction>(U->getUser());
    V = U->get();

    switch (I->getOpcode()) {
    case Instruction::Call:
    case Instruction::Invoke: {
      CallSite CS = CallSite::get(I);
      // Not captured if the callee is readonly, doesn't return a copy through
      // its return value and doesn't unwind (a readonly function can leak bits
      // by throwing an exception or not depending on the input value).
      if (CS.onlyReadsMemory() && CS.doesNotThrow() &&
          I->getType() == Type::getVoidTy(I->getContext()))
        break;

      // Not captured if only passed via 'nocapture' arguments.  Note that
      // calling a function pointer does not in itself cause the pointer to
      // be captured.  This is a subtle point considering that (for example)
      // the callee might return its own address.  It is analogous to saying
      // that loading a value from a pointer does not cause the pointer to be
      // captured, even though the loaded value might be the pointer itself
      // (think of self-referential objects).
      CallSite::arg_iterator B = CS.arg_begin(), E = CS.arg_end();
      for (CallSite::arg_iterator A = B; A != E; ++A)
        if (A->get() == V && !CS.paramHasAttr(A - B + 1, Attribute::NoCapture))
          // The parameter is not marked 'nocapture' - captured.
          return false;
      // Only passed via 'nocapture' arguments, or is the called function - not
      // captured.
      break;
    }
    case Instruction::Free:
      // Freeing a pointer does not cause it to be captured.
      break;
    case Instruction::Load:
      // Loading from a pointer does not cause it to be captured.
      break;
    case Instruction::Store:
      if (V == I->getOperand(0))
        // Stored the pointer - it may be captured.
        return false;
      // Storing to the pointee does not cause the pointer to be captured.
      break;
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::PHI:
    case Instruction::Select:
      // It's not safe to stack-allocate if this derived pointer is live across
      // the original allocation.
      if (mayBeUsedAfterRealloc(I, Alloc, DT))
        return false;

      // The original value is not captured via this if the new value isn't.
      for (Instruction::use_iterator UI = I->use_begin(), UE = I->use_end();
           UI != UE; ++UI) {
        Use *U = &UI.getUse();
        if (Visited.insert(U))
          Worklist.push_back(U);
      }
      break;
    default:
      // Something else - be conservative and say it is captured.
      return false;
    }
  }

  // All uses examined - not captured or live across original allocation.
  return true;
}

#endif // USE_METADATA