/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/include/llvm/Analysis/Utils/Local.h
Line | Count | Source |
1 | | //===- Local.h - Functions to perform local transformations -----*- C++ -*-===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This family of functions performs various local transformations to the |
10 | | // program. |
11 | | // |
12 | | //===----------------------------------------------------------------------===// |
13 | | |
14 | | #ifndef LLVM_ANALYSIS_UTILS_LOCAL_H |
15 | | #define LLVM_ANALYSIS_UTILS_LOCAL_H |
16 | | |
17 | | #include "llvm/IR/DataLayout.h" |
18 | | #include "llvm/IR/GetElementPtrTypeIterator.h" |
19 | | |
20 | | namespace llvm { |
21 | | |
22 | | /// Given a getelementptr instruction/constantexpr, emit the code necessary to |
23 | | /// compute the offset from the base pointer (without adding in the base |
24 | | /// pointer). Return the result as a signed integer of intptr size. |
25 | | /// When NoAssumptions is true, no assumptions are made about the index |
26 | | /// computations not overflowing. |
27 | | template <typename IRBuilderTy> |
28 | | Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP, |
29 | 377 | bool NoAssumptions = false) { |
30 | 377 | GEPOperator *GEPOp = cast<GEPOperator>(GEP); |
31 | 377 | Type *IntPtrTy = DL.getIntPtrType(GEP->getType()); |
32 | 377 | Value *Result = Constant::getNullValue(IntPtrTy); |
33 | 377 | |
34 | 377 | // If the GEP is inbounds, we know that none of the addressing operations will |
35 | 377 | // overflow in an unsigned sense. |
36 | 377 | bool isInBounds = GEPOp->isInBounds() && !NoAssumptions; |
37 | 377 | |
38 | 377 | // Build a mask for high order bits. |
39 | 377 | unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth(); |
40 | 377 | uint64_t PtrSizeMask = |
41 | 377 | std::numeric_limits<uint64_t>::max() >> (64 - IntPtrWidth); |
42 | 377 | |
43 | 377 | gep_type_iterator GTI = gep_type_begin(GEP); |
44 | 815 | for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e; |
45 | 438 | ++i, ++GTI) { |
46 | 438 | Value *Op = *i; |
47 | 438 | uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask; |
48 | 438 | if (Constant *OpC = dyn_cast<Constant>(Op)) { |
49 | 88 | if (OpC->isZeroValue()) |
50 | 53 | continue; |
51 | 35 | |
52 | 35 | // Handle a struct index, which adds its field offset to the pointer. |
53 | 35 | if (StructType *STy = GTI.getStructTypeOrNull()) { |
54 | 5 | if (OpC->getType()->isVectorTy()) |
55 | 3 | OpC = OpC->getSplatValue(); |
56 | 5 | |
57 | 5 | uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue(); |
58 | 5 | Size = DL.getStructLayout(STy)->getElementOffset(OpValue); |
59 | 5 | |
60 | 5 | if (Size) |
61 | 5 | Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size), |
62 | 5 | GEP->getName()+".offs"); |
63 | 5 | continue; |
64 | 5 | } |
65 | 30 | |
66 | 30 | Constant *Scale = ConstantInt::get(IntPtrTy, Size); |
67 | 30 | Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/); |
68 | 30 | Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/); |
69 | 30 | // Emit an add instruction. |
70 | 30 | Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs"); |
71 | 30 | continue; |
72 | 30 | } |
73 | 350 | // Convert to correct type. |
74 | 350 | if (Op->getType() != IntPtrTy) |
75 | 3 | Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c"); |
76 | 350 | if (Size != 1) { |
77 | 163 | // We'll let instcombine(mul) convert this to a shl if possible. |
78 | 163 | Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size), |
79 | 163 | GEP->getName()+".idx", isInBounds /*NUW*/); |
80 | 163 | } |
81 | 350 | |
82 | 350 | // Emit an add instruction. |
83 | 350 | Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs"); |
84 | 350 | } |
85 | 377 | return Result; |
86 | 377 | } |
Instantiations (each per-line sub-table duplicates the source above line for line, so only the entry counts are kept here):
  llvm::Value* llvm::EmitGEPOffset<llvm::IRBuilder<llvm::TargetFolder, llvm::IRBuilderDefaultInserter> >(llvm::IRBuilder<llvm::TargetFolder, llvm::IRBuilderDefaultInserter>*, llvm::DataLayout const&, llvm::User*, bool): 29 entries
  llvm::Value* llvm::EmitGEPOffset<llvm::IRBuilder<llvm::TargetFolder, llvm::IRBuilderCallbackInserter> >(llvm::IRBuilder<llvm::TargetFolder, llvm::IRBuilderCallbackInserter>*, llvm::DataLayout const&, llvm::User*, bool): 348 entries
|
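To make the expansion concrete, here is a hand-worked trace (illustrative only; it is not part of the coverage report, and the value names just follow the ".idx"/".offs" suffixes used in the code above). On a target with 64-bit pointers, consider:

    %gep = getelementptr inbounds { i32, [10 x i16] }, { i32, [10 x i16] }* %base, i64 1, i32 1, i64 %k

The loop visits the three indices in turn. The leading i64 1 takes the constant path and is scaled by the struct's alloc size (24 bytes); the field index i32 1 takes the struct path and contributes the field offset from the StructLayout (4 bytes); the trailing i64 %k takes the variable path and is multiplied by the i16 alloc size (2 bytes). Because the GEP is inbounds and NoAssumptions is false, the multiply carries the nuw flag, and the builder's folder collapses the constant adds (24 + 4 = 28), so the emitted offset is equivalent to:

    %gep.idx = mul nuw i64 %k, 2
    %gep.offs = add i64 %gep.idx, 28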
87 | | |
88 | | } // end namespace llvm |
89 | | |
90 | | #endif // LLVM_ANALYSIS_UTILS_LOCAL_H |
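For reference, a minimal sketch of how a caller might use EmitGEPOffset. The wrapper function and its name are hypothetical; only EmitGEPOffset, IRBuilder, DataLayout, and GetElementPtrInst come from LLVM, and the sketch assumes an LLVM of the same vintage as the header above:

    #include "llvm/Analysis/Utils/Local.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    // Hypothetical helper: materialize the byte offset computed by a GEP as
    // explicit integer arithmetic inserted immediately before the GEP.
    static Value *materializeGEPOffset(GetElementPtrInst *GEP) {
      const DataLayout &DL = GEP->getModule()->getDataLayout();
      IRBuilder<> Builder(GEP); // new instructions are inserted before the GEP
      // The result is a signed, intptr-sized offset from the base pointer;
      // the base pointer itself is not added in.
      return EmitGEPOffset(&Builder, DL, GEP);
    }

A caller could then, for example, feed the returned offset into a ptrtoint/add rewrite of the original GEP, which is broadly how passes like InstCombine (the IRBuilder<TargetFolder, ...> instantiations listed above) consume it.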