Coverage Report

Created: 2018-12-14 11:24

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/include/llvm/Analysis/Utils/Local.h
Line
Count
Source
1
//===- Local.h - Functions to perform local transformations -----*- C++ -*-===//
2
//
3
//                     The LLVM Compiler Infrastructure
4
//
5
// This file is distributed under the University of Illinois Open Source
6
// License. See LICENSE.TXT for details.
7
//
8
//===----------------------------------------------------------------------===//
9
//
10
// This family of functions perform various local transformations to the
11
// program.
12
//
13
//===----------------------------------------------------------------------===//
14
15
#ifndef LLVM_ANALYSIS_UTILS_LOCAL_H
16
#define LLVM_ANALYSIS_UTILS_LOCAL_H
17
18
#include "llvm/IR/DataLayout.h"
19
#include "llvm/IR/GetElementPtrTypeIterator.h"
20
21
namespace llvm {
22
23
/// Given a getelementptr instruction/constantexpr, emit the code necessary to
24
/// compute the offset from the base pointer (without adding in the base
25
/// pointer). Return the result as a signed integer of intptr size.
26
/// When NoAssumptions is true, no assumptions about index computation not
27
// overflowing are made.
28
template <typename IRBuilderTy>
29
Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
30
367
                     bool NoAssumptions = false) {
31
367
  GEPOperator *GEPOp = cast<GEPOperator>(GEP);
32
367
  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
33
367
  Value *Result = Constant::getNullValue(IntPtrTy);
34
367
35
367
  // If the GEP is inbounds, we know that none of the addressing operations will
36
367
  // overflow in an unsigned sense.
37
367
  bool isInBounds = GEPOp->isInBounds() && 
!NoAssumptions362
;
38
367
39
367
  // Build a mask for high order bits.
40
367
  unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
41
367
  uint64_t PtrSizeMask =
42
367
      std::numeric_limits<uint64_t>::max() >> (64 - IntPtrWidth);
43
367
44
367
  gep_type_iterator GTI = gep_type_begin(GEP);
45
795
  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
46
428
       ++i, ++GTI) {
47
428
    Value *Op = *i;
48
428
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
49
428
    if (Constant *OpC = dyn_cast<Constant>(Op)) {
50
87
      if (OpC->isZeroValue())
51
53
        continue;
52
34
53
34
      // Handle a struct index, which adds its field offset to the pointer.
54
34
      if (StructType *STy = GTI.getStructTypeOrNull()) {
55
5
        if (OpC->getType()->isVectorTy())
56
3
          OpC = OpC->getSplatValue();
57
5
58
5
        uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
59
5
        Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
60
5
61
5
        if (Size)
62
5
          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
63
5
                                      GEP->getName()+".offs");
64
5
        continue;
65
5
      }
66
29
67
29
      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
68
29
      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
69
29
      Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
70
29
      // Emit an add instruction.
71
29
      Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
72
29
      continue;
73
29
    }
74
341
    // Convert to correct type.
75
341
    if (Op->getType() != IntPtrTy)
76
3
      Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
77
341
    if (Size != 1) {
78
162
      // We'll let instcombine(mul) convert this to a shl if possible.
79
162
      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
80
162
                              GEP->getName()+".idx", isInBounds /*NUW*/);
81
162
    }
82
341
83
341
    // Emit an add instruction.
84
341
    Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
85
341
  }
86
367
  return Result;
87
367
}
llvm::Value* llvm::EmitGEPOffset<llvm::IRBuilder<llvm::TargetFolder, llvm::IRBuilderDefaultInserter> >(llvm::IRBuilder<llvm::TargetFolder, llvm::IRBuilderDefaultInserter>*, llvm::DataLayout const&, llvm::User*, bool)
Line
Count
Source
30
28
                     bool NoAssumptions = false) {
31
28
  GEPOperator *GEPOp = cast<GEPOperator>(GEP);
32
28
  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
33
28
  Value *Result = Constant::getNullValue(IntPtrTy);
34
28
35
28
  // If the GEP is inbounds, we know that none of the addressing operations will
36
28
  // overflow in an unsigned sense.
37
28
  bool isInBounds = GEPOp->isInBounds() && 
!NoAssumptions27
;
38
28
39
28
  // Build a mask for high order bits.
40
28
  unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
41
28
  uint64_t PtrSizeMask =
42
28
      std::numeric_limits<uint64_t>::max() >> (64 - IntPtrWidth);
43
28
44
28
  gep_type_iterator GTI = gep_type_begin(GEP);
45
76
  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
46
48
       ++i, ++GTI) {
47
48
    Value *Op = *i;
48
48
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
49
48
    if (Constant *OpC = dyn_cast<Constant>(Op)) {
50
24
      if (OpC->isZeroValue())
51
16
        continue;
52
8
53
8
      // Handle a struct index, which adds its field offset to the pointer.
54
8
      if (StructType *STy = GTI.getStructTypeOrNull()) {
55
0
        if (OpC->getType()->isVectorTy())
56
0
          OpC = OpC->getSplatValue();
57
0
58
0
        uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
59
0
        Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
60
0
61
0
        if (Size)
62
0
          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
63
0
                                      GEP->getName()+".offs");
64
0
        continue;
65
0
      }
66
8
67
8
      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
68
8
      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
69
8
      Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
70
8
      // Emit an add instruction.
71
8
      Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
72
8
      continue;
73
8
    }
74
24
    // Convert to correct type.
75
24
    if (Op->getType() != IntPtrTy)
76
3
      Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
77
24
    if (Size != 1) {
78
20
      // We'll let instcombine(mul) convert this to a shl if possible.
79
20
      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
80
20
                              GEP->getName()+".idx", isInBounds /*NUW*/);
81
20
    }
82
24
83
24
    // Emit an add instruction.
84
24
    Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
85
24
  }
86
28
  return Result;
87
28
}
llvm::Value* llvm::EmitGEPOffset<llvm::IRBuilder<llvm::TargetFolder, llvm::IRBuilderCallbackInserter> >(llvm::IRBuilder<llvm::TargetFolder, llvm::IRBuilderCallbackInserter>*, llvm::DataLayout const&, llvm::User*, bool)
Line
Count
Source
30
339
                     bool NoAssumptions = false) {
31
339
  GEPOperator *GEPOp = cast<GEPOperator>(GEP);
32
339
  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
33
339
  Value *Result = Constant::getNullValue(IntPtrTy);
34
339
35
339
  // If the GEP is inbounds, we know that none of the addressing operations will
36
339
  // overflow in an unsigned sense.
37
339
  bool isInBounds = GEPOp->isInBounds() && 
!NoAssumptions335
;
38
339
39
339
  // Build a mask for high order bits.
40
339
  unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
41
339
  uint64_t PtrSizeMask =
42
339
      std::numeric_limits<uint64_t>::max() >> (64 - IntPtrWidth);
43
339
44
339
  gep_type_iterator GTI = gep_type_begin(GEP);
45
719
  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
46
380
       ++i, ++GTI) {
47
380
    Value *Op = *i;
48
380
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
49
380
    if (Constant *OpC = dyn_cast<Constant>(Op)) {
50
63
      if (OpC->isZeroValue())
51
37
        continue;
52
26
53
26
      // Handle a struct index, which adds its field offset to the pointer.
54
26
      if (StructType *STy = GTI.getStructTypeOrNull()) {
55
5
        if (OpC->getType()->isVectorTy())
56
3
          OpC = OpC->getSplatValue();
57
5
58
5
        uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
59
5
        Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
60
5
61
5
        if (Size)
62
5
          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
63
5
                                      GEP->getName()+".offs");
64
5
        continue;
65
5
      }
66
21
67
21
      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
68
21
      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
69
21
      Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
70
21
      // Emit an add instruction.
71
21
      Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
72
21
      continue;
73
21
    }
74
317
    // Convert to correct type.
75
317
    if (Op->getType() != IntPtrTy)
76
0
      Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
77
317
    if (Size != 1) {
78
142
      // We'll let instcombine(mul) convert this to a shl if possible.
79
142
      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
80
142
                              GEP->getName()+".idx", isInBounds /*NUW*/);
81
142
    }
82
317
83
317
    // Emit an add instruction.
84
317
    Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
85
317
  }
86
339
  return Result;
87
339
}
88
89
}
90
91
#endif // LLVM_ANALYSIS_UTILS_LOCAL_H