Coverage Report

Created: 2018-07-18 22:01

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/include/llvm/Analysis/Utils/Local.h
Line
Count
Source
1
//===- Local.h - Functions to perform local transformations -----*- C++ -*-===//
2
//
3
//                     The LLVM Compiler Infrastructure
4
//
5
// This file is distributed under the University of Illinois Open Source
6
// License. See LICENSE.TXT for details.
7
//
8
//===----------------------------------------------------------------------===//
9
//
10
// This family of functions perform various local transformations to the
11
// program.
12
//
13
//===----------------------------------------------------------------------===//
14
15
#ifndef LLVM_ANALYSIS_UTILS_LOCAL_H
16
#define LLVM_ANALYSIS_UTILS_LOCAL_H
17
18
#include "llvm/IR/DataLayout.h"
19
#include "llvm/IR/GetElementPtrTypeIterator.h"
20
21
namespace llvm {
22
23
/// Given a getelementptr instruction/constantexpr, emit the code necessary to
24
/// compute the offset from the base pointer (without adding in the base
25
/// pointer). Return the result as a signed integer of intptr size.
26
/// When NoAssumptions is true, no assumptions about index computation not
27
/// overflowing is made.
28
template <typename IRBuilderTy>
29
Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
30
299
                     bool NoAssumptions = false) {
31
299
  GEPOperator *GEPOp = cast<GEPOperator>(GEP);
32
299
  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
33
299
  Value *Result = Constant::getNullValue(IntPtrTy);
34
299
35
299
  // If the GEP is inbounds, we know that none of the addressing operations will
36
299
  // overflow in an unsigned sense.
37
299
  bool isInBounds = GEPOp->isInBounds() && !NoAssumptions;  [region count for "!NoAssumptions": 287]
38
299
39
299
  // Build a mask for high order bits.
40
299
  unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
41
299
  uint64_t PtrSizeMask =
42
299
      std::numeric_limits<uint64_t>::max() >> (64 - IntPtrWidth);
43
299
44
299
  gep_type_iterator GTI = gep_type_begin(GEP);
45
643
  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
46
344
       ++i, ++GTI) {
47
344
    Value *Op = *i;
48
344
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
49
344
    if (Constant *OpC = dyn_cast<Constant>(Op)) {
50
79
      if (OpC->isZeroValue())
51
41
        continue;
52
38
53
38
      // Handle a struct index, which adds its field offset to the pointer.
54
38
      if (StructType *STy = GTI.getStructTypeOrNull()) {
55
5
        if (OpC->getType()->isVectorTy())
56
3
          OpC = OpC->getSplatValue();
57
5
58
5
        uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
59
5
        Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
60
5
61
5
        if (Size)
62
5
          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
63
5
                                      GEP->getName()+".offs");
64
5
        continue;
65
5
      }
66
33
67
33
      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
68
33
      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
69
33
      Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
70
33
      // Emit an add instruction.
71
33
      Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
72
33
      continue;
73
33
    }
74
265
    // Convert to correct type.
75
265
    if (Op->getType() != IntPtrTy)
76
2
      Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
77
265
    if (Size != 1) {
78
97
      // We'll let instcombine(mul) convert this to a shl if possible.
79
97
      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
80
97
                              GEP->getName()+".idx", isInBounds /*NUW*/);
81
97
    }
82
265
83
265
    // Emit an add instruction.
84
265
    Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
85
265
  }
86
299
  return Result;
87
299
}
llvm::Value* llvm::EmitGEPOffset<llvm::IRBuilder<llvm::TargetFolder, llvm::IRBuilderDefaultInserter> >(llvm::IRBuilder<llvm::TargetFolder, llvm::IRBuilderDefaultInserter>*, llvm::DataLayout const&, llvm::User*, bool)
Line
Count
Source
30
16
                     bool NoAssumptions = false) {
31
16
  GEPOperator *GEPOp = cast<GEPOperator>(GEP);
32
16
  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
33
16
  Value *Result = Constant::getNullValue(IntPtrTy);
34
16
35
16
  // If the GEP is inbounds, we know that none of the addressing operations will
36
16
  // overflow in an unsigned sense.
37
16
  bool isInBounds = GEPOp->isInBounds() && !NoAssumptions;
38
16
39
16
  // Build a mask for high order bits.
40
16
  unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
41
16
  uint64_t PtrSizeMask =
42
16
      std::numeric_limits<uint64_t>::max() >> (64 - IntPtrWidth);
43
16
44
16
  gep_type_iterator GTI = gep_type_begin(GEP);
45
36
  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
46
20
       ++i, ++GTI) {
47
20
    Value *Op = *i;
48
20
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
49
20
    if (Constant *OpC = dyn_cast<Constant>(Op)) {
50
12
      if (OpC->isZeroValue())
51
4
        continue;
52
8
53
8
      // Handle a struct index, which adds its field offset to the pointer.
54
8
      if (StructType *STy = GTI.getStructTypeOrNull()) {
55
0
        if (OpC->getType()->isVectorTy())
56
0
          OpC = OpC->getSplatValue();
57
0
58
0
        uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
59
0
        Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
60
0
61
0
        if (Size)
62
0
          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
63
0
                                      GEP->getName()+".offs");
64
0
        continue;
65
0
      }
66
8
67
8
      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
68
8
      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
69
8
      Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
70
8
      // Emit an add instruction.
71
8
      Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
72
8
      continue;
73
8
    }
74
8
    // Convert to correct type.
75
8
    if (Op->getType() != IntPtrTy)
76
2
      Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
77
8
    if (Size != 1) {
78
4
      // We'll let instcombine(mul) convert this to a shl if possible.
79
4
      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
80
4
                              GEP->getName()+".idx", isInBounds /*NUW*/);
81
4
    }
82
8
83
8
    // Emit an add instruction.
84
8
    Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
85
8
  }
86
16
  return Result;
87
16
}
llvm::Value* llvm::EmitGEPOffset<llvm::IRBuilder<llvm::TargetFolder, llvm::IRBuilderCallbackInserter> >(llvm::IRBuilder<llvm::TargetFolder, llvm::IRBuilderCallbackInserter>*, llvm::DataLayout const&, llvm::User*, bool)
Line
Count
Source
30
283
                     bool NoAssumptions = false) {
31
283
  GEPOperator *GEPOp = cast<GEPOperator>(GEP);
32
283
  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
33
283
  Value *Result = Constant::getNullValue(IntPtrTy);
34
283
35
283
  // If the GEP is inbounds, we know that none of the addressing operations will
36
283
  // overflow in an unsigned sense.
37
283
  bool isInBounds = GEPOp->isInBounds() && !NoAssumptions;  [region count for "!NoAssumptions": 271]
38
283
39
283
  // Build a mask for high order bits.
40
283
  unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
41
283
  uint64_t PtrSizeMask =
42
283
      std::numeric_limits<uint64_t>::max() >> (64 - IntPtrWidth);
43
283
44
283
  gep_type_iterator GTI = gep_type_begin(GEP);
45
607
  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
46
324
       ++i, ++GTI) {
47
324
    Value *Op = *i;
48
324
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
49
324
    if (Constant *OpC = dyn_cast<Constant>(Op)) {
50
67
      if (OpC->isZeroValue())
51
37
        continue;
52
30
53
30
      // Handle a struct index, which adds its field offset to the pointer.
54
30
      if (StructType *STy = GTI.getStructTypeOrNull()) {
55
5
        if (OpC->getType()->isVectorTy())
56
3
          OpC = OpC->getSplatValue();
57
5
58
5
        uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
59
5
        Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
60
5
61
5
        if (Size)
62
5
          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
63
5
                                      GEP->getName()+".offs");
64
5
        continue;
65
5
      }
66
25
67
25
      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
68
25
      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
69
25
      Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
70
25
      // Emit an add instruction.
71
25
      Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
72
25
      continue;
73
25
    }
74
257
    // Convert to correct type.
75
257
    if (Op->getType() != IntPtrTy)
76
0
      Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
77
257
    if (Size != 1) {
78
93
      // We'll let instcombine(mul) convert this to a shl if possible.
79
93
      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
80
93
                              GEP->getName()+".idx", isInBounds /*NUW*/);
81
93
    }
82
257
83
257
    // Emit an add instruction.
84
257
    Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
85
257
  }
86
283
  return Result;
87
283
}
88
89
}
90
91
#endif // LLVM_ANALYSIS_UTILS_LOCAL_H