Coverage Report

Created: 2018-09-19 08:35

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/include/llvm/Analysis/Utils/Local.h
Line
Count
Source
1
//===- Local.h - Functions to perform local transformations -----*- C++ -*-===//
2
//
3
//                     The LLVM Compiler Infrastructure
4
//
5
// This file is distributed under the University of Illinois Open Source
6
// License. See LICENSE.TXT for details.
7
//
8
//===----------------------------------------------------------------------===//
9
//
10
// This family of functions perform various local transformations to the
11
// program.
12
//
13
//===----------------------------------------------------------------------===//
14
15
#ifndef LLVM_ANALYSIS_UTILS_LOCAL_H
16
#define LLVM_ANALYSIS_UTILS_LOCAL_H
17
18
#include "llvm/IR/DataLayout.h"
19
#include "llvm/IR/GetElementPtrTypeIterator.h"
20
21
namespace llvm {
22
23
/// Given a getelementptr instruction/constantexpr, emit the code necessary to
24
/// compute the offset from the base pointer (without adding in the base
25
/// pointer). Return the result as a signed integer of intptr size.
26
/// When NoAssumptions is true, no assumptions about index computation not
27
/// overflowing are made.
28
template <typename IRBuilderTy>
29
Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
30
370
                     bool NoAssumptions = false) {
31
370
  GEPOperator *GEPOp = cast<GEPOperator>(GEP);
32
370
  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
33
370
  Value *Result = Constant::getNullValue(IntPtrTy);
34
370
35
370
  // If the GEP is inbounds, we know that none of the addressing operations will
36
370
  // overflow in an unsigned sense.
37
370
  bool isInBounds = GEPOp->isInBounds() && 
!NoAssumptions316
;
38
370
39
370
  // Build a mask for high order bits.
40
370
  unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
41
370
  uint64_t PtrSizeMask =
42
370
      std::numeric_limits<uint64_t>::max() >> (64 - IntPtrWidth);
43
370
44
370
  gep_type_iterator GTI = gep_type_begin(GEP);
45
801
  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
46
431
       ++i, ++GTI) {
47
431
    Value *Op = *i;
48
431
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
49
431
    if (Constant *OpC = dyn_cast<Constant>(Op)) {
50
132
      if (OpC->isZeroValue())
51
53
        continue;
52
79
53
79
      // Handle a struct index, which adds its field offset to the pointer.
54
79
      if (StructType *STy = GTI.getStructTypeOrNull()) {
55
5
        if (OpC->getType()->isVectorTy())
56
3
          OpC = OpC->getSplatValue();
57
5
58
5
        uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
59
5
        Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
60
5
61
5
        if (Size)
62
5
          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
63
5
                                      GEP->getName()+".offs");
64
5
        continue;
65
5
      }
66
74
67
74
      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
68
74
      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
69
74
      Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
70
74
      // Emit an add instruction.
71
74
      Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
72
74
      continue;
73
74
    }
74
299
    // Convert to correct type.
75
299
    if (Op->getType() != IntPtrTy)
76
3
      Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
77
299
    if (Size != 1) {
78
114
      // We'll let instcombine(mul) convert this to a shl if possible.
79
114
      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
80
114
                              GEP->getName()+".idx", isInBounds /*NUW*/);
81
114
    }
82
299
83
299
    // Emit an add instruction.
84
299
    Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
85
299
  }
86
370
  return Result;
87
370
}
llvm::Value* llvm::EmitGEPOffset<llvm::IRBuilder<llvm::TargetFolder, llvm::IRBuilderDefaultInserter> >(llvm::IRBuilder<llvm::TargetFolder, llvm::IRBuilderDefaultInserter>*, llvm::DataLayout const&, llvm::User*, bool)
Line
Count
Source
30
28
                     bool NoAssumptions = false) {
31
28
  GEPOperator *GEPOp = cast<GEPOperator>(GEP);
32
28
  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
33
28
  Value *Result = Constant::getNullValue(IntPtrTy);
34
28
35
28
  // If the GEP is inbounds, we know that none of the addressing operations will
36
28
  // overflow in an unsigned sense.
37
28
  bool isInBounds = GEPOp->isInBounds() && 
!NoAssumptions27
;
38
28
39
28
  // Build a mask for high order bits.
40
28
  unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
41
28
  uint64_t PtrSizeMask =
42
28
      std::numeric_limits<uint64_t>::max() >> (64 - IntPtrWidth);
43
28
44
28
  gep_type_iterator GTI = gep_type_begin(GEP);
45
76
  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
46
48
       ++i, ++GTI) {
47
48
    Value *Op = *i;
48
48
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
49
48
    if (Constant *OpC = dyn_cast<Constant>(Op)) {
50
24
      if (OpC->isZeroValue())
51
16
        continue;
52
8
53
8
      // Handle a struct index, which adds its field offset to the pointer.
54
8
      if (StructType *STy = GTI.getStructTypeOrNull()) {
55
0
        if (OpC->getType()->isVectorTy())
56
0
          OpC = OpC->getSplatValue();
57
0
58
0
        uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
59
0
        Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
60
0
61
0
        if (Size)
62
0
          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
63
0
                                      GEP->getName()+".offs");
64
0
        continue;
65
0
      }
66
8
67
8
      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
68
8
      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
69
8
      Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
70
8
      // Emit an add instruction.
71
8
      Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
72
8
      continue;
73
8
    }
74
24
    // Convert to correct type.
75
24
    if (Op->getType() != IntPtrTy)
76
3
      Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
77
24
    if (Size != 1) {
78
20
      // We'll let instcombine(mul) convert this to a shl if possible.
79
20
      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
80
20
                              GEP->getName()+".idx", isInBounds /*NUW*/);
81
20
    }
82
24
83
24
    // Emit an add instruction.
84
24
    Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
85
24
  }
86
28
  return Result;
87
28
}
llvm::Value* llvm::EmitGEPOffset<llvm::IRBuilder<llvm::TargetFolder, llvm::IRBuilderCallbackInserter> >(llvm::IRBuilder<llvm::TargetFolder, llvm::IRBuilderCallbackInserter>*, llvm::DataLayout const&, llvm::User*, bool)
Line
Count
Source
30
342
                     bool NoAssumptions = false) {
31
342
  GEPOperator *GEPOp = cast<GEPOperator>(GEP);
32
342
  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
33
342
  Value *Result = Constant::getNullValue(IntPtrTy);
34
342
35
342
  // If the GEP is inbounds, we know that none of the addressing operations will
36
342
  // overflow in an unsigned sense.
37
342
  bool isInBounds = GEPOp->isInBounds() && 
!NoAssumptions289
;
38
342
39
342
  // Build a mask for high order bits.
40
342
  unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
41
342
  uint64_t PtrSizeMask =
42
342
      std::numeric_limits<uint64_t>::max() >> (64 - IntPtrWidth);
43
342
44
342
  gep_type_iterator GTI = gep_type_begin(GEP);
45
725
  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
46
383
       ++i, ++GTI) {
47
383
    Value *Op = *i;
48
383
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
49
383
    if (Constant *OpC = dyn_cast<Constant>(Op)) {
50
108
      if (OpC->isZeroValue())
51
37
        continue;
52
71
53
71
      // Handle a struct index, which adds its field offset to the pointer.
54
71
      if (StructType *STy = GTI.getStructTypeOrNull()) {
55
5
        if (OpC->getType()->isVectorTy())
56
3
          OpC = OpC->getSplatValue();
57
5
58
5
        uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
59
5
        Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
60
5
61
5
        if (Size)
62
5
          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
63
5
                                      GEP->getName()+".offs");
64
5
        continue;
65
5
      }
66
66
67
66
      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
68
66
      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
69
66
      Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
70
66
      // Emit an add instruction.
71
66
      Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
72
66
      continue;
73
66
    }
74
275
    // Convert to correct type.
75
275
    if (Op->getType() != IntPtrTy)
76
0
      Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
77
275
    if (Size != 1) {
78
94
      // We'll let instcombine(mul) convert this to a shl if possible.
79
94
      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
80
94
                              GEP->getName()+".idx", isInBounds /*NUW*/);
81
94
    }
82
275
83
275
    // Emit an add instruction.
84
275
    Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
85
275
  }
86
342
  return Result;
87
342
}
88
89
}
90
91
#endif // LLVM_ANALYSIS_UTILS_LOCAL_H