Coverage Report

Created: 2018-09-17 19:50

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/include/llvm/Analysis/ValueTracking.h
Line
Count
Source (jump to first uncovered line)
1
//===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
2
//
3
//                     The LLVM Compiler Infrastructure
4
//
5
// This file is distributed under the University of Illinois Open Source
6
// License. See LICENSE.TXT for details.
7
//
8
//===----------------------------------------------------------------------===//
9
//
10
// This file contains routines that help analyze properties that chains of
11
// computations have.
12
//
13
//===----------------------------------------------------------------------===//
14
15
#ifndef LLVM_ANALYSIS_VALUETRACKING_H
16
#define LLVM_ANALYSIS_VALUETRACKING_H
17
18
#include "llvm/ADT/ArrayRef.h"
19
#include "llvm/ADT/Optional.h"
20
#include "llvm/IR/CallSite.h"
21
#include "llvm/IR/Constants.h"
22
#include "llvm/IR/Instruction.h"
23
#include "llvm/IR/Intrinsics.h"
24
#include <cassert>
25
#include <cstdint>
26
27
namespace llvm {
28
29
class AddOperator;
30
class APInt;
31
class AssumptionCache;
32
class DataLayout;
33
class DominatorTree;
34
class GEPOperator;
35
class IntrinsicInst;
36
struct KnownBits;
37
class Loop;
38
class LoopInfo;
39
class MDNode;
40
class OptimizationRemarkEmitter;
41
class StringRef;
42
class TargetLibraryInfo;
43
class Value;
44
45
  /// Determine which bits of V are known to be either zero or one and return
46
  /// them in the KnownZero/KnownOne bit sets.
47
  ///
48
  /// This function is defined on values with integer type, values with pointer
49
  /// type, and vectors of integers.  In the case
50
  /// where V is a vector, the known zero and known one values are the
51
  /// same width as the vector element, and the bit is set only if it is true
52
  /// for all of the elements in the vector.
53
  void computeKnownBits(const Value *V, KnownBits &Known,
54
                        const DataLayout &DL, unsigned Depth = 0,
55
                        AssumptionCache *AC = nullptr,
56
                        const Instruction *CxtI = nullptr,
57
                        const DominatorTree *DT = nullptr,
58
                        OptimizationRemarkEmitter *ORE = nullptr,
59
                        bool UseInstrInfo = true);
60
61
  /// Returns the known bits rather than passing by reference.
62
  KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
63
                             unsigned Depth = 0, AssumptionCache *AC = nullptr,
64
                             const Instruction *CxtI = nullptr,
65
                             const DominatorTree *DT = nullptr,
66
                             OptimizationRemarkEmitter *ORE = nullptr,
67
                             bool UseInstrInfo = true);
68
69
  /// Compute known bits from the range metadata.
70
  /// \p KnownZero the set of bits that are known to be zero
71
  /// \p KnownOne the set of bits that are known to be one
72
  void computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
73
                                         KnownBits &Known);
74
75
  /// Return true if LHS and RHS have no common bits set.
76
  bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
77
                           const DataLayout &DL,
78
                           AssumptionCache *AC = nullptr,
79
                           const Instruction *CxtI = nullptr,
80
                           const DominatorTree *DT = nullptr,
81
                           bool UseInstrInfo = true);
82
83
  /// Return true if the given value is known to have exactly one bit set when
84
  /// defined. For vectors return true if every element is known to be a power
85
  /// of two when defined. Supports values with integer or pointer type and
86
  /// vectors of integers. If 'OrZero' is set, then return true if the given
87
  /// value is either a power of two or zero.
88
  bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
89
                              bool OrZero = false, unsigned Depth = 0,
90
                              AssumptionCache *AC = nullptr,
91
                              const Instruction *CxtI = nullptr,
92
                              const DominatorTree *DT = nullptr,
93
                              bool UseInstrInfo = true);
94
95
  bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);
96
97
  /// Return true if the given value is known to be non-zero when defined. For
98
  /// vectors, return true if every element is known to be non-zero when
99
  /// defined. For pointers, if the context instruction and dominator tree are
100
  /// specified, perform context-sensitive analysis and return true if the
101
  /// pointer couldn't possibly be null at the specified instruction.
102
  /// Supports values with integer or pointer type and vectors of integers.
103
  bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
104
                      AssumptionCache *AC = nullptr,
105
                      const Instruction *CxtI = nullptr,
106
                      const DominatorTree *DT = nullptr,
107
                      bool UseInstrInfo = true);
108
109
  /// Return true if the two given values are negation.
110
  /// Currently can recognize Value pair:
111
  /// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X)
112
  /// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A)
113
  bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false);
114
115
  /// Returns true if the given value is known to be non-negative.
116
  bool isKnownNonNegative(const Value *V, const DataLayout &DL,
117
                          unsigned Depth = 0,
118
                          AssumptionCache *AC = nullptr,
119
                          const Instruction *CxtI = nullptr,
120
                          const DominatorTree *DT = nullptr,
121
                          bool UseInstrInfo = true);
122
123
  /// Returns true if the given value is known to be positive (i.e. non-negative
124
  /// and non-zero).
125
  bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0,
126
                       AssumptionCache *AC = nullptr,
127
                       const Instruction *CxtI = nullptr,
128
                       const DominatorTree *DT = nullptr,
129
                       bool UseInstrInfo = true);
130
131
  /// Returns true if the given value is known to be negative (i.e. non-positive
132
  /// and non-zero).
133
  bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0,
134
                       AssumptionCache *AC = nullptr,
135
                       const Instruction *CxtI = nullptr,
136
                       const DominatorTree *DT = nullptr,
137
                       bool UseInstrInfo = true);
138
139
  /// Return true if the given values are known to be non-equal when defined.
140
  /// Supports scalar integer types only.
141
  bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
142
                       AssumptionCache *AC = nullptr,
143
                       const Instruction *CxtI = nullptr,
144
                       const DominatorTree *DT = nullptr,
145
                       bool UseInstrInfo = true);
146
147
  /// Return true if 'V & Mask' is known to be zero. We use this predicate to
148
  /// simplify operations downstream. Mask is known to be zero for bits that V
149
  /// cannot have.
150
  ///
151
  /// This function is defined on values with integer type, values with pointer
152
  /// type, and vectors of integers.  In the case
153
  /// where V is a vector, the mask, known zero, and known one values are the
154
  /// same width as the vector element, and the bit is set only if it is true
155
  /// for all of the elements in the vector.
156
  bool MaskedValueIsZero(const Value *V, const APInt &Mask,
157
                         const DataLayout &DL,
158
                         unsigned Depth = 0, AssumptionCache *AC = nullptr,
159
                         const Instruction *CxtI = nullptr,
160
                         const DominatorTree *DT = nullptr,
161
                         bool UseInstrInfo = true);
162
163
  /// Return the number of times the sign bit of the register is replicated into
164
  /// the other bits. We know that at least 1 bit is always equal to the sign
165
  /// bit (itself), but other cases can give us information. For example,
166
  /// immediately after an "ashr X, 2", we know that the top 3 bits are all
167
  /// equal to each other, so we return 3. For vectors, return the number of
168
  /// sign bits for the vector element with the minimum number of known sign
169
  /// bits.
170
  unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
171
                              unsigned Depth = 0, AssumptionCache *AC = nullptr,
172
                              const Instruction *CxtI = nullptr,
173
                              const DominatorTree *DT = nullptr,
174
                              bool UseInstrInfo = true);
175
176
  /// This function computes the integer multiple of Base that equals V. If
177
  /// successful, it returns true and returns the multiple in Multiple. If
178
  /// unsuccessful, it returns false. Also, if V can be simplified to an
179
  /// integer, then the simplified V is returned in Val. Look through sext only
180
  /// if LookThroughSExt=true.
181
  bool ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
182
                       bool LookThroughSExt = false,
183
                       unsigned Depth = 0);
184
185
  /// Map a call instruction to an intrinsic ID.  Libcalls which have equivalent
186
  /// intrinsics are treated as-if they were intrinsics.
187
  Intrinsic::ID getIntrinsicForCallSite(ImmutableCallSite ICS,
188
                                        const TargetLibraryInfo *TLI);
189
190
  /// Return true if we can prove that the specified FP value is never equal to
191
  /// -0.0.
192
  bool CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
193
                            unsigned Depth = 0);
194
195
  /// Return true if we can prove that the specified FP value is either NaN or
196
  /// never less than -0.0.
197
  ///
198
  ///      NaN --> true
199
  ///       +0 --> true
200
  ///       -0 --> true
201
  ///   x > +0 --> true
202
  ///   x < -0 --> false
203
  bool CannotBeOrderedLessThanZero(const Value *V, const TargetLibraryInfo *TLI);
204
205
  /// Return true if the floating-point scalar value is not a NaN or if the
206
  /// floating-point vector value has no NaN elements. Return false if a value
207
  /// could ever be NaN.
208
  bool isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
209
                       unsigned Depth = 0);
210
211
  /// Return true if we can prove that the specified FP value's sign bit is 0.
212
  ///
213
  ///      NaN --> true/false (depending on the NaN's sign bit)
214
  ///       +0 --> true
215
  ///       -0 --> false
216
  ///   x > +0 --> true
217
  ///   x < -0 --> false
218
  bool SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI);
219
220
  /// If the specified value can be set by repeating the same byte in memory,
221
  /// return the i8 value that it is represented with. This is true for all i8
222
  /// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
223
  /// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
224
  /// i16 0x1234), return null.
225
  Value *isBytewiseValue(Value *V);
226
227
  /// Given an aggregate and a sequence of indices, see if the scalar value
228
  /// indexed is already around as a register, for example if it were inserted
229
  /// directly into the aggregate.
230
  ///
231
  /// If InsertBefore is not null, this function will duplicate (modified)
232
  /// insertvalues when a part of a nested struct is extracted.
233
  Value *FindInsertedValue(Value *V,
234
                           ArrayRef<unsigned> idx_range,
235
                           Instruction *InsertBefore = nullptr);
236
237
  /// Analyze the specified pointer to see if it can be expressed as a base
238
  /// pointer plus a constant offset. Return the base and offset to the caller.
239
  Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
240
                                          const DataLayout &DL);
241
  inline const Value *GetPointerBaseWithConstantOffset(const Value *Ptr,
242
                                                       int64_t &Offset,
243
152k
                                                       const DataLayout &DL) {
244
152k
    return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset,
245
152k
                                            DL);
246
152k
  }
247
248
  /// Returns true if the GEP is based on a pointer to a string (array of
249
  // \p CharSize integers) and is indexing into this string.
250
  bool isGEPBasedOnPointerToString(const GEPOperator *GEP,
251
                                   unsigned CharSize = 8);
252
253
  /// Represents offset+length into a ConstantDataArray.
254
  struct ConstantDataArraySlice {
255
    /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid
256
    /// initializer, it just doesn't fit the ConstantDataArray interface).
257
    const ConstantDataArray *Array;
258
259
    /// Slice starts at this Offset.
260
    uint64_t Offset;
261
262
    /// Length of the slice.
263
    uint64_t Length;
264
265
    /// Moves the Offset and adjusts Length accordingly.
266
1.87k
    void move(uint64_t Delta) {
267
1.87k
      assert(Delta < Length);
268
1.87k
      Offset += Delta;
269
1.87k
      Length -= Delta;
270
1.87k
    }
271
272
    /// Convenience accessor for elements in the slice.
273
7.72k
    uint64_t operator[](unsigned I) const {
274
7.72k
      return Array==nullptr ? 
00
: Array->getElementAsInteger(I + Offset);
275
7.72k
    }
276
  };
277
278
  /// Returns true if the value \p V is a pointer into a ConstantDataArray.
279
  /// If successful \p Slice will point to a ConstantDataArray info object
280
  /// with an appropriate offset.
281
  bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
282
                                unsigned ElementSize, uint64_t Offset = 0);
283
284
  /// This function computes the length of a null-terminated C string pointed to
285
  /// by V. If successful, it returns true and returns the string in Str. If
286
  /// unsuccessful, it returns false. This does not include the trailing null
287
  /// character by default. If TrimAtNul is set to false, then this returns any
288
  /// trailing null characters as well as any other characters that come after
289
  /// it.
290
  bool getConstantStringInfo(const Value *V, StringRef &Str,
291
                             uint64_t Offset = 0, bool TrimAtNul = true);
292
293
  /// If we can compute the length of the string pointed to by the specified
294
  /// pointer, return 'len+1'.  If we can't, return 0.
295
  uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);
296
297
  /// This function returns call pointer argument that is considered the same by
298
  /// aliasing rules. You CAN'T use it to replace one value with another.
299
  const Value *getArgumentAliasingToReturnedPointer(ImmutableCallSite CS);
300
14.6M
  inline Value *getArgumentAliasingToReturnedPointer(CallSite CS) {
301
14.6M
    return const_cast<Value *>(
302
14.6M
        getArgumentAliasingToReturnedPointer(ImmutableCallSite(CS)));
303
14.6M
  }
304
305
  // {launder,strip}.invariant.group returns pointer that aliases its argument,
306
  // and it only captures pointer by returning it.
307
  // These intrinsics are not marked as nocapture, because returning is
308
  // considered as capture. The arguments are not marked as returned either,
309
  // because it would make it useless.
310
  bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
311
      ImmutableCallSite CS);
312
313
  /// This method strips off any GEP address adjustments and pointer casts from
314
  /// the specified value, returning the original object being addressed. Note
315
  /// that the returned value has pointer type if the specified value does. If
316
  /// the MaxLookup value is non-zero, it limits the number of instructions to
317
  /// be stripped off.
318
  Value *GetUnderlyingObject(Value *V, const DataLayout &DL,
319
                             unsigned MaxLookup = 6);
320
  inline const Value *GetUnderlyingObject(const Value *V, const DataLayout &DL,
321
229M
                                          unsigned MaxLookup = 6) {
322
229M
    return GetUnderlyingObject(const_cast<Value *>(V), DL, MaxLookup);
323
229M
  }
324
325
  /// This method is similar to GetUnderlyingObject except that it can
326
  /// look through phi and select instructions and return multiple objects.
327
  ///
328
  /// If LoopInfo is passed, loop phis are further analyzed.  If a pointer
329
  /// accesses different objects in each iteration, we don't look through the
330
  /// phi node. E.g. consider this loop nest:
331
  ///
332
  ///   int **A;
333
  ///   for (i)
334
  ///     for (j) {
335
  ///        A[i][j] = A[i-1][j] * B[j]
336
  ///     }
337
  ///
338
  /// This is transformed by Load-PRE to stash away A[i] for the next iteration
339
  /// of the outer loop:
340
  ///
341
  ///   Curr = A[0];          // Prev_0
342
  ///   for (i: 1..N) {
343
  ///     Prev = Curr;        // Prev = PHI (Prev_0, Curr)
344
  ///     Curr = A[i];
345
  ///     for (j: 0..N) {
346
  ///        Curr[j] = Prev[j] * B[j]
347
  ///     }
348
  ///   }
349
  ///
350
  /// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
351
  /// should not assume that Curr and Prev share the same underlying object thus
352
  /// it shouldn't look through the phi above.
353
  void GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
354
                            const DataLayout &DL, LoopInfo *LI = nullptr,
355
                            unsigned MaxLookup = 6);
356
357
  /// This is a wrapper around GetUnderlyingObjects and adds support for basic
358
  /// ptrtoint+arithmetic+inttoptr sequences.
359
  bool getUnderlyingObjectsForCodeGen(const Value *V,
360
                            SmallVectorImpl<Value *> &Objects,
361
                            const DataLayout &DL);
362
363
  /// Return true if the only users of this pointer are lifetime markers.
364
  bool onlyUsedByLifetimeMarkers(const Value *V);
365
366
  /// Return true if the instruction does not have any effects besides
367
  /// calculating the result and does not have undefined behavior.
368
  ///
369
  /// This method never returns true for an instruction that returns true for
370
  /// mayHaveSideEffects; however, this method also does some other checks in
371
  /// addition. It checks for undefined behavior, like dividing by zero or
372
  /// loading from an invalid pointer (but not for undefined results, like a
373
  /// shift with a shift amount larger than the width of the result). It checks
374
  /// for malloc and alloca because speculatively executing them might cause a
375
  /// memory leak. It also returns false for instructions related to control
376
  /// flow, specifically terminators and PHI nodes.
377
  ///
378
  /// If the CtxI is specified this method performs context-sensitive analysis
379
  /// and returns true if it is safe to execute the instruction immediately
380
  /// before the CtxI.
381
  ///
382
  /// If the CtxI is NOT specified this method only looks at the instruction
383
  /// itself and its operands, so if this method returns true, it is safe to
384
  /// move the instruction as long as the correct dominance relationships for
385
  /// the operands and users hold.
386
  ///
387
  /// This method can return true for instructions that read memory;
388
  /// for such instructions, moving them may change the resulting value.
389
  bool isSafeToSpeculativelyExecute(const Value *V,
390
                                    const Instruction *CtxI = nullptr,
391
                                    const DominatorTree *DT = nullptr);
392
393
  /// Returns true if the result or effects of the given instructions \p I
394
  /// depend on or influence global memory.
395
  /// Memory dependence arises for example if the instruction reads from
396
  /// memory or may produce effects or undefined behaviour. Memory dependent
397
  /// instructions generally cannot be reordered with respect to other memory
398
  /// dependent instructions or moved into non-dominated basic blocks.
399
  /// Instructions which just compute a value based on the values of their
400
  /// operands are not memory dependent.
401
  bool mayBeMemoryDependent(const Instruction &I);
402
403
  /// Return true if it is an intrinsic that cannot be speculated but also
404
  /// cannot trap.
405
  bool isAssumeLikeIntrinsic(const Instruction *I);
406
407
  /// Return true if it is valid to use the assumptions provided by an
408
  /// assume intrinsic, I, at the point in the control-flow identified by the
409
  /// context instruction, CxtI.
410
  bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
411
                               const DominatorTree *DT = nullptr);
412
413
  enum class OverflowResult { AlwaysOverflows, MayOverflow, NeverOverflows };
414
415
  OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
416
                                               const Value *RHS,
417
                                               const DataLayout &DL,
418
                                               AssumptionCache *AC,
419
                                               const Instruction *CxtI,
420
                                               const DominatorTree *DT,
421
                                               bool UseInstrInfo = true);
422
  OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
423
                                             const DataLayout &DL,
424
                                             AssumptionCache *AC,
425
                                             const Instruction *CxtI,
426
                                             const DominatorTree *DT,
427
                                             bool UseInstrInfo = true);
428
  OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
429
                                               const Value *RHS,
430
                                               const DataLayout &DL,
431
                                               AssumptionCache *AC,
432
                                               const Instruction *CxtI,
433
                                               const DominatorTree *DT,
434
                                               bool UseInstrInfo = true);
435
  OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS,
436
                                             const DataLayout &DL,
437
                                             AssumptionCache *AC = nullptr,
438
                                             const Instruction *CxtI = nullptr,
439
                                             const DominatorTree *DT = nullptr);
440
  /// This version also leverages the sign bit of Add if known.
441
  OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
442
                                             const DataLayout &DL,
443
                                             AssumptionCache *AC = nullptr,
444
                                             const Instruction *CxtI = nullptr,
445
                                             const DominatorTree *DT = nullptr);
446
  OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS,
447
                                               const DataLayout &DL,
448
                                               AssumptionCache *AC,
449
                                               const Instruction *CxtI,
450
                                               const DominatorTree *DT);
451
  OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
452
                                             const DataLayout &DL,
453
                                             AssumptionCache *AC,
454
                                             const Instruction *CxtI,
455
                                             const DominatorTree *DT);
456
457
  /// Returns true if the arithmetic part of the \p II 's result is
458
  /// used only along the paths control dependent on the computation
459
  /// not overflowing, \p II being an <op>.with.overflow intrinsic.
460
  bool isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
461
                                 const DominatorTree &DT);
462
463
  /// Return true if this function can prove that the instruction I will
464
  /// always transfer execution to one of its successors (including the next
465
  /// instruction that follows within a basic block). E.g. this is not
466
  /// guaranteed for function calls that could loop infinitely.
467
  ///
468
  /// In other words, this function returns false for instructions that may
469
  /// transfer execution or fail to transfer execution in a way that is not
470
  /// captured in the CFG nor in the sequence of instructions within a basic
471
  /// block.
472
  ///
473
  /// Undefined behavior is assumed not to happen, so e.g. division is
474
  /// guaranteed to transfer execution to the following instruction even
475
  /// though division by zero might cause undefined behavior.
476
  bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);
477
478
  /// Returns true if this block does not contain a potential implicit exit.
479
  /// This is equivalent to saying that all instructions within the basic block
480
  /// are guaranteed to transfer execution to their successor within the basic
481
  /// block. This has the same assumptions w.r.t. undefined behavior as the
482
  /// instruction variant of this function.
483
  bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);
484
485
  /// Return true if this function can prove that the instruction I
486
  /// is executed for every iteration of the loop L.
487
  ///
488
  /// Note that this currently only considers the loop header.
489
  bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
490
                                              const Loop *L);
491
492
  /// Return true if this function can prove that I is guaranteed to yield
493
  /// full-poison (all bits poison) if at least one of its operands are
494
  /// full-poison (all bits poison).
495
  ///
496
  /// The exact rules for how poison propagates through instructions have
497
  /// not been settled as of 2015-07-10, so this function is conservative
498
  /// and only considers poison to be propagated in uncontroversial
499
  /// cases. There is no attempt to track values that may be only partially
500
  /// poison.
501
  bool propagatesFullPoison(const Instruction *I);
502
503
  /// Return either nullptr or an operand of I such that I will trigger
504
  /// undefined behavior if I is executed and that operand has a full-poison
505
  /// value (all bits poison).
506
  const Value *getGuaranteedNonFullPoisonOp(const Instruction *I);
507
508
  /// Return true if this function can prove that if PoisonI is executed
509
  /// and yields a full-poison value (all bits poison), then that will
510
  /// trigger undefined behavior.
511
  ///
512
  /// Note that this currently only considers the basic block that is
513
  /// the parent of I.
514
  bool programUndefinedIfFullPoison(const Instruction *PoisonI);
515
516
  /// Specific patterns of select instructions we can match.
517
  enum SelectPatternFlavor {
518
    SPF_UNKNOWN = 0,
519
    SPF_SMIN,                   /// Signed minimum
520
    SPF_UMIN,                   /// Unsigned minimum
521
    SPF_SMAX,                   /// Signed maximum
522
    SPF_UMAX,                   /// Unsigned maximum
523
    SPF_FMINNUM,                /// Floating point minnum
524
    SPF_FMAXNUM,                /// Floating point maxnum
525
    SPF_ABS,                    /// Absolute value
526
    SPF_NABS                    /// Negated absolute value
527
  };
528
529
  /// Behavior when a floating point min/max is given one NaN and one
530
  /// non-NaN as input.
531
  enum SelectPatternNaNBehavior {
532
    SPNB_NA = 0,                /// NaN behavior not applicable.
533
    SPNB_RETURNS_NAN,           /// Given one NaN input, returns the NaN.
534
    SPNB_RETURNS_OTHER,         /// Given one NaN input, returns the non-NaN.
535
    SPNB_RETURNS_ANY            /// Given one NaN input, can return either (or
536
                                /// it has been determined that no operands can
537
                                /// be NaN).
538
  };
539
540
  struct SelectPatternResult {
541
    SelectPatternFlavor Flavor;
542
    SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is
543
                                          /// SPF_FMINNUM or SPF_FMAXNUM.
544
    bool Ordered;               /// When implementing this min/max pattern as
545
                                /// fcmp; select, does the fcmp have to be
546
                                /// ordered?
547
548
    /// Return true if \p SPF is a min or a max pattern.
549
19.8M
    static bool isMinOrMax(SelectPatternFlavor SPF) {
550
19.8M
      return SPF != SPF_UNKNOWN && 
SPF != SPF_ABS7.15M
&&
SPF != SPF_NABS6.34M
;
551
19.8M
    }
552
  };
553
554
  /// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
555
  /// and providing the out parameter results if we successfully match.
556
  ///
557
  /// For ABS/NABS, LHS will be set to the input to the abs idiom. RHS will be
558
  /// the negation instruction from the idiom.
559
  ///
560
  /// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
561
  /// not match that of the original select. If this is the case, the cast
562
  /// operation (one of Trunc,SExt,Zext) that must be done to transform the
563
  /// type of LHS and RHS into the type of V is returned in CastOp.
564
  ///
565
  /// For example:
566
  ///   %1 = icmp slt i32 %a, i32 4
567
  ///   %2 = sext i32 %a to i64
568
  ///   %3 = select i1 %1, i64 %2, i64 4
569
  ///
570
  /// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
571
  ///
572
  SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
573
                                         Instruction::CastOps *CastOp = nullptr,
574
                                         unsigned Depth = 0);
575
  inline SelectPatternResult
576
  matchSelectPattern(const Value *V, const Value *&LHS, const Value *&RHS,
577
14.1M
                     Instruction::CastOps *CastOp = nullptr) {
578
14.1M
    Value *L = const_cast<Value*>(LHS);
579
14.1M
    Value *R = const_cast<Value*>(RHS);
580
14.1M
    auto Result = matchSelectPattern(const_cast<Value*>(V), L, R);
581
14.1M
    LHS = L;
582
14.1M
    RHS = R;
583
14.1M
    return Result;
584
14.1M
  }
585
586
  /// Return the canonical comparison predicate for the specified
587
  /// minimum/maximum flavor.
588
  CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF,
589
                                   bool Ordered = false);
590
591
  /// Return the inverse minimum/maximum flavor of the specified flavor.
592
  /// For example, signed minimum is the inverse of signed maximum.
593
  SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF);
594
595
  /// Return the canonical inverse comparison predicate for the specified
596
  /// minimum/maximum flavor.
597
  CmpInst::Predicate getInverseMinMaxPred(SelectPatternFlavor SPF);
598
599
  /// Return true if RHS is known to be implied true by LHS.  Return false if
600
  /// RHS is known to be implied false by LHS.  Otherwise, return None if no
601
  /// implication can be made.
602
  /// A & B must be i1 (boolean) values or a vector of such values. Note that
603
  /// the truth table for implication is the same as <=u on i1 values (but not
604
  /// <=s!).  The truth table for both is:
605
  ///    | T | F (B)
606
  ///  T | T | F
607
  ///  F | T | T
608
  /// (A)
609
  Optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
610
                                    const DataLayout &DL, bool LHSIsTrue = true,
611
                                    unsigned Depth = 0);
612
} // end namespace llvm
613
614
#endif // LLVM_ANALYSIS_VALUETRACKING_H