Coverage Report

Created: 2019-02-20 00:17

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/include/llvm/Analysis/ValueTracking.h
Line  Count  Source
1
//===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This file contains routines that help analyze properties that chains of
10
// computations have.
11
//
12
//===----------------------------------------------------------------------===//
13
14
#ifndef LLVM_ANALYSIS_VALUETRACKING_H
15
#define LLVM_ANALYSIS_VALUETRACKING_H
16
17
#include "llvm/ADT/ArrayRef.h"
18
#include "llvm/ADT/Optional.h"
19
#include "llvm/IR/CallSite.h"
20
#include "llvm/IR/Constants.h"
21
#include "llvm/IR/Instruction.h"
22
#include "llvm/IR/Intrinsics.h"
23
#include <cassert>
24
#include <cstdint>
25
26
namespace llvm {
27
28
class AddOperator;
29
class APInt;
30
class AssumptionCache;
31
class DataLayout;
32
class DominatorTree;
33
class GEPOperator;
34
class IntrinsicInst;
35
struct KnownBits;
36
class Loop;
37
class LoopInfo;
38
class MDNode;
39
class OptimizationRemarkEmitter;
40
class StringRef;
41
class TargetLibraryInfo;
42
class Value;
43
44
  /// Determine which bits of V are known to be either zero or one and return
45
  /// them in the KnownZero/KnownOne bit sets.
46
  ///
47
  /// This function is defined on values with integer type, values with pointer
48
  /// type, and vectors of integers.  In the case
49
  /// where V is a vector, the known zero and known one values are the
50
  /// same width as the vector element, and the bit is set only if it is true
51
  /// for all of the elements in the vector.
52
  void computeKnownBits(const Value *V, KnownBits &Known,
53
                        const DataLayout &DL, unsigned Depth = 0,
54
                        AssumptionCache *AC = nullptr,
55
                        const Instruction *CxtI = nullptr,
56
                        const DominatorTree *DT = nullptr,
57
                        OptimizationRemarkEmitter *ORE = nullptr,
58
                        bool UseInstrInfo = true);
59
60
  /// Returns the known bits rather than passing by reference.
61
  KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
62
                             unsigned Depth = 0, AssumptionCache *AC = nullptr,
63
                             const Instruction *CxtI = nullptr,
64
                             const DominatorTree *DT = nullptr,
65
                             OptimizationRemarkEmitter *ORE = nullptr,
66
                             bool UseInstrInfo = true);
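  // Illustrative usage sketch, not part of the original header. A caller that
  // also includes llvm/Support/KnownBits.h (KnownBits is only forward-declared
  // here) might test whether the sign bit of V is known to be clear:
  //
  //   KnownBits Known = computeKnownBits(V, DL, /*Depth=*/0, AC, CxtI, DT);
  //   if (Known.isNonNegative())
  //     ...; // the sign bit of V is proven to be zero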
67
68
  /// Compute known bits from the range metadata.
69
  /// \p KnownZero the set of bits that are known to be zero
70
  /// \p KnownOne the set of bits that are known to be one
71
  void computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
72
                                         KnownBits &Known);
73
74
  /// Return true if LHS and RHS have no common bits set.
75
  bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
76
                           const DataLayout &DL,
77
                           AssumptionCache *AC = nullptr,
78
                           const Instruction *CxtI = nullptr,
79
                           const DominatorTree *DT = nullptr,
80
                           bool UseInstrInfo = true);
81
82
  /// Return true if the given value is known to have exactly one bit set when
83
  /// defined. For vectors return true if every element is known to be a power
84
  /// of two when defined. Supports values with integer or pointer type and
85
  /// vectors of integers. If 'OrZero' is set, then return true if the given
86
  /// value is either a power of two or zero.
87
  bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
88
                              bool OrZero = false, unsigned Depth = 0,
89
                              AssumptionCache *AC = nullptr,
90
                              const Instruction *CxtI = nullptr,
91
                              const DominatorTree *DT = nullptr,
92
                              bool UseInstrInfo = true);
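  // Illustrative sketch, not part of the original header: a typical client
  // check before rewriting an unsigned division by Divisor into a right shift.
  // The helper name is hypothetical.
  inline bool exampleDivisorIsPowerOfTwo(const Value *Divisor,
                                         const DataLayout &DL) {
    // OrZero stays false: we need a genuine single-bit value here.
    return isKnownToBeAPowerOfTwo(Divisor, DL, /*OrZero=*/false);
  }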
93
94
  bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);
95
96
  /// Return true if the given value is known to be non-zero when defined. For
97
  /// vectors, return true if every element is known to be non-zero when
98
  /// defined. For pointers, if the context instruction and dominator tree are
99
  /// specified, perform context-sensitive analysis and return true if the
100
  /// pointer couldn't possibly be null at the specified instruction.
101
  /// Supports values with integer or pointer type and vectors of integers.
102
  bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
103
                      AssumptionCache *AC = nullptr,
104
                      const Instruction *CxtI = nullptr,
105
                      const DominatorTree *DT = nullptr,
106
                      bool UseInstrInfo = true);
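  // Illustrative sketch, not part of the original header: a context-sensitive
  // query that lets a pass drop a null check dominated by the instruction CxtI.
  //
  //   if (isKnownNonZero(P, DL, /*Depth=*/0, AC, CxtI, DT))
  //     ...; // an `icmp eq %P, null` at CxtI can be folded to false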
107
108
  /// Return true if the two given values are negations of each other.
109
  /// Currently it can recognize these Value pairs:
110
  /// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X)
111
  /// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A)
112
  bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false);
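  // For illustration (sketch, not part of the original header), the two
  // recognized shapes written as IR:
  //
  //   %X = sub i32 0, %Y              ; pattern 1: X = 0 - Y
  //
  //   %X = sub i32 %A, %B             ; pattern 2: X = A - B ...
  //   %Y = sub i32 %B, %A             ;            ... and Y = B - A
  //
  // With NeedNSW = true, the match additionally requires the subtractions to
  // carry the nsw (no-signed-wrap) flag.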
113
114
  /// Returns true if the given value is known to be non-negative.
115
  bool isKnownNonNegative(const Value *V, const DataLayout &DL,
116
                          unsigned Depth = 0,
117
                          AssumptionCache *AC = nullptr,
118
                          const Instruction *CxtI = nullptr,
119
                          const DominatorTree *DT = nullptr,
120
                          bool UseInstrInfo = true);
121
122
  /// Returns true if the given value is known to be positive (i.e. non-negative
123
  /// and non-zero).
124
  bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0,
125
                       AssumptionCache *AC = nullptr,
126
                       const Instruction *CxtI = nullptr,
127
                       const DominatorTree *DT = nullptr,
128
                       bool UseInstrInfo = true);
129
130
  /// Returns true if the given value is known to be negative (i.e. non-positive
131
  /// and non-zero).
132
  bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0,
133
                       AssumptionCache *AC = nullptr,
134
                       const Instruction *CxtI = nullptr,
135
                       const DominatorTree *DT = nullptr,
136
                       bool UseInstrInfo = true);
137
138
  /// Return true if the given values are known to be non-equal when defined.
139
  /// Supports scalar integer types only.
140
  bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
141
                       AssumptionCache *AC = nullptr,
142
                       const Instruction *CxtI = nullptr,
143
                       const DominatorTree *DT = nullptr,
144
                       bool UseInstrInfo = true);
145
146
  /// Return true if 'V & Mask' is known to be zero. We use this predicate to
147
  /// simplify operations downstream. Mask is known to be zero for bits that V
148
  /// cannot have.
149
  ///
150
  /// This function is defined on values with integer type, values with pointer
151
  /// type, and vectors of integers.  In the case
152
  /// where V is a vector, the mask, known zero, and known one values are the
153
  /// same width as the vector element, and the bit is set only if it is true
154
  /// for all of the elements in the vector.
155
  bool MaskedValueIsZero(const Value *V, const APInt &Mask,
156
                         const DataLayout &DL,
157
                         unsigned Depth = 0, AssumptionCache *AC = nullptr,
158
                         const Instruction *CxtI = nullptr,
159
                         const DominatorTree *DT = nullptr,
160
                         bool UseInstrInfo = true);
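  // Illustrative sketch, not part of the original header: proving that the low
  // bits of an integer value (at least 3 bits wide) are clear, i.e. that it is
  // a multiple of 8. The helper name is hypothetical.
  inline bool exampleIsMultipleOfEight(const Value *V, const DataLayout &DL) {
    unsigned BitWidth = V->getType()->getScalarSizeInBits();
    return MaskedValueIsZero(V, APInt::getLowBitsSet(BitWidth, 3), DL);
  }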
161
162
  /// Return the number of times the sign bit of the register is replicated into
163
  /// the other bits. We know that at least 1 bit is always equal to the sign
164
  /// bit (itself), but other cases can give us information. For example,
165
  /// immediately after an "ashr X, 2", we know that the top 3 bits are all
166
  /// equal to each other, so we return 3. For vectors, return the number of
167
  /// sign bits for the vector element with the minimum number of known sign
168
  /// bits.
169
  unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
170
                              unsigned Depth = 0, AssumptionCache *AC = nullptr,
171
                              const Instruction *CxtI = nullptr,
172
                              const DominatorTree *DT = nullptr,
173
                              bool UseInstrInfo = true);
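  // Illustrative sketch, not part of the original header: using the sign-bit
  // count to check whether Op provably fits in a narrower signed integer type.
  // The helper name is hypothetical.
  inline bool exampleFitsInSignedBits(const Value *Op, unsigned DestBits,
                                      const DataLayout &DL) {
    unsigned OpBits = Op->getType()->getScalarSizeInBits();
    // Op needs (OpBits - NumSignBits + 1) significant bits.
    return OpBits - ComputeNumSignBits(Op, DL) < DestBits;
  }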
174
175
  /// This function computes the integer multiple of Base that equals V. If
176
  /// successful, it returns true and returns the multiple in Multiple. If
177
  /// unsuccessful, it returns false. Also, if V can be simplified to an
178
  /// integer, then the simplified V is returned in Val. Look through sext only
179
  /// if LookThroughSExt=true.
180
  bool ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
181
                       bool LookThroughSExt = false,
182
                       unsigned Depth = 0);
183
184
  /// Map a call instruction to an intrinsic ID.  Libcalls which have equivalent
185
  /// intrinsics are treated as-if they were intrinsics.
186
  Intrinsic::ID getIntrinsicForCallSite(ImmutableCallSite ICS,
187
                                        const TargetLibraryInfo *TLI);
188
189
  /// Return true if we can prove that the specified FP value is never equal to
190
  /// -0.0.
191
  bool CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
192
                            unsigned Depth = 0);
193
194
  /// Return true if we can prove that the specified FP value is either NaN or
195
  /// never less than -0.0.
196
  ///
197
  ///      NaN --> true
198
  ///       +0 --> true
199
  ///       -0 --> true
200
  ///   x > +0 --> true
201
  ///   x < -0 --> false
202
  bool CannotBeOrderedLessThanZero(const Value *V, const TargetLibraryInfo *TLI);
203
204
  /// Return true if the floating-point scalar value is not a NaN or if the
205
  /// floating-point vector value has no NaN elements. Return false if a value
206
  /// could ever be NaN.
207
  bool isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
208
                       unsigned Depth = 0);
209
210
  /// Return true if we can prove that the specified FP value's sign bit is 0.
211
  ///
212
  ///      NaN --> true/false (depending on the NaN's sign bit)
213
  ///       +0 --> true
214
  ///       -0 --> false
215
  ///   x > +0 --> true
216
  ///   x < -0 --> false
217
  bool SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI);
218
219
  /// If the specified value can be set by repeating the same byte in memory,
220
  /// return the i8 value that it is represented with. This is true for all i8
221
  /// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
222
  /// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
223
  /// i16 0x1234), return null. If the value is entirely undef and padding,
224
  /// return undef.
225
  Value *isBytewiseValue(Value *V);
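  // For illustration (sketch, not part of the original header), restating the
  // examples from the comment above as queries:
  //
  //   isBytewiseValue(i8 42)        --> i8 42
  //   isBytewiseValue(i32 0)        --> i8 0
  //   isBytewiseValue(i32 -1)       --> i8 -1     (0xFF repeated)
  //   isBytewiseValue(i16 0xF0F0)   --> i8 0xF0
  //   isBytewiseValue(i16 0x1234)   --> nullptr   (not a repeated byte)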
226
227
  /// Given an aggregate and a sequence of indices, see if the scalar value
228
  /// indexed is already around as a register, for example if it were inserted
229
  /// directly into the aggregate.
230
  ///
231
  /// If InsertBefore is not null, this function will duplicate (modified)
232
  /// insertvalues when a part of a nested struct is extracted.
233
  Value *FindInsertedValue(Value *V,
234
                           ArrayRef<unsigned> idx_range,
235
                           Instruction *InsertBefore = nullptr);
236
237
  /// Analyze the specified pointer to see if it can be expressed as a base
238
  /// pointer plus a constant offset. Return the base and offset to the caller.
239
  Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
240
                                          const DataLayout &DL);
241
  inline const Value *GetPointerBaseWithConstantOffset(const Value *Ptr,
242
                                                       int64_t &Offset,
243
155k
                                                       const DataLayout &DL) {
244
155k
    return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset,
245
155k
                                            DL);
246
155k
  }
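  // Illustrative sketch, not part of the original header:
  //
  //   int64_t Offset;
  //   const Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, DL);
  //   // For `%q = getelementptr i8, i8* %p, i64 16`, passing %q yields
  //   // Base == %p and Offset == 16.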
247
248
  /// Returns true if the GEP is based on a pointer to a string (array of
249
  /// \p CharSize integers) and is indexing into this string.
250
  bool isGEPBasedOnPointerToString(const GEPOperator *GEP,
251
                                   unsigned CharSize = 8);
252
253
  /// Represents offset+length into a ConstantDataArray.
254
  struct ConstantDataArraySlice {
255
    /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid
256
    /// initializer, it just doesn't fit the ConstantDataArray interface).
257
    const ConstantDataArray *Array;
258
259
    /// Slice starts at this Offset.
260
    uint64_t Offset;
261
262
    /// Length of the slice.
263
    uint64_t Length;
264
265
    /// Moves the Offset and adjusts Length accordingly.
266
910
    void move(uint64_t Delta) {
267
910
      assert(Delta < Length);
268
910
      Offset += Delta;
269
910
      Length -= Delta;
270
910
    }
271
272
    /// Convenience accessor for elements in the slice.
273
4.23k
    uint64_t operator[](unsigned I) const {
274
4.23k
      return Array==nullptr ? 0 : Array->getElementAsInteger(I + Offset);
275
4.23k
    }
276
  };
277
278
  /// Returns true if the value \p V is a pointer into a ConstantDataArray.
279
  /// If successful \p Slice will point to a ConstantDataArray info object
280
  /// with an appropriate offset.
281
  bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
282
                                unsigned ElementSize, uint64_t Offset = 0);
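  // Illustrative sketch, not part of the original header: walking a constant
  // string one element at a time through a slice.
  //
  //   ConstantDataArraySlice Slice;
  //   if (getConstantDataArrayInfo(V, Slice, /*ElementSize=*/8)) {
  //     uint64_t First = Slice[0]; // element at Slice.Offset
  //     if (Slice.Length > 1)
  //       Slice.move(1);           // drop the first element from the view
  //   }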
283
284
  /// This function computes the length of a null-terminated C string pointed to
285
  /// by V. If successful, it returns true and returns the string in Str. If
286
  /// unsuccessful, it returns false. This does not include the trailing null
287
  /// character by default. If TrimAtNul is set to false, then this returns any
288
  /// trailing null characters as well as any other characters that come after
289
  /// it.
290
  bool getConstantStringInfo(const Value *V, StringRef &Str,
291
                             uint64_t Offset = 0, bool TrimAtNul = true);
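  // Illustrative sketch, not part of the original header: reading a constant C
  // string, e.g. the format argument of a printf-like call.
  //
  //   StringRef Str;
  //   if (getConstantStringInfo(V, Str))
  //     ...; // Str references the string's bytes, without the trailing NUL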
292
293
  /// If we can compute the length of the string pointed to by the specified
294
  /// pointer, return 'len+1'.  If we can't, return 0.
295
  uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);
296
297
  /// This function returns the call's pointer argument that aliasing rules
298
  /// consider the same as its return value. You CAN'T use it to replace one value with another.
299
  const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call);
300
15.1M
  inline Value *getArgumentAliasingToReturnedPointer(CallBase *Call) {
301
15.1M
    return const_cast<Value *>(getArgumentAliasingToReturnedPointer(
302
15.1M
        const_cast<const CallBase *>(Call)));
303
15.1M
  }
304
305
  // {launder,strip}.invariant.group returns a pointer that aliases its
306
  // argument, and it only captures the pointer by returning it.
307
  // These intrinsics are not marked as nocapture, because returning is
308
  // considered a capture. The arguments are not marked as returned either,
309
  // because that would make the intrinsics useless.
310
  bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
311
      const CallBase *Call);
312
313
  /// This method strips off any GEP address adjustments and pointer casts from
314
  /// the specified value, returning the original object being addressed. Note
315
  /// that the returned value has pointer type if the specified value does. If
316
  /// the MaxLookup value is non-zero, it limits the number of instructions to
317
  /// be stripped off.
318
  Value *GetUnderlyingObject(Value *V, const DataLayout &DL,
319
                             unsigned MaxLookup = 6);
320
  inline const Value *GetUnderlyingObject(const Value *V, const DataLayout &DL,
321
244M
                                          unsigned MaxLookup = 6) {
322
244M
    return GetUnderlyingObject(const_cast<Value *>(V), DL, MaxLookup);
323
244M
  }
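  // Illustrative sketch, not part of the original header: two addresses
  // provably stem from the same allocation when stripping GEPs and casts leads
  // both back to the same base value. The helper name is hypothetical.
  inline bool exampleSameUnderlyingObject(const Value *A, const Value *B,
                                          const DataLayout &DL) {
    return GetUnderlyingObject(A, DL) == GetUnderlyingObject(B, DL);
  }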
324
325
  /// This method is similar to GetUnderlyingObject except that it can
326
  /// look through phi and select instructions and return multiple objects.
327
  ///
328
  /// If LoopInfo is passed, loop phis are further analyzed.  If a pointer
329
  /// accesses different objects in each iteration, we don't look through the
330
  /// phi node. E.g. consider this loop nest:
331
  ///
332
  ///   int **A;
333
  ///   for (i)
334
  ///     for (j) {
335
  ///        A[i][j] = A[i-1][j] * B[j]
336
  ///     }
337
  ///
338
  /// This is transformed by Load-PRE to stash away A[i] for the next iteration
339
  /// of the outer loop:
340
  ///
341
  ///   Curr = A[0];          // Prev_0
342
  ///   for (i: 1..N) {
343
  ///     Prev = Curr;        // Prev = PHI (Prev_0, Curr)
344
  ///     Curr = A[i];
345
  ///     for (j: 0..N) {
346
  ///        Curr[j] = Prev[j] * B[j]
347
  ///     }
348
  ///   }
349
  ///
350
  /// Since A[i] and A[i-1] are independent pointers, GetUnderlyingObjects
351
  /// should not assume that Curr and Prev share the same underlying object,
352
  /// and thus it shouldn't look through the phi above.
353
  void GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
354
                            const DataLayout &DL, LoopInfo *LI = nullptr,
355
                            unsigned MaxLookup = 6);
356
357
  /// This is a wrapper around GetUnderlyingObjects and adds support for basic
358
  /// ptrtoint+arithmetic+inttoptr sequences.
359
  bool getUnderlyingObjectsForCodeGen(const Value *V,
360
                            SmallVectorImpl<Value *> &Objects,
361
                            const DataLayout &DL);
362
363
  /// Return true if the only users of this pointer are lifetime markers.
364
  bool onlyUsedByLifetimeMarkers(const Value *V);
365
366
  /// Return true if the instruction does not have any effects besides
367
  /// calculating the result and does not have undefined behavior.
368
  ///
369
  /// This method never returns true for an instruction that returns true for
370
  /// mayHaveSideEffects; however, this method also does some other checks in
371
  /// addition. It checks for undefined behavior, like dividing by zero or
372
  /// loading from an invalid pointer (but not for undefined results, like a
373
  /// shift with a shift amount larger than the width of the result). It checks
374
  /// for malloc and alloca because speculatively executing them might cause a
375
  /// memory leak. It also returns false for instructions related to control
376
  /// flow, specifically terminators and PHI nodes.
377
  ///
378
  /// If the CtxI is specified this method performs context-sensitive analysis
379
  /// and returns true if it is safe to execute the instruction immediately
380
  /// before the CtxI.
381
  ///
382
  /// If the CtxI is NOT specified this method only looks at the instruction
383
  /// itself and its operands, so if this method returns true, it is safe to
384
  /// move the instruction as long as the correct dominance relationships for
385
  /// the operands and users hold.
386
  ///
387
  /// This method can return true for instructions that read memory;
388
  /// for such instructions, moving them may change the resulting value.
389
  bool isSafeToSpeculativelyExecute(const Value *V,
390
                                    const Instruction *CtxI = nullptr,
391
                                    const DominatorTree *DT = nullptr);
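  // Illustrative sketch, not part of the original header: a pass hoisting an
  // instruction out of a conditionally executed block must first prove that
  // speculating it is safe (InsertPt is a hypothetical hoisting point).
  //
  //   if (isSafeToSpeculativelyExecute(&I))
  //     I.moveBefore(InsertPt); // hoist above the guarding branch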
392
393
  /// Returns true if the result or effects of the given instruction \p I
394
  /// depend on or influence global memory.
395
  /// Memory dependence arises for example if the instruction reads from
396
  /// memory or may produce effects or undefined behaviour. Memory dependent
397
  /// instructions generally cannot be reordered with respect to other memory
398
  /// dependent instructions or moved into non-dominated basic blocks.
399
  /// Instructions which just compute a value based on the values of their
400
  /// operands are not memory dependent.
401
  bool mayBeMemoryDependent(const Instruction &I);
402
403
  /// Return true if it is an intrinsic that cannot be speculated but also
404
  /// cannot trap.
405
  bool isAssumeLikeIntrinsic(const Instruction *I);
406
407
  /// Return true if it is valid to use the assumptions provided by an
408
  /// assume intrinsic, I, at the point in the control-flow identified by the
409
  /// context instruction, CxtI.
410
  bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
411
                               const DominatorTree *DT = nullptr);
412
413
  enum class OverflowResult { AlwaysOverflows, MayOverflow, NeverOverflows };
414
415
  OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
416
                                               const Value *RHS,
417
                                               const DataLayout &DL,
418
                                               AssumptionCache *AC,
419
                                               const Instruction *CxtI,
420
                                               const DominatorTree *DT,
421
                                               bool UseInstrInfo = true);
422
  OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
423
                                             const DataLayout &DL,
424
                                             AssumptionCache *AC,
425
                                             const Instruction *CxtI,
426
                                             const DominatorTree *DT,
427
                                             bool UseInstrInfo = true);
428
  OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
429
                                               const Value *RHS,
430
                                               const DataLayout &DL,
431
                                               AssumptionCache *AC,
432
                                               const Instruction *CxtI,
433
                                               const DominatorTree *DT,
434
                                               bool UseInstrInfo = true);
435
  OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS,
436
                                             const DataLayout &DL,
437
                                             AssumptionCache *AC = nullptr,
438
                                             const Instruction *CxtI = nullptr,
439
                                             const DominatorTree *DT = nullptr);
440
  /// This version also leverages the sign bit of Add if known.
441
  OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
442
                                             const DataLayout &DL,
443
                                             AssumptionCache *AC = nullptr,
444
                                             const Instruction *CxtI = nullptr,
445
                                             const DominatorTree *DT = nullptr);
446
  OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS,
447
                                               const DataLayout &DL,
448
                                               AssumptionCache *AC,
449
                                               const Instruction *CxtI,
450
                                               const DominatorTree *DT);
451
  OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
452
                                             const DataLayout &DL,
453
                                             AssumptionCache *AC,
454
                                             const Instruction *CxtI,
455
                                             const DominatorTree *DT);
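  // Illustrative sketch, not part of the original header: only a NeverOverflows
  // answer justifies attaching the nuw flag to an add of LHS and RHS. The
  // helper name is hypothetical.
  inline bool exampleCanMarkAddNUW(const Value *LHS, const Value *RHS,
                                   const DataLayout &DL, AssumptionCache *AC,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
    return computeOverflowForUnsignedAdd(LHS, RHS, DL, AC, CxtI, DT) ==
           OverflowResult::NeverOverflows;
  }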
456
457
  /// Returns true if the arithmetic part of \p II's result is
458
  /// used only along the paths control dependent on the computation
459
  /// not overflowing, \p II being an <op>.with.overflow intrinsic.
460
  bool isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
461
                                 const DominatorTree &DT);
462
463
  /// Return true if this function can prove that the instruction I will
464
  /// always transfer execution to one of its successors (including the next
465
  /// instruction that follows within a basic block). E.g. this is not
466
  /// guaranteed for function calls that could loop infinitely.
467
  ///
468
  /// In other words, this function returns false for instructions that may
469
  /// transfer execution or fail to transfer execution in a way that is not
470
  /// captured in the CFG nor in the sequence of instructions within a basic
471
  /// block.
472
  ///
473
  /// Undefined behavior is assumed not to happen, so e.g. division is
474
  /// guaranteed to transfer execution to the following instruction even
475
  /// though division by zero might cause undefined behavior.
476
  bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);
477
478
  /// Returns true if this block does not contain a potential implicit exit.
479
  /// This is equivalent to saying that all instructions within the basic block
480
  /// are guaranteed to transfer execution to their successor within the basic
481
  /// block. This has the same assumptions w.r.t. undefined behavior as the
482
  /// instruction variant of this function.
483
  bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);
484
485
  /// Return true if this function can prove that the instruction I
486
  /// is executed for every iteration of the loop L.
487
  ///
488
  /// Note that this currently only considers the loop header.
489
  bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
490
                                              const Loop *L);
491
492
  /// Return true if this function can prove that I is guaranteed to yield
493
  /// full-poison (all bits poison) if at least one of its operands is
494
  /// full-poison (all bits poison).
495
  ///
496
  /// The exact rules for how poison propagates through instructions have
497
  /// not been settled as of 2015-07-10, so this function is conservative
498
  /// and only considers poison to be propagated in uncontroversial
499
  /// cases. There is no attempt to track values that may be only partially
500
  /// poison.
501
  bool propagatesFullPoison(const Instruction *I);
502
503
  /// Return either nullptr or an operand of I such that I will trigger
504
  /// undefined behavior if I is executed and that operand has a full-poison
505
  /// value (all bits poison).
506
  const Value *getGuaranteedNonFullPoisonOp(const Instruction *I);
507
508
  /// Return true if this function can prove that if PoisonI is executed
509
  /// and yields a full-poison value (all bits poison), then that will
510
  /// trigger undefined behavior.
511
  ///
512
  /// Note that this currently only considers the basic block that is
513
  /// the parent of I.
514
  bool programUndefinedIfFullPoison(const Instruction *PoisonI);
515
516
  /// Specific patterns of select instructions we can match.
517
  enum SelectPatternFlavor {
518
    SPF_UNKNOWN = 0,
519
    SPF_SMIN,                   /// Signed minimum
520
    SPF_UMIN,                   /// Unsigned minimum
521
    SPF_SMAX,                   /// Signed maximum
522
    SPF_UMAX,                   /// Unsigned maximum
523
    SPF_FMINNUM,                /// Floating point minnum
524
    SPF_FMAXNUM,                /// Floating point maxnum
525
    SPF_ABS,                    /// Absolute value
526
    SPF_NABS                    /// Negated absolute value
527
  };
528
529
  /// Behavior when a floating point min/max is given one NaN and one
530
  /// non-NaN as input.
531
  enum SelectPatternNaNBehavior {
532
    SPNB_NA = 0,                /// NaN behavior not applicable.
533
    SPNB_RETURNS_NAN,           /// Given one NaN input, returns the NaN.
534
    SPNB_RETURNS_OTHER,         /// Given one NaN input, returns the non-NaN.
535
    SPNB_RETURNS_ANY            /// Given one NaN input, can return either (or
536
                                /// it has been determined that no operands can
537
                                /// be NaN).
538
  };
539
540
  struct SelectPatternResult {
541
    SelectPatternFlavor Flavor;
542
    SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is
543
                                          /// SPF_FMINNUM or SPF_FMAXNUM.
544
    bool Ordered;               /// When implementing this min/max pattern as
545
                                /// fcmp; select, does the fcmp have to be
546
                                /// ordered?
547
548
    /// Return true if \p SPF is a min or a max pattern.
549
22.8M
    static bool isMinOrMax(SelectPatternFlavor SPF) {
550
22.8M
      return SPF != SPF_UNKNOWN && SPF != SPF_ABS && SPF != SPF_NABS;
551
22.8M
    }
552
  };
553
554
  /// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
555
  /// and providing the out parameter results if we successfully match.
556
  ///
557
  /// For ABS/NABS, LHS will be set to the input to the abs idiom. RHS will be
558
  /// the negation instruction from the idiom.
559
  ///
560
  /// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
561
  /// not match that of the original select. If this is the case, the cast
562
  /// operation (one of Trunc,SExt,Zext) that must be done to transform the
563
  /// type of LHS and RHS into the type of V is returned in CastOp.
564
  ///
565
  /// For example:
566
  ///   %1 = icmp slt i32 %a, i32 4
567
  ///   %2 = sext i32 %a to i64
568
  ///   %3 = select i1 %1, i64 %2, i64 4
569
  ///
570
  /// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
571
  ///
572
  SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
573
                                         Instruction::CastOps *CastOp = nullptr,
574
                                         unsigned Depth = 0);
575
  inline SelectPatternResult
576
  matchSelectPattern(const Value *V, const Value *&LHS, const Value *&RHS,
577
14.2M
                     Instruction::CastOps *CastOp = nullptr) {
578
14.2M
    Value *L = const_cast<Value*>(LHS);
579
14.2M
    Value *R = const_cast<Value*>(RHS);
580
14.2M
    auto Result = matchSelectPattern(const_cast<Value*>(V), L, R);
581
14.2M
    LHS = L;
582
14.2M
    RHS = R;
583
14.2M
    return Result;
584
14.2M
  }
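  // Illustrative sketch, not part of the original header: recognizing a signed
  // maximum spelled as icmp + select. The helper name is hypothetical.
  inline bool exampleIsSignedMax(Value *V, Value *&LHS, Value *&RHS) {
    return matchSelectPattern(V, LHS, RHS).Flavor == SPF_SMAX;
  }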
585
586
  /// Return the canonical comparison predicate for the specified
587
  /// minimum/maximum flavor.
588
  CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF,
589
                                   bool Ordered = false);
590
591
  /// Return the inverse minimum/maximum flavor of the specified flavor.
592
  /// For example, signed minimum is the inverse of signed maximum.
593
  SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF);
594
595
  /// Return the canonical inverse comparison predicate for the specified
596
  /// minimum/maximum flavor.
597
  CmpInst::Predicate getInverseMinMaxPred(SelectPatternFlavor SPF);
598
599
  /// Return true if RHS is known to be implied true by LHS.  Return false if
600
  /// RHS is known to be implied false by LHS.  Otherwise, return None if no
601
  /// implication can be made.
602
  /// A & B must be i1 (boolean) values or a vector of such values. Note that
603
  /// the truth table for implication is the same as <=u on i1 values (but not
604
  /// <=s!).  The truth table for both is:
605
  ///    | T | F (B)
606
  ///  T | T | F
607
  ///  F | T | T
608
  /// (A)
609
  Optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
610
                                    const DataLayout &DL, bool LHSIsTrue = true,
611
                                    unsigned Depth = 0);
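  // Illustrative sketch, not part of the original header: the tri-state result
  // distinguishes "implied false" from "nothing known".
  //
  //   if (Optional<bool> Implied = isImpliedCondition(CondA, CondB, DL)) {
  //     // *Implied == true : CondB must be true whenever CondA is true.
  //     // *Implied == false: CondB must be false whenever CondA is true.
  //   } // no value: no implication could be established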
612
613
  /// Return the boolean condition value in the context of the given instruction
614
  /// if it is known based on dominating conditions.
615
  Optional<bool> isImpliedByDomCondition(const Value *Cond,
616
                                         const Instruction *ContextI,
617
                                         const DataLayout &DL);
618
} // end namespace llvm
619
620
#endif // LLVM_ANALYSIS_VALUETRACKING_H