Coverage Report

Created: 2019-07-24 05:18

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
Line | Count | Source
1
//===- InstCombineCalls.cpp -----------------------------------------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This file implements the visitCall, visitInvoke, and visitCallBr functions.
10
//
11
//===----------------------------------------------------------------------===//
12
13
#include "InstCombineInternal.h"
14
#include "llvm/ADT/APFloat.h"
15
#include "llvm/ADT/APInt.h"
16
#include "llvm/ADT/APSInt.h"
17
#include "llvm/ADT/ArrayRef.h"
18
#include "llvm/ADT/None.h"
19
#include "llvm/ADT/Optional.h"
20
#include "llvm/ADT/STLExtras.h"
21
#include "llvm/ADT/SmallVector.h"
22
#include "llvm/ADT/Statistic.h"
23
#include "llvm/ADT/Twine.h"
24
#include "llvm/Analysis/AssumptionCache.h"
25
#include "llvm/Analysis/InstructionSimplify.h"
26
#include "llvm/Analysis/Loads.h"
27
#include "llvm/Analysis/MemoryBuiltins.h"
28
#include "llvm/Analysis/ValueTracking.h"
29
#include "llvm/Analysis/VectorUtils.h"
30
#include "llvm/IR/Attributes.h"
31
#include "llvm/IR/BasicBlock.h"
32
#include "llvm/IR/Constant.h"
33
#include "llvm/IR/Constants.h"
34
#include "llvm/IR/DataLayout.h"
35
#include "llvm/IR/DerivedTypes.h"
36
#include "llvm/IR/Function.h"
37
#include "llvm/IR/GlobalVariable.h"
38
#include "llvm/IR/InstrTypes.h"
39
#include "llvm/IR/Instruction.h"
40
#include "llvm/IR/Instructions.h"
41
#include "llvm/IR/IntrinsicInst.h"
42
#include "llvm/IR/Intrinsics.h"
43
#include "llvm/IR/LLVMContext.h"
44
#include "llvm/IR/Metadata.h"
45
#include "llvm/IR/PatternMatch.h"
46
#include "llvm/IR/Statepoint.h"
47
#include "llvm/IR/Type.h"
48
#include "llvm/IR/User.h"
49
#include "llvm/IR/Value.h"
50
#include "llvm/IR/ValueHandle.h"
51
#include "llvm/Support/AtomicOrdering.h"
52
#include "llvm/Support/Casting.h"
53
#include "llvm/Support/CommandLine.h"
54
#include "llvm/Support/Compiler.h"
55
#include "llvm/Support/Debug.h"
56
#include "llvm/Support/ErrorHandling.h"
57
#include "llvm/Support/KnownBits.h"
58
#include "llvm/Support/MathExtras.h"
59
#include "llvm/Support/raw_ostream.h"
60
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
61
#include "llvm/Transforms/Utils/Local.h"
62
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
63
#include <algorithm>
64
#include <cassert>
65
#include <cstdint>
66
#include <cstring>
67
#include <utility>
68
#include <vector>
69
70
using namespace llvm;
71
using namespace PatternMatch;
72
73
#define DEBUG_TYPE "instcombine"
74
75
STATISTIC(NumSimplified, "Number of library calls simplified");
76
77
static cl::opt<unsigned> GuardWideningWindow(
78
    "instcombine-guard-widening-window",
79
    cl::init(3),
80
    cl::desc("How wide an instruction window to bypass looking for "
81
             "another guard"));
82
83
/// Return the specified type promoted as it would be to pass through a va_arg
84
/// area.
85
8
static Type *getPromotedType(Type *Ty) {
86
8
  if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
87
5
    if (ITy->getBitWidth() < 32)
88
1
      return Type::getInt32Ty(Ty->getContext());
89
7
  }
90
7
  return Ty;
91
7
}
92
93
/// Return a constant boolean vector that has true elements in all positions
94
/// where the input constant data vector has an element with the sign bit set.
95
26
static Constant *getNegativeIsTrueBoolVec(ConstantDataVector *V) {
96
26
  SmallVector<Constant *, 32> BoolVec;
97
26
  IntegerType *BoolTy = Type::getInt1Ty(V->getContext());
98
180
  for (unsigned I = 0, E = V->getNumElements(); I != E; ++I) {
99
154
    Constant *Elt = V->getElementAsConstant(I);
100
154
    assert((isa<ConstantInt>(Elt) || isa<ConstantFP>(Elt)) &&
101
154
           "Unexpected constant data vector element type");
102
154
    bool Sign = V->getElementType()->isIntegerTy()
103
154
                    ? cast<ConstantInt>(Elt)->isNegative()
104
154
                    : cast<ConstantFP>(Elt)->isNegative();
105
154
    BoolVec.push_back(ConstantInt::get(BoolTy, Sign));
106
154
  }
107
26
  return ConstantVector::get(BoolVec);
108
26
}
109
110
242k
Instruction *InstCombiner::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
111
242k
  unsigned DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
112
242k
  unsigned CopyDstAlign = MI->getDestAlignment();
113
242k
  if (CopyDstAlign < DstAlign){
114
1.22k
    MI->setDestAlignment(DstAlign);
115
1.22k
    return MI;
116
1.22k
  }
117
241k
118
241k
  unsigned SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
119
241k
  unsigned CopySrcAlign = MI->getSourceAlignment();
120
241k
  if (CopySrcAlign < SrcAlign) {
121
520
    MI->setSourceAlignment(SrcAlign);
122
520
    return MI;
123
520
  }
124
240k
125
240k
  // If we have a store to a location which is known constant, we can conclude
126
240k
  // that the store must be storing the constant value (else the memory
127
240k
  // wouldn't be constant), and this must be a noop.
128
240k
  if (AA->pointsToConstantMemory(MI->getDest())) {
129
2
    // Set the size of the copy to 0, it will be deleted on the next iteration.
130
2
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
131
2
    return MI;
132
2
  }
133
240k
134
240k
  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
135
240k
  // load/store.
136
240k
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
137
240k
  if (!MemOpLength) return nullptr;
138
217k
139
217k
  // Source and destination pointer types are always "i8*" for intrinsic.  See
140
217k
  // if the size is something we can handle with a single primitive load/store.
141
217k
  // A single load+store correctly handles overlapping memory in the memmove
142
217k
  // case.
143
217k
  uint64_t Size = MemOpLength->getLimitedValue();
144
217k
  assert(Size && "0-sized memory transferring should be removed already.");
145
217k
146
217k
  if (Size > 8 || (Size&(Size-1)))
147
214k
    return nullptr;  // If not 1/2/4/8 bytes, exit.
148
2.49k
149
2.49k
  // If it is an atomic and alignment is less than the size then we will
150
2.49k
  // introduce the unaligned memory access which will be later transformed
151
2.49k
  // into libcall in CodeGen. This is not evident performance gain so disable
152
2.49k
  // it now.
153
2.49k
  if (isa<AtomicMemTransferInst>(MI))
154
52
    if (CopyDstAlign < Size || CopySrcAlign < Size)
155
24
      return nullptr;
156
2.46k
157
2.46k
  // Use an integer load+store unless we can find something better.
158
2.46k
  unsigned SrcAddrSp =
159
2.46k
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
160
2.46k
  unsigned DstAddrSp =
161
2.46k
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();
162
2.46k
163
2.46k
  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
164
2.46k
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
165
2.46k
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
166
2.46k
167
2.46k
  // If the memcpy has metadata describing the members, see if we can get the
168
2.46k
  // TBAA tag describing our copy.
169
2.46k
  MDNode *CopyMD = nullptr;
170
2.46k
  if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa)) {
171
2
    CopyMD = M;
172
2.46k
  } else if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
173
1.04k
    if (M->getNumOperands() == 3 && M->getOperand(0) &&
174
1.04k
        mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
175
1.04k
        mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
176
1.04k
        M->getOperand(1) &&
177
1.04k
        mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
178
1.04k
        mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
179
748
        Size &&
180
1.04k
        M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
181
746
      CopyMD = cast<MDNode>(M->getOperand(2));
182
1.04k
  }
183
2.46k
184
2.46k
  Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
185
2.46k
  Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
186
2.46k
  LoadInst *L = Builder.CreateLoad(IntType, Src);
187
2.46k
  // Alignment from the mem intrinsic will be better, so use it.
188
2.46k
  L->setAlignment(CopySrcAlign);
189
2.46k
  if (CopyMD)
190
748
    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
191
2.46k
  MDNode *LoopMemParallelMD =
192
2.46k
    MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
193
2.46k
  if (LoopMemParallelMD)
194
0
    L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
195
2.46k
  MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
196
2.46k
  if (AccessGroupMD)
197
1
    L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
198
2.46k
199
2.46k
  StoreInst *S = Builder.CreateStore(L, Dest);
200
2.46k
  // Alignment from the mem intrinsic will be better, so use it.
201
2.46k
  S->setAlignment(CopyDstAlign);
202
2.46k
  if (CopyMD)
203
748
    S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
204
2.46k
  if (LoopMemParallelMD)
205
0
    S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
206
2.46k
  if (AccessGroupMD)
207
1
    S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
208
2.46k
209
2.46k
  if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
210
2.43k
    // non-atomics can be volatile
211
2.43k
    L->setVolatile(MT->isVolatile());
212
2.43k
    S->setVolatile(MT->isVolatile());
213
2.43k
  }
214
2.46k
  if (isa<AtomicMemTransferInst>(MI)) {
215
28
    // atomics have to be unordered
216
28
    L->setOrdering(AtomicOrdering::Unordered);
217
28
    S->setOrdering(AtomicOrdering::Unordered);
218
28
  }
219
2.46k
220
2.46k
  // Set the size of the copy to 0, it will be deleted on the next iteration.
221
2.46k
  MI->setLength(Constant::getNullValue(MemOpLength->getType()));
222
2.46k
  return MI;
223
2.46k
}
224
225
292k
Instruction *InstCombiner::SimplifyAnyMemSet(AnyMemSetInst *MI) {
226
292k
  unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
227
292k
  if (MI->getDestAlignment() < Alignment) {
228
1.85k
    MI->setDestAlignment(Alignment);
229
1.85k
    return MI;
230
1.85k
  }
231
290k
232
290k
  // If we have a store to a location which is known constant, we can conclude
233
290k
  // that the store must be storing the constant value (else the memory
234
290k
  // wouldn't be constant), and this must be a noop.
235
290k
  if (AA->pointsToConstantMemory(MI->getDest())) {
236
1
    // Set the size of the copy to 0, it will be deleted on the next iteration.
237
1
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
238
1
    return MI;
239
1
  }
240
290k
241
290k
  // Extract the length and alignment and fill if they are constant.
242
290k
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
243
290k
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
244
290k
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
245
40.9k
    return nullptr;
246
249k
  uint64_t Len = LenC->getLimitedValue();
247
249k
  Alignment = MI->getDestAlignment();
248
249k
  assert(Len && "0-sized memory setting should be removed already.");
249
249k
250
249k
  // Alignment 0 is identity for alignment 1 for memset, but not store.
251
249k
  if (Alignment == 0)
252
0
    Alignment = 1;
253
249k
254
249k
  // If it is an atomic and alignment is less than the size then we will
255
249k
  // introduce the unaligned memory access which will be later transformed
256
249k
  // into libcall in CodeGen. This is not evident performance gain so disable
257
249k
  // it now.
258
249k
  if (isa<AtomicMemSetInst>(MI))
259
36
    if (Alignment < Len)
260
20
      return nullptr;
261
249k
262
249k
  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
263
249k
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
264
485
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.
265
485
266
485
    Value *Dest = MI->getDest();
267
485
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
268
485
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
269
485
    Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);
270
485
271
485
    // Extract the fill value and store.
272
485
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
273
485
    StoreInst *S = Builder.CreateStore(ConstantInt::get(ITy, Fill), Dest,
274
485
                                       MI->isVolatile());
275
485
    S->setAlignment(Alignment);
276
485
    if (isa<AtomicMemSetInst>(MI))
277
14
      S->setOrdering(AtomicOrdering::Unordered);
278
485
279
485
    // Set the size of the copy to 0, it will be deleted on the next iteration.
280
485
    MI->setLength(Constant::getNullValue(LenC->getType()));
281
485
    return MI;
282
485
  }
283
248k
284
248k
  return nullptr;
285
248k
}
286
287
static Value *simplifyX86immShift(const IntrinsicInst &II,
288
395
                                  InstCombiner::BuilderTy &Builder) {
289
395
  bool LogicalShift = false;
290
395
  bool ShiftLeft = false;
291
395
292
395
  switch (II.getIntrinsicID()) {
293
395
  
  default: llvm_unreachable("Unexpected intrinsic!");
294
395
  case Intrinsic::x86_sse2_psra_d:
295
139
  case Intrinsic::x86_sse2_psra_w:
296
139
  case Intrinsic::x86_sse2_psrai_d:
297
139
  case Intrinsic::x86_sse2_psrai_w:
298
139
  case Intrinsic::x86_avx2_psra_d:
299
139
  case Intrinsic::x86_avx2_psra_w:
300
139
  case Intrinsic::x86_avx2_psrai_d:
301
139
  case Intrinsic::x86_avx2_psrai_w:
302
139
  case Intrinsic::x86_avx512_psra_q_128:
303
139
  case Intrinsic::x86_avx512_psrai_q_128:
304
139
  case Intrinsic::x86_avx512_psra_q_256:
305
139
  case Intrinsic::x86_avx512_psrai_q_256:
306
139
  case Intrinsic::x86_avx512_psra_d_512:
307
139
  case Intrinsic::x86_avx512_psra_q_512:
308
139
  case Intrinsic::x86_avx512_psra_w_512:
309
139
  case Intrinsic::x86_avx512_psrai_d_512:
310
139
  case Intrinsic::x86_avx512_psrai_q_512:
311
139
  case Intrinsic::x86_avx512_psrai_w_512:
312
139
    LogicalShift = false; ShiftLeft = false;
313
139
    break;
314
139
  case Intrinsic::x86_sse2_psrl_d:
315
136
  case Intrinsic::x86_sse2_psrl_q:
316
136
  case Intrinsic::x86_sse2_psrl_w:
317
136
  case Intrinsic::x86_sse2_psrli_d:
318
136
  case Intrinsic::x86_sse2_psrli_q:
319
136
  case Intrinsic::x86_sse2_psrli_w:
320
136
  case Intrinsic::x86_avx2_psrl_d:
321
136
  case Intrinsic::x86_avx2_psrl_q:
322
136
  case Intrinsic::x86_avx2_psrl_w:
323
136
  case Intrinsic::x86_avx2_psrli_d:
324
136
  case Intrinsic::x86_avx2_psrli_q:
325
136
  case Intrinsic::x86_avx2_psrli_w:
326
136
  case Intrinsic::x86_avx512_psrl_d_512:
327
136
  case Intrinsic::x86_avx512_psrl_q_512:
328
136
  case Intrinsic::x86_avx512_psrl_w_512:
329
136
  case Intrinsic::x86_avx512_psrli_d_512:
330
136
  case Intrinsic::x86_avx512_psrli_q_512:
331
136
  case Intrinsic::x86_avx512_psrli_w_512:
332
136
    LogicalShift = true; ShiftLeft = false;
333
136
    break;
334
136
  case Intrinsic::x86_sse2_psll_d:
335
120
  case Intrinsic::x86_sse2_psll_q:
336
120
  case Intrinsic::x86_sse2_psll_w:
337
120
  case Intrinsic::x86_sse2_pslli_d:
338
120
  case Intrinsic::x86_sse2_pslli_q:
339
120
  case Intrinsic::x86_sse2_pslli_w:
340
120
  case Intrinsic::x86_avx2_psll_d:
341
120
  case Intrinsic::x86_avx2_psll_q:
342
120
  case Intrinsic::x86_avx2_psll_w:
343
120
  case Intrinsic::x86_avx2_pslli_d:
344
120
  case Intrinsic::x86_avx2_pslli_q:
345
120
  case Intrinsic::x86_avx2_pslli_w:
346
120
  case Intrinsic::x86_avx512_psll_d_512:
347
120
  case Intrinsic::x86_avx512_psll_q_512:
348
120
  case Intrinsic::x86_avx512_psll_w_512:
349
120
  case Intrinsic::x86_avx512_pslli_d_512:
350
120
  case Intrinsic::x86_avx512_pslli_q_512:
351
120
  case Intrinsic::x86_avx512_pslli_w_512:
352
120
    LogicalShift = true; ShiftLeft = true;
353
120
    break;
354
395
  }
355
395
  assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");
356
395
357
395
  // Simplify if count is constant.
358
395
  auto Arg1 = II.getArgOperand(1);
359
395
  auto CAZ = dyn_cast<ConstantAggregateZero>(Arg1);
360
395
  auto CDV = dyn_cast<ConstantDataVector>(Arg1);
361
395
  auto CInt = dyn_cast<ConstantInt>(Arg1);
362
395
  if (!CAZ && !CDV && !CInt)
363
132
    return nullptr;
364
263
365
263
  APInt Count(64, 0);
366
263
  if (CDV) {
367
107
    // SSE2/AVX2 uses all the first 64-bits of the 128-bit vector
368
107
    // operand to compute the shift amount.
369
107
    auto VT = cast<VectorType>(CDV->getType());
370
107
    unsigned BitWidth = VT->getElementType()->getPrimitiveSizeInBits();
371
107
    assert((64 % BitWidth) == 0 && "Unexpected packed shift size");
372
107
    unsigned NumSubElts = 64 / BitWidth;
373
107
374
107
    // Concatenate the sub-elements to create the 64-bit value.
375
377
    for (unsigned i = 0; i != NumSubElts; ++i) {
376
270
      unsigned SubEltIdx = (NumSubElts - 1) - i;
377
270
      auto SubElt = cast<ConstantInt>(CDV->getElementAsConstant(SubEltIdx));
378
270
      Count <<= BitWidth;
379
270
      Count |= SubElt->getValue().zextOrTrunc(64);
380
270
    }
381
107
  }
382
156
  else if (CInt)
383
129
    Count = CInt->getValue();
384
263
385
263
  auto Vec = II.getArgOperand(0);
386
263
  auto VT = cast<VectorType>(Vec->getType());
387
263
  auto SVT = VT->getElementType();
388
263
  unsigned VWidth = VT->getNumElements();
389
263
  unsigned BitWidth = SVT->getPrimitiveSizeInBits();
390
263
391
263
  // If shift-by-zero then just return the original value.
392
263
  if (Count.isNullValue())
393
71
    return Vec;
394
192
395
192
  // Handle cases when Shift >= BitWidth.
396
192
  if (Count.uge(BitWidth)) {
397
96
    // If LogicalShift - just return zero.
398
96
    if (LogicalShift)
399
72
      return ConstantAggregateZero::get(VT);
400
24
401
24
    // If ArithmeticShift - clamp Shift to (BitWidth - 1).
402
24
    Count = APInt(64, BitWidth - 1);
403
24
  }
404
192
405
192
  // Get a constant vector of the same type as the first operand.
406
192
  auto ShiftAmt = ConstantInt::get(SVT, Count.zextOrTrunc(BitWidth));
407
120
  auto ShiftVec = Builder.CreateVectorSplat(VWidth, ShiftAmt);
408
120
409
120
  if (ShiftLeft)
410
30
    return Builder.CreateShl(Vec, ShiftVec);
411
90
412
90
  if (LogicalShift)
413
30
    return Builder.CreateLShr(Vec, ShiftVec);
414
60
415
60
  return Builder.CreateAShr(Vec, ShiftVec);
416
60
}
417
418
// Attempt to simplify AVX2 per-element shift intrinsics to a generic IR shift.
419
// Unlike the generic IR shifts, the intrinsics have defined behaviour for out
420
// of range shift amounts (logical - set to zero, arithmetic - splat sign bit).
421
static Value *simplifyX86varShift(const IntrinsicInst &II,
422
126
                                  InstCombiner::BuilderTy &Builder) {
423
126
  bool LogicalShift = false;
424
126
  bool ShiftLeft = false;
425
126
426
126
  switch (II.getIntrinsicID()) {
427
126
  
  default: llvm_unreachable("Unexpected intrinsic!");
428
126
  case Intrinsic::x86_avx2_psrav_d:
429
36
  case Intrinsic::x86_avx2_psrav_d_256:
430
36
  case Intrinsic::x86_avx512_psrav_q_128:
431
36
  case Intrinsic::x86_avx512_psrav_q_256:
432
36
  case Intrinsic::x86_avx512_psrav_d_512:
433
36
  case Intrinsic::x86_avx512_psrav_q_512:
434
36
  case Intrinsic::x86_avx512_psrav_w_128:
435
36
  case Intrinsic::x86_avx512_psrav_w_256:
436
36
  case Intrinsic::x86_avx512_psrav_w_512:
437
36
    LogicalShift = false;
438
36
    ShiftLeft = false;
439
36
    break;
440
45
  case Intrinsic::x86_avx2_psrlv_d:
441
45
  case Intrinsic::x86_avx2_psrlv_d_256:
442
45
  case Intrinsic::x86_avx2_psrlv_q:
443
45
  case Intrinsic::x86_avx2_psrlv_q_256:
444
45
  case Intrinsic::x86_avx512_psrlv_d_512:
445
45
  case Intrinsic::x86_avx512_psrlv_q_512:
446
45
  case Intrinsic::x86_avx512_psrlv_w_128:
447
45
  case Intrinsic::x86_avx512_psrlv_w_256:
448
45
  case Intrinsic::x86_avx512_psrlv_w_512:
449
45
    LogicalShift = true;
450
45
    ShiftLeft = false;
451
45
    break;
452
45
  case Intrinsic::x86_avx2_psllv_d:
453
45
  case Intrinsic::x86_avx2_psllv_d_256:
454
45
  case Intrinsic::x86_avx2_psllv_q:
455
45
  case Intrinsic::x86_avx2_psllv_q_256:
456
45
  case Intrinsic::x86_avx512_psllv_d_512:
457
45
  case Intrinsic::x86_avx512_psllv_q_512:
458
45
  case Intrinsic::x86_avx512_psllv_w_128:
459
45
  case Intrinsic::x86_avx512_psllv_w_256:
460
45
  case Intrinsic::x86_avx512_psllv_w_512:
461
45
    LogicalShift = true;
462
45
    ShiftLeft = true;
463
45
    break;
464
126
  }
465
126
  assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");
466
126
467
126
  // Simplify if all shift amounts are constant/undef.
468
126
  auto *CShift = dyn_cast<Constant>(II.getArgOperand(1));
469
126
  if (!CShift)
470
0
    return nullptr;
471
126
472
126
  auto Vec = II.getArgOperand(0);
473
126
  auto VT = cast<VectorType>(II.getType());
474
126
  auto SVT = VT->getVectorElementType();
475
126
  int NumElts = VT->getNumElements();
476
126
  int BitWidth = SVT->getIntegerBitWidth();
477
126
478
126
  // Collect each element's shift amount.
479
126
  // We also collect special cases: UNDEF = -1, OUT-OF-RANGE = BitWidth.
480
126
  bool AnyOutOfRange = false;
481
126
  SmallVector<int, 8> ShiftAmts;
482
1.49k
  for (int I = 0; I < NumElts; ++I) {
483
1.37k
    auto *CElt = CShift->getAggregateElement(I);
484
1.37k
    if (CElt && isa<UndefValue>(CElt)) {
485
67
      ShiftAmts.push_back(-1);
486
67
      continue;
487
67
    }
488
1.30k
489
1.30k
    auto *COp = dyn_cast_or_null<ConstantInt>(CElt);
490
1.30k
    if (!COp)
491
0
      return nullptr;
492
1.30k
493
1.30k
    // Handle out of range shifts.
494
1.30k
    // If LogicalShift - set to BitWidth (special case).
495
1.30k
    // If ArithmeticShift - set to (BitWidth - 1) (sign splat).
496
1.30k
    APInt ShiftVal = COp->getValue();
497
1.30k
    if (ShiftVal.uge(BitWidth)) {
498
284
      AnyOutOfRange = LogicalShift;
499
284
      ShiftAmts.push_back(LogicalShift ? BitWidth : BitWidth - 1);
500
284
      continue;
501
284
    }
502
1.02k
503
1.02k
    ShiftAmts.push_back((int)ShiftVal.getZExtValue());
504
1.02k
  }
505
126
506
126
  // If all elements out of range or UNDEF, return vector of zeros/undefs.
507
126
  // ArithmeticShift should only hit this if they are all UNDEF.
508
325
  auto OutOfRange = [&](int Idx) { return (Idx < 0) || (BitWidth <= Idx); };
509
126
  if (llvm::all_of(ShiftAmts, OutOfRange)) {
510
18
    SmallVector<Constant *, 8> ConstantVec;
511
196
    for (int Idx : ShiftAmts) {
512
196
      if (Idx < 0) {
513
26
        ConstantVec.push_back(UndefValue::get(SVT));
514
170
      } else {
515
170
        assert(LogicalShift && "Logical shift expected");
516
170
        ConstantVec.push_back(ConstantInt::getNullValue(SVT));
517
170
      }
518
196
    }
519
18
    return ConstantVector::get(ConstantVec);
520
18
  }
521
108
522
108
  // We can't handle only some out of range values with generic logical shifts.
523
108
  if (AnyOutOfRange)
524
18
    return nullptr;
525
90
526
90
  // Build the shift amount constant vector.
527
90
  SmallVector<Constant *, 8> ShiftVecAmts;
528
980
  for (int Idx : ShiftAmts) {
529
980
    if (Idx < 0)
530
41
      ShiftVecAmts.push_back(UndefValue::get(SVT));
531
939
    else
532
939
      ShiftVecAmts.push_back(ConstantInt::get(SVT, Idx));
533
980
  }
534
90
  auto ShiftVec = ConstantVector::get(ShiftVecAmts);
535
90
536
90
  if (ShiftLeft)
537
27
    return Builder.CreateShl(Vec, ShiftVec);
538
63
539
63
  if (LogicalShift)
540
27
    return Builder.CreateLShr(Vec, ShiftVec);
541
36
542
36
  return Builder.CreateAShr(Vec, ShiftVec);
543
36
}
544
545
static Value *simplifyX86pack(IntrinsicInst &II,
546
57
                              InstCombiner::BuilderTy &Builder, bool IsSigned) {
547
57
  Value *Arg0 = II.getArgOperand(0);
548
57
  Value *Arg1 = II.getArgOperand(1);
549
57
  Type *ResTy = II.getType();
550
57
551
57
  // Fast all undef handling.
552
57
  if (isa<UndefValue>(Arg0) && isa<UndefValue>(Arg1))
553
0
    return UndefValue::get(ResTy);
554
57
555
57
  Type *ArgTy = Arg0->getType();
556
57
  unsigned NumLanes = ResTy->getPrimitiveSizeInBits() / 128;
557
57
  unsigned NumSrcElts = ArgTy->getVectorNumElements();
558
57
  assert(ResTy->getVectorNumElements() == (2 * NumSrcElts) &&
559
57
         "Unexpected packing types");
560
57
561
57
  unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
562
57
  unsigned DstScalarSizeInBits = ResTy->getScalarSizeInBits();
563
57
  unsigned SrcScalarSizeInBits = ArgTy->getScalarSizeInBits();
564
57
  assert(SrcScalarSizeInBits == (2 * DstScalarSizeInBits) &&
565
57
         "Unexpected packing types");
566
57
567
57
  // Constant folding.
568
57
  if (!isa<Constant>(Arg0) || !isa<Constant>(Arg1))
569
39
    return nullptr;
570
18
571
18
  // Clamp Values - signed/unsigned both use signed clamp values, but they
572
18
  // differ on the min/max values.
573
18
  APInt MinValue, MaxValue;
574
18
  if (IsSigned) {
575
9
    // PACKSS: Truncate signed value with signed saturation.
576
9
    // Source values less than dst minint are saturated to minint.
577
9
    // Source values greater than dst maxint are saturated to maxint.
578
9
    MinValue =
579
9
        APInt::getSignedMinValue(DstScalarSizeInBits).sext(SrcScalarSizeInBits);
580
9
    MaxValue =
581
9
        APInt::getSignedMaxValue(DstScalarSizeInBits).sext(SrcScalarSizeInBits);
582
9
  } else {
583
9
    // PACKUS: Truncate signed value with unsigned saturation.
584
9
    // Source values less than zero are saturated to zero.
585
9
    // Source values greater than dst maxuint are saturated to maxuint.
586
9
    MinValue = APInt::getNullValue(SrcScalarSizeInBits);
587
9
    MaxValue = APInt::getLowBitsSet(SrcScalarSizeInBits, DstScalarSizeInBits);
588
9
  }
589
18
590
18
  auto *MinC = Constant::getIntegerValue(ArgTy, MinValue);
591
18
  auto *MaxC = Constant::getIntegerValue(ArgTy, MaxValue);
592
18
  Arg0 = Builder.CreateSelect(Builder.CreateICmpSLT(Arg0, MinC), MinC, Arg0);
593
18
  Arg1 = Builder.CreateSelect(Builder.CreateICmpSLT(Arg1, MinC), MinC, Arg1);
594
18
  Arg0 = Builder.CreateSelect(Builder.CreateICmpSGT(Arg0, MaxC), MaxC, Arg0);
595
18
  Arg1 = Builder.CreateSelect(Builder.CreateICmpSGT(Arg1, MaxC), MaxC, Arg1);
596
18
597
18
  // Shuffle clamped args together at the lane level.
598
18
  SmallVector<unsigned, 32> PackMask;
599
60
  for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
600
322
    for (unsigned Elt = 0; Elt != NumSrcEltsPerLane; ++Elt)
601
280
      PackMask.push_back(Elt + (Lane * NumSrcEltsPerLane));
602
322
    for (unsigned Elt = 0; Elt != NumSrcEltsPerLane; ++Elt)
603
280
      PackMask.push_back(Elt + (Lane * NumSrcEltsPerLane) + NumSrcElts);
604
42
  }
605
18
  auto *Shuffle = Builder.CreateShuffleVector(Arg0, Arg1, PackMask);
606
18
607
18
  // Truncate to dst size.
608
18
  return Builder.CreateTrunc(Shuffle, ResTy);
609
18
}
610
611
static Value *simplifyX86movmsk(const IntrinsicInst &II,
612
51
                                InstCombiner::BuilderTy &Builder) {
613
51
  Value *Arg = II.getArgOperand(0);
614
51
  Type *ResTy = II.getType();
615
51
  Type *ArgTy = Arg->getType();
616
51
617
51
  // movmsk(undef) -> zero as we must ensure the upper bits are zero.
618
51
  if (isa<UndefValue>(Arg))
619
7
    return Constant::getNullValue(ResTy);
620
44
621
44
  // We can't easily peek through x86_mmx types.
622
44
  if (!ArgTy->isVectorTy())
623
6
    return nullptr;
624
38
625
38
  // Expand MOVMSK to compare/bitcast/zext:
626
38
  // e.g. PMOVMSKB(v16i8 x):
627
38
  // %cmp = icmp slt <16 x i8> %x, zeroinitializer
628
38
  // %int = bitcast <16 x i1> %cmp to i16
629
38
  // %res = zext i16 %int to i32
630
38
  unsigned NumElts = ArgTy->getVectorNumElements();
631
38
  Type *IntegerVecTy = VectorType::getInteger(cast<VectorType>(ArgTy));
632
38
  Type *IntegerTy = Builder.getIntNTy(NumElts);
633
38
634
38
  Value *Res = Builder.CreateBitCast(Arg, IntegerVecTy);
635
38
  Res = Builder.CreateICmpSLT(Res, Constant::getNullValue(IntegerVecTy));
636
38
  Res = Builder.CreateBitCast(Res, IntegerTy);
637
38
  Res = Builder.CreateZExtOrTrunc(Res, ResTy);
638
38
  return Res;
639
38
}
640
641
static Value *simplifyX86addcarry(const IntrinsicInst &II,
642
2
                                  InstCombiner::BuilderTy &Builder) {
643
2
  Value *CarryIn = II.getArgOperand(0);
644
2
  Value *Op1 = II.getArgOperand(1);
645
2
  Value *Op2 = II.getArgOperand(2);
646
2
  Type *RetTy = II.getType();
647
2
  Type *OpTy = Op1->getType();
648
2
  assert(RetTy->getStructElementType(0)->isIntegerTy(8) &&
649
2
         RetTy->getStructElementType(1) == OpTy && OpTy == Op2->getType() &&
650
2
         "Unexpected types for x86 addcarry");
651
2
652
2
  // If carry-in is zero, this is just an unsigned add with overflow.
653
2
  if (match(CarryIn, m_ZeroInt())) {
654
2
    Value *UAdd = Builder.CreateIntrinsic(Intrinsic::uadd_with_overflow, OpTy,
655
2
                                          { Op1, Op2 });
656
2
    // The types have to be adjusted to match the x86 call types.
657
2
    Value *UAddResult = Builder.CreateExtractValue(UAdd, 0);
658
2
    Value *UAddOV = Builder.CreateZExt(Builder.CreateExtractValue(UAdd, 1),
659
2
                                       Builder.getInt8Ty());
660
2
    Value *Res = UndefValue::get(RetTy);
661
2
    Res = Builder.CreateInsertValue(Res, UAddOV, 0);
662
2
    return Builder.CreateInsertValue(Res, UAddResult, 1);
663
2
  }
664
0
665
0
  return nullptr;
666
0
}
667
668
static Value *simplifyX86insertps(const IntrinsicInst &II,
669
14
                                  InstCombiner::BuilderTy &Builder) {
670
14
  auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2));
671
14
  if (!CInt)
672
0
    return nullptr;
673
14
674
14
  VectorType *VecTy = cast<VectorType>(II.getType());
675
14
  assert(VecTy->getNumElements() == 4 && "insertps with wrong vector type");
676
14
677
14
  // The immediate permute control byte looks like this:
678
14
  //    [3:0] - zero mask for each 32-bit lane
679
14
  //    [5:4] - select one 32-bit destination lane
680
14
  //    [7:6] - select one 32-bit source lane
681
14
682
14
  uint8_t Imm = CInt->getZExtValue();
683
14
  uint8_t ZMask = Imm & 0xf;
684
14
  uint8_t DestLane = (Imm >> 4) & 0x3;
685
14
  uint8_t SourceLane = (Imm >> 6) & 0x3;
686
14
687
14
  ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);
688
14
689
14
  // If all zero mask bits are set, this was just a weird way to
690
14
  // generate a zero vector.
691
14
  if (ZMask == 0xf)
692
2
    return ZeroVector;
693
12
694
12
  // Initialize by passing all of the first source bits through.
695
12
  uint32_t ShuffleMask[4] = { 0, 1, 2, 3 };
696
12
697
12
  // We may replace the second operand with the zero vector.
698
12
  Value *V1 = II.getArgOperand(1);
699
12
700
12
  if (ZMask) {
701
4
    // If the zero mask is being used with a single input or the zero mask
702
4
    // overrides the destination lane, this is a shuffle with the zero vector.
703
4
    if ((II.getArgOperand(0) == II.getArgOperand(1)) ||
704
4
        
        (ZMask & (1 << DestLane))) {
705
3
      V1 = ZeroVector;
706
3
      // We may still move 32-bits of the first source vector from one lane
707
3
      // to another.
708
3
      ShuffleMask[DestLane] = SourceLane;
709
3
      // The zero mask may override the previous insert operation.
710
15
      for (unsigned i = 0; i < 4; ++i)
711
12
        if ((ZMask >> i) & 0x1)
712
5
          ShuffleMask[i] = i + 4;
713
3
    } else {
714
1
      // TODO: Model this case as 2 shuffles or a 'logical and' plus shuffle?
715
1
      return nullptr;
716
1
    }
717
8
  } else {
718
8
    // Replace the selected destination lane with the selected source lane.
719
8
    ShuffleMask[DestLane] = SourceLane + 4;
720
8
  }
721
12
722
12
  
  return Builder.CreateShuffleVector(II.getArgOperand(0), V1, ShuffleMask);
723
12
}
724
725
/// Attempt to simplify SSE4A EXTRQ/EXTRQI instructions using constant folding
726
/// or conversion to a shuffle vector.
727
static Value *simplifyX86extrq(IntrinsicInst &II, Value *Op0,
728
                               ConstantInt *CILength, ConstantInt *CIIndex,
729
35
                               InstCombiner::BuilderTy &Builder) {
730
35
  auto LowConstantHighUndef = [&](uint64_t Val) {
731
6
    Type *IntTy64 = Type::getInt64Ty(II.getContext());
732
6
    Constant *Args[] = {ConstantInt::get(IntTy64, Val),
733
6
                        UndefValue::get(IntTy64)};
734
6
    return ConstantVector::get(Args);
735
6
  };
736
35
737
35
  // See if we're dealing with constant values.
738
35
  Constant *C0 = dyn_cast<Constant>(Op0);
739
35
  ConstantInt *CI0 =
740
35
      C0 ? dyn_cast_or_null<ConstantInt>(C0->getAggregateElement((unsigned)0))
741
35
         : nullptr;
742
35
743
35
  // Attempt to constant fold.
744
35
  if (CILength && CIIndex) {
745
20
    // From AMD documentation: "The bit index and field length are each six
746
20
    // bits in length other bits of the field are ignored."
747
20
    APInt APIndex = CIIndex->getValue().zextOrTrunc(6);
748
20
    APInt APLength = CILength->getValue().zextOrTrunc(6);
749
20
750
20
    unsigned Index = APIndex.getZExtValue();
751
20
752
20
    // From AMD documentation: "a value of zero in the field length is
753
20
    // defined as length of 64".
754
20
    unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();
755
20
756
20
    // From AMD documentation: "If the sum of the bit index + length field
757
20
    // is greater than 64, the results are undefined".
758
20
    unsigned End = Index + Length;
759
20
760
20
    // Note that both field index and field length are 8-bit quantities.
761
20
    // Since variables 'Index' and 'Length' are unsigned values
762
20
    // obtained from zero-extending field index and field length
763
20
    // respectively, their sum should never wrap around.
764
20
    if (End > 64)
765
1
      return UndefValue::get(II.getType());
766
19
767
19
    // If we are inserting whole bytes, we can convert this to a shuffle.
768
19
    // Lowering can recognize EXTRQI shuffle masks.
769
19
    if ((Length % 8) == 0 && (Index % 8) == 0) {
770
5
      // Convert bit indices to byte indices.
771
5
      Length /= 8;
772
5
      Index /= 8;
773
5
774
5
      Type *IntTy8 = Type::getInt8Ty(II.getContext());
775
5
      Type *IntTy32 = Type::getInt32Ty(II.getContext());
776
5
      VectorType *ShufTy = VectorType::get(IntTy8, 16);
777
5
778
5
      SmallVector<Constant *, 16> ShuffleMask;
779
27
      for (int i = 0; i != (int)Length; ++i)
780
22
        ShuffleMask.push_back(
781
22
            Constant::getIntegerValue(IntTy32, APInt(32, i + Index)));
782
23
      for (int i = Length; i != 8; ++i)
783
18
        ShuffleMask.push_back(
784
18
            Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
785
45
      for (int i = 8; i != 16; ++i)
786
40
        ShuffleMask.push_back(UndefValue::get(IntTy32));
787
5
788
5
      Value *SV = Builder.CreateShuffleVector(
789
5
          Builder.CreateBitCast(Op0, ShufTy),
790
5
          ConstantAggregateZero::get(ShufTy), ConstantVector::get(ShuffleMask));
791
5
      return Builder.CreateBitCast(SV, II.getType());
792
5
    }
793
14
794
14
    // Constant Fold - shift Index'th bit to lowest position and mask off
795
14
    // Length bits.
796
14
    if (CI0) {
797
5
      APInt Elt = CI0->getValue();
798
5
      Elt.lshrInPlace(Index);
799
5
      Elt = Elt.zextOrTrunc(Length);
800
5
      return LowConstantHighUndef(Elt.getZExtValue());
801
5
    }
802
9
803
9
    // If we were an EXTRQ call, we'll save registers if we convert to EXTRQI.
804
9
    if (II.getIntrinsicID() == Intrinsic::x86_sse4a_extrq) {
805
1
      Value *Args[] = {Op0, CILength, CIIndex};
806
1
      Module *M = II.getModule();
807
1
      Function *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_extrqi);
808
1
      return Builder.CreateCall(F, Args);
809
1
    }
810
23
  }
811
23
812
23
  // Constant Fold - extraction from zero is always {zero, undef}.
813
23
  if (CI0 && CI0->isZero())
814
1
    return LowConstantHighUndef(0);
815
22
816
22
  return nullptr;
817
22
}
818
819
/// Attempt to simplify SSE4A INSERTQ/INSERTQI instructions using constant
820
/// folding or conversion to a shuffle vector.
821
static Value *simplifyX86insertq(IntrinsicInst &II, Value *Op0, Value *Op1,
822
                                 APInt APLength, APInt APIndex,
823
34
                                 InstCombiner::BuilderTy &Builder) {
824
34
  // From AMD documentation: "The bit index and field length are each six bits
825
34
  // in length other bits of the field are ignored."
826
34
  APIndex = APIndex.zextOrTrunc(6);
827
34
  APLength = APLength.zextOrTrunc(6);
828
34
829
34
  // Attempt to constant fold.
830
34
  unsigned Index = APIndex.getZExtValue();
831
34
832
34
  // From AMD documentation: "a value of zero in the field length is
833
34
  // defined as length of 64".
834
34
  unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();
835
34
836
34
  // From AMD documentation: "If the sum of the bit index + length field
837
34
  // is greater than 64, the results are undefined".
838
34
  unsigned End = Index + Length;
839
34
840
34
  // Note that both field index and field length are 8-bit quantities.
841
34
  // Since variables 'Index' and 'Length' are unsigned values
842
34
  // obtained from zero-extending field index and field length
843
34
  // respectively, their sum should never wrap around.
844
34
  if (End > 64)
845
3
    return UndefValue::get(II.getType());
846
31
847
31
  // If we are inserting whole bytes, we can convert this to a shuffle.
848
31
  // Lowering can recognize INSERTQI shuffle masks.
849
31
  if ((Length % 8) == 0 && (Index % 8) == 0) {
850
4
    // Convert bit indices to byte indices.
851
4
    Length /= 8;
852
4
    Index /= 8;
853
4
854
4
    Type *IntTy8 = Type::getInt8Ty(II.getContext());
855
4
    Type *IntTy32 = Type::getInt32Ty(II.getContext());
856
4
    VectorType *ShufTy = VectorType::get(IntTy8, 16);
857
4
858
4
    SmallVector<Constant *, 16> ShuffleMask;
859
8
    for (int i = 0; i != (int)Index; ++i)
860
4
      ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
861
26
    for (int i = 0; i != (int)Length; ++i)
862
22
      ShuffleMask.push_back(
863
22
          Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
864
10
    for (int i = Index + Length; i != 8; ++i)
865
6
      ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
866
36
    for (int i = 8; i != 16; ++i)
867
32
      ShuffleMask.push_back(UndefValue::get(IntTy32));
868
4
869
4
    Value *SV = Builder.CreateShuffleVector(Builder.CreateBitCast(Op0, ShufTy),
870
4
                                            Builder.CreateBitCast(Op1, ShufTy),
871
4
                                            ConstantVector::get(ShuffleMask));
872
4
    return Builder.CreateBitCast(SV, II.getType());
873
4
  }
874
27
875
27
  // See if we're dealing with constant values.
876
27
  Constant *C0 = dyn_cast<Constant>(Op0);
877
27
  Constant *C1 = dyn_cast<Constant>(Op1);
878
27
  ConstantInt *CI00 =
879
27
      C0 ? dyn_cast_or_null<ConstantInt>(C0->getAggregateElement((unsigned)0))
880
27
         : nullptr;
881
27
  ConstantInt *CI10 =
882
27
      C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)0))
883
27
         : nullptr;
884
27
885
27
  // Constant Fold - insert bottom Length bits starting at the Index'th bit.
886
27
  if (CI00 && CI10) {
887
3
    APInt V00 = CI00->getValue();
888
3
    APInt V10 = CI10->getValue();
889
3
    APInt Mask = APInt::getLowBitsSet(64, Length).shl(Index);
890
3
    V00 = V00 & ~Mask;
891
3
    V10 = V10.zextOrTrunc(Length).zextOrTrunc(64).shl(Index);
892
3
    APInt Val = V00 | V10;
893
3
    Type *IntTy64 = Type::getInt64Ty(II.getContext());
894
3
    Constant *Args[] = {ConstantInt::get(IntTy64, Val.getZExtValue()),
895
3
                        UndefValue::get(IntTy64)};
896
3
    return ConstantVector::get(Args);
897
3
  }
898
24
899
24
  // If we were an INSERTQ call, we'll save demanded elements if we convert to
900
24
  // INSERTQI.
901
24
  if (II.getIntrinsicID() == Intrinsic::x86_sse4a_insertq) {
902
2
    Type *IntTy8 = Type::getInt8Ty(II.getContext());
903
2
    Constant *CILength = ConstantInt::get(IntTy8, Length, false);
904
2
    Constant *CIIndex = ConstantInt::get(IntTy8, Index, false);
905
2
906
2
    Value *Args[] = {Op0, Op1, CILength, CIIndex};
907
2
    Module *M = II.getModule();
908
2
    Function *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
909
2
    return Builder.CreateCall(F, Args);
910
2
  }
911
22
912
22
  return nullptr;
913
22
}
914
915
/// Attempt to convert pshufb* to shufflevector if the mask is constant.
916
static Value *simplifyX86pshufb(const IntrinsicInst &II,
917
55
                                InstCombiner::BuilderTy &Builder) {
918
55
  Constant *V = dyn_cast<Constant>(II.getArgOperand(1));
919
55
  if (!V)
920
7
    return nullptr;
921
48
922
48
  auto *VecTy = cast<VectorType>(II.getType());
923
48
  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
924
48
  unsigned NumElts = VecTy->getNumElements();
925
48
  assert((NumElts == 16 || NumElts == 32 || NumElts == 64) &&
926
48
         "Unexpected number of elements in shuffle mask!");
927
48
928
48
  // Construct a shuffle mask from constant integers or UNDEFs.
929
48
  Constant *Indexes[64] = {nullptr};
930
48
931
48
  // Each byte in the shuffle control mask forms an index to permute the
932
48
  // corresponding byte in the destination operand.
933
1.84k
  for (unsigned I = 0; I < NumElts; ++I) {
934
1.79k
    Constant *COp = V->getAggregateElement(I);
935
1.79k
    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
936
0
      return nullptr;
937
1.79k
938
1.79k
    if (isa<UndefValue>(COp)) {
939
28
      Indexes[I] = UndefValue::get(MaskEltTy);
940
28
      continue;
941
28
    }
942
1.76k
943
1.76k
    int8_t Index = cast<ConstantInt>(COp)->getValue().getZExtValue();
944
1.76k
945
1.76k
    // If the most significant bit (bit[7]) of each byte of the shuffle
946
1.76k
    // control mask is set, then zero is written in the result byte.
947
1.76k
    // The zero vector is in the right-hand side of the resulting
948
1.76k
    // shufflevector.
949
1.76k
950
1.76k
    // The value of each index for the high 128-bit lane is the least
951
1.76k
    // significant 4 bits of the respective shuffle control byte.
952
1.76k
    Index = ((Index < 0) ? NumElts : Index & 0x0F) + (I & 0xF0);
953
1.76k
    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
954
1.76k
  }
955
48
956
48
  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, NumElts));
957
48
  auto V1 = II.getArgOperand(0);
958
48
  auto V2 = Constant::getNullValue(VecTy);
959
48
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
960
48
}
961
962
/// Attempt to convert vpermilvar* to shufflevector if the mask is constant.
963
static Value *simplifyX86vpermilvar(const IntrinsicInst &II,
964
39
                                    InstCombiner::BuilderTy &Builder) {
965
39
  Constant *V = dyn_cast<Constant>(II.getArgOperand(1));
966
39
  if (!V)
967
11
    return nullptr;
968
28
969
28
  auto *VecTy = cast<VectorType>(II.getType());
970
28
  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
971
28
  unsigned NumElts = VecTy->getVectorNumElements();
972
28
  bool IsPD = VecTy->getScalarType()->isDoubleTy();
973
28
  unsigned NumLaneElts = IsPD ? 2 : 4;
974
28
  assert(NumElts == 16 || NumElts == 8 || NumElts == 4 || NumElts == 2);
975
28
976
28
  // Construct a shuffle mask from constant integers or UNDEFs.
977
28
  Constant *Indexes[16] = {nullptr};
978
28
979
28
  // The intrinsics only read one or two bits, clear the rest.
980
214
  for (unsigned I = 0; I < NumElts; ++I) {
981
186
    Constant *COp = V->getAggregateElement(I);
982
186
    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
983
0
      return nullptr;
984
186
985
186
    if (isa<UndefValue>(COp)) {
986
22
      Indexes[I] = UndefValue::get(MaskEltTy);
987
22
      continue;
988
22
    }
989
164
990
164
    APInt Index = cast<ConstantInt>(COp)->getValue();
991
164
    Index = Index.zextOrTrunc(32).getLoBits(2);
992
164
993
164
    // The PD variants uses bit 1 to select per-lane element index, so
994
164
    // shift down to convert to generic shuffle mask index.
995
164
    if (IsPD)
996
53
      Index.lshrInPlace(1);
997
164
998
164
    // The _256 variants are a bit trickier since the mask bits always index
999
164
    // into the corresponding 128 half. In order to convert to a generic
1000
164
    // shuffle, we have to make that explicit.
1001
164
    Index += APInt(32, (I / NumLaneElts) * NumLaneElts);
1002
164
1003
164
    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
1004
164
  }
1005
28
1006
28
  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, NumElts));
1007
28
  auto V1 = II.getArgOperand(0);
1008
28
  auto V2 = UndefValue::get(V1->getType());
1009
28
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
1010
28
}
1011
1012
/// Attempt to convert vpermd/vpermps to shufflevector if the mask is constant.
1013
static Value *simplifyX86vpermv(const IntrinsicInst &II,
1014
124
                                InstCombiner::BuilderTy &Builder) {
1015
124
  auto *V = dyn_cast<Constant>(II.getArgOperand(1));
1016
124
  if (!V)
1017
3
    return nullptr;
1018
121
1019
121
  auto *VecTy = cast<VectorType>(II.getType());
1020
121
  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
1021
121
  unsigned Size = VecTy->getNumElements();
1022
121
  assert((Size == 4 || Size == 8 || Size == 16 || Size == 32 || Size == 64) &&
1023
121
         "Unexpected shuffle mask size");
1024
121
1025
121
  // Construct a shuffle mask from constant integers or UNDEFs.
1026
121
  Constant *Indexes[64] = {nullptr};
1027
121
1028
2.11k
  for (unsigned I = 0; I < Size; ++I) {
1029
1.99k
    Constant *COp = V->getAggregateElement(I);
1030
1.99k
    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
1031
0
      return nullptr;
1032
1.99k
1033
1.99k
    if (isa<UndefValue>(COp)) {
1034
31
      Indexes[I] = UndefValue::get(MaskEltTy);
1035
31
      continue;
1036
31
    }
1037
1.96k
1038
1.96k
    uint32_t Index = cast<ConstantInt>(COp)->getZExtValue();
1039
1.96k
    Index &= Size - 1;
1040
1.96k
    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
1041
1.96k
  }
1042
121
1043
121
  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, Size));
1044
121
  auto V1 = II.getArgOperand(0);
1045
121
  auto V2 = UndefValue::get(VecTy);
1046
121
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
1047
121
}
1048
1049
// TODO, Obvious Missing Transforms:
1050
// * Narrow width by halfs excluding zero/undef lanes
1051
91
Value *InstCombiner::simplifyMaskedLoad(IntrinsicInst &II) {
1052
91
  Value *LoadPtr = II.getArgOperand(0);
1053
91
  unsigned Alignment = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();
1054
91
1055
91
  // If the mask is all ones or undefs, this is a plain vector load of the 1st
1056
91
  // argument.
1057
91
  if (maskIsAllOneOrUndef(II.getArgOperand(2)))
1058
3
    return Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
1059
3
                                     "unmaskedload");
1060
88
1061
88
  // If we can unconditionally load from this address, replace with a
1062
88
  // load/select idiom. TODO: use DT for context sensitive query
1063
88
  if (isDereferenceableAndAlignedPointer(LoadPtr, II.getType(), Alignment,
1064
88
                                         II.getModule()->getDataLayout(),
1065
88
                                         &II, nullptr)) {
1066
1
    Value *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
1067
1
                                         "unmaskedload");
1068
1
    return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
1069
1
  }
1070
87
1071
87
  return nullptr;
1072
87
}
1073
1074
// TODO, Obvious Missing Transforms:
1075
// * Single constant active lane -> store
1076
// * Narrow width by halfs excluding zero/undef lanes
1077
216
Instruction *InstCombiner::simplifyMaskedStore(IntrinsicInst &II) {
1078
216
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
1079
216
  if (!ConstMask)
1080
200
    return nullptr;
1081
16
1082
16
  // If the mask is all zeros, this instruction does nothing.
1083
16
  if (ConstMask->isNullValue())
1084
2
    return eraseInstFromFunction(II);
1085
14
1086
14
  // If the mask is all ones, this is a plain vector store of the 1st argument.
1087
14
  if (ConstMask->isAllOnesValue()) {
1088
2
    Value *StorePtr = II.getArgOperand(1);
1089
2
    unsigned Alignment = cast<ConstantInt>(II.getArgOperand(2))->getZExtValue();
1090
2
    return new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
1091
2
  }
1092
12
1093
12
  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
1094
12
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
1095
12
  APInt UndefElts(DemandedElts.getBitWidth(), 0);
1096
12
  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0),
1097
1
                                            DemandedElts, UndefElts)) {
1098
1
    II.setOperand(0, V);
1099
1
    return &II;
1100
1
  }
1101
11
1102
11
  return nullptr;
1103
11
}
1104
1105
// TODO, Obvious Missing Transforms:
1106
// * Single constant active lane load -> load
1107
// * Dereferenceable address & few lanes -> scalarize speculative load/selects
1108
// * Adjacent vector addresses -> masked.load
1109
// * Narrow width by halfs excluding zero/undef lanes
1110
// * Vector splat address w/known mask -> scalar load
1111
// * Vector incrementing address -> vector masked load
1112
384
Instruction *InstCombiner::simplifyMaskedGather(IntrinsicInst &II) {
1113
384
  return nullptr;
1114
384
}
1115
1116
// TODO, Obvious Missing Transforms:
1117
// * Single constant active lane -> store
1118
// * Adjacent vector addresses -> masked.store
1119
// * Narrow store width by halfs excluding zero/undef lanes
1120
// * Vector splat address w/known mask -> scalar store
1121
// * Vector incrementing address -> vector masked store
1122
100
Instruction *InstCombiner::simplifyMaskedScatter(IntrinsicInst &II) {
1123
100
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
1124
100
  if (!ConstMask)
1125
94
    return nullptr;
1126
6
1127
6
  // If the mask is all zeros, a scatter does nothing.
1128
6
  if (ConstMask->isNullValue())
1129
1
    return eraseInstFromFunction(II);
1130
5
1131
5
  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
1132
5
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
1133
5
  APInt UndefElts(DemandedElts.getBitWidth(), 0);
1134
5
  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0),
1135
1
                                            DemandedElts, UndefElts)) {
1136
1
    II.setOperand(0, V);
1137
1
    return &II;
1138
1
  }
1139
4
  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(1),
1140
1
                                            DemandedElts, UndefElts)) {
1141
1
    II.setOperand(1, V);
1142
1
    return &II;
1143
1
  }
1144
3
1145
3
  return nullptr;
1146
3
}
1147
1148
/// This function transforms launder.invariant.group and strip.invariant.group
1149
/// like:
1150
/// launder(launder(%x)) -> launder(%x)       (the result is not the argument)
1151
/// launder(strip(%x)) -> launder(%x)
1152
/// strip(strip(%x)) -> strip(%x)             (the result is not the argument)
1153
/// strip(launder(%x)) -> strip(%x)
1154
/// This is legal because it preserves the most recent information about
1155
/// the presence or absence of invariant.group.
1156
static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
1157
170
                                                    InstCombiner &IC) {
1158
170
  auto *Arg = II.getArgOperand(0);
1159
170
  auto *StrippedArg = Arg->stripPointerCasts();
1160
170
  auto *StrippedInvariantGroupsArg = Arg->stripPointerCastsAndInvariantGroups();
1161
170
  if (StrippedArg == StrippedInvariantGroupsArg)
1162
152
    return nullptr; // No launders/strips to remove.
1163
18
1164
18
  Value *Result = nullptr;
1165
18
1166
18
  if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
1167
5
    Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
1168
13
  else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
1169
13
    Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
1170
13
  else
1171
13
    
    llvm_unreachable(
1172
18
        "simplifyInvariantGroupIntrinsic only handles launder and strip");
1173
18
  if (Result->getType()->getPointerAddressSpace() !=
1174
18
      II.getType()->getPointerAddressSpace())
1175
1
    Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());
1176
18
  if (Result->getType() != II.getType())
1177
2
    Result = IC.Builder.CreateBitCast(Result, II.getType());
1178
18
1179
18
  return cast<Instruction>(Result);
1180
18
}
1181
1182
54.5k
static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombiner &IC) {
1183
54.5k
  assert((II.getIntrinsicID() == Intrinsic::cttz ||
1184
54.5k
          II.getIntrinsicID() == Intrinsic::ctlz) &&
1185
54.5k
         "Expected cttz or ctlz intrinsic");
1186
54.5k
  bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
1187
54.5k
  Value *Op0 = II.getArgOperand(0);
1188
54.5k
  Value *X;
1189
54.5k
  // ctlz(bitreverse(x)) -> cttz(x)
1190
54.5k
  // cttz(bitreverse(x)) -> ctlz(x)
1191
54.5k
  if (match(Op0, m_BitReverse(m_Value(X)))) {
1192
6
    Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz;
1193
6
    Function *F = Intrinsic::getDeclaration(II.getModule(), ID, II.getType());
1194
6
    return CallInst::Create(F, {X, II.getArgOperand(1)});
1195
6
  }
1196
54.5k
1197
54.5k
  if (IsTZ) {
1198
12.4k
    // cttz(-x) -> cttz(x)
1199
12.4k
    if (match(Op0, m_Neg(m_Value(X)))) {
1200
5
      II.setOperand(0, X);
1201
5
      return &II;
1202
5
    }
1203
12.4k
1204
12.4k
    // cttz(abs(x)) -> cttz(x)
1205
12.4k
    // cttz(nabs(x)) -> cttz(x)
1206
12.4k
    Value *Y;
1207
12.4k
    SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
1208
12.4k
    if (SPF == SPF_ABS || SPF == SPF_NABS) {
1209
10
      II.setOperand(0, X);
1210
10
      return &II;
1211
10
    }
1212
54.4k
  }
1213
54.4k
1214
54.4k
  KnownBits Known = IC.computeKnownBits(Op0, 0, &II);
1215
54.4k
1216
54.4k
  // Create a mask for bits above (ctlz) or below (cttz) the first known one.
1217
54.4k
  unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
1218
54.4k
                                : Known.countMaxLeadingZeros();
1219
54.4k
  unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros()
1220
54.4k
                                : Known.countMinLeadingZeros();
1221
54.4k
1222
54.4k
  // If all bits above (ctlz) or below (cttz) the first known one are known
1223
54.4k
  // zero, this value is constant.
1224
54.4k
  // FIXME: This should be in InstSimplify because we're replacing an
1225
54.4k
  // instruction with a constant.
1226
54.4k
  if (PossibleZeros == DefiniteZeros) {
1227
4
    auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
1228
4
    return IC.replaceInstUsesWith(II, C);
1229
4
  }
1230
54.4k
1231
54.4k
  // If the input to cttz/ctlz is known to be non-zero,
1232
54.4k
  // then change the 'ZeroIsUndef' parameter to 'true'
1233
54.4k
  // because we know the zero behavior can't affect the result.
1234
54.4k
  if (!Known.One.isNullValue() ||
1235
54.4k
      isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
1236
54.4k
                     &IC.getDominatorTree())) {
1237
261
    if (!match(II.getArgOperand(1), m_One())) {
1238
5
      II.setOperand(1, IC.Builder.getTrue());
1239
5
      return &II;
1240
5
    }
1241
54.4k
  }
1242
54.4k
1243
54.4k
  // Add range metadata since known bits can't completely reflect what we know.
1244
54.4k
  // TODO: Handle splat vectors.
1245
54.4k
  auto *IT = dyn_cast<IntegerType>(Op0->getType());
1246
54.4k
  if (IT && IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
1247
3.00k
    Metadata *LowAndHigh[] = {
1248
3.00k
        ConstantAsMetadata::get(ConstantInt::get(IT, DefiniteZeros)),
1249
3.00k
        ConstantAsMetadata::get(ConstantInt::get(IT, PossibleZeros + 1))};
1250
3.00k
    II.setMetadata(LLVMContext::MD_range,
1251
3.00k
                   MDNode::get(II.getContext(), LowAndHigh));
1252
3.00k
    return &II;
1253
3.00k
  }
1254
51.4k
1255
51.4k
  return nullptr;
1256
51.4k
}
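The range metadata attached above comes straight from the known-bits analysis: for cttz the result is at least the number of trailing bits proven zero and at most the number of trailing bits not proven one, and when those two bounds meet the call folds to a constant. A minimal standalone sketch of that arithmetic on plain 32-bit masks (the helper names here are illustrative, not part of this file):

#include <cassert>
#include <cstdint>

// Count trailing one / zero bits of a 32-bit mask with plain loops
// (avoids the undefined __builtin_ctz(0) case).
static unsigned trailingOnes(uint32_t X) {
  unsigned N = 0;
  while (N < 32 && (X & (1u << N)))
    ++N;
  return N;
}
static unsigned trailingZeros(uint32_t X) {
  unsigned N = 0;
  while (N < 32 && !(X & (1u << N)))
    ++N;
  return N;
}

struct CttzBounds { unsigned Min, Max; };

// Mirrors the DefiniteZeros/PossibleZeros computation for the cttz case:
// at least the trailing bits known zero, at most the trailing bits not
// known one.
static CttzBounds cttzBounds(uint32_t KnownZero, uint32_t KnownOne) {
  return {trailingOnes(KnownZero), trailingZeros(KnownOne)};
}

int main() {
  // Low nibble known to be 0b1000: bits 0-2 known zero, bit 3 known one.
  CttzBounds B = cttzBounds(/*KnownZero=*/0x7, /*KnownOne=*/0x8);
  // Min == Max == 3, so the fold above would replace the call with 3;
  // otherwise [Min, Max + 1) becomes the attached !range metadata.
  assert(B.Min == 3 && B.Max == 3);
  return 0;
}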
1257
1258
29.6k
static Instruction *foldCtpop(IntrinsicInst &II, InstCombiner &IC) {
1259
29.6k
  assert(II.getIntrinsicID() == Intrinsic::ctpop &&
1260
29.6k
         "Expected ctpop intrinsic");
1261
29.6k
  Value *Op0 = II.getArgOperand(0);
1262
29.6k
  Value *X;
1263
29.6k
  // ctpop(bitreverse(x)) -> ctpop(x)
1264
29.6k
  // ctpop(bswap(x)) -> ctpop(x)
1265
29.6k
  if (match(Op0, m_BitReverse(m_Value(X))) || match(Op0, m_BSwap(m_Value(X)))) {
1266
4
    II.setOperand(0, X);
1267
4
    return &II;
1268
4
  }
1269
29.6k
1270
29.6k
  // FIXME: Try to simplify vectors of integers.
1271
29.6k
  auto *IT = dyn_cast<IntegerType>(Op0->getType());
1272
29.6k
  if (!IT)
1273
504
    return nullptr;
1274
29.1k
1275
29.1k
  unsigned BitWidth = IT->getBitWidth();
1276
29.1k
  KnownBits Known(BitWidth);
1277
29.1k
  IC.computeKnownBits(Op0, Known, 0, &II);
1278
29.1k
1279
29.1k
  unsigned MinCount = Known.countMinPopulation();
1280
29.1k
  unsigned MaxCount = Known.countMaxPopulation();
1281
29.1k
1282
29.1k
  // Add range metadata since known bits can't completely reflect what we know.
1283
29.1k
  if (IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
1284
897
    Metadata *LowAndHigh[] = {
1285
897
        ConstantAsMetadata::get(ConstantInt::get(IT, MinCount)),
1286
897
        ConstantAsMetadata::get(ConstantInt::get(IT, MaxCount + 1))};
1287
897
    II.setMetadata(LLVMContext::MD_range,
1288
897
                   MDNode::get(II.getContext(), LowAndHigh));
1289
897
    return &II;
1290
897
  }
1291
28.2k
1292
28.2k
  return nullptr;
1293
28.2k
}
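The same known-bits bounds drive the !range metadata for ctpop: the count is at least the number of bits proven one and at most the width minus the bits proven zero. A small sketch with plain 32-bit masks (illustrative names, not this file's API):

#include <cassert>
#include <cstdint>

static unsigned popcount32(uint32_t X) {
  unsigned N = 0;
  for (; X; X &= X - 1) // clear the lowest set bit each iteration
    ++N;
  return N;
}

struct CtpopBounds { unsigned Min, Max; };

// Mirrors MinCount/MaxCount: at least the bits known one, at most the
// bit width minus the bits known zero.
static CtpopBounds ctpopBounds(uint32_t KnownZero, uint32_t KnownOne) {
  return {popcount32(KnownOne), 32 - popcount32(KnownZero)};
}

int main() {
  // Top 24 bits known zero, bit 0 known one: the count lies in [1, 8].
  CtpopBounds B = ctpopBounds(/*KnownZero=*/0xFFFFFF00u, /*KnownOne=*/0x1);
  assert(B.Min == 1 && B.Max == 8);
  return 0;
}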
1294
1295
// TODO: If the x86 backend knew how to convert a bool vector mask back to an
1296
// XMM register mask efficiently, we could transform all x86 masked intrinsics
1297
// to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
1298
12
static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {
1299
12
  Value *Ptr = II.getOperand(0);
1300
12
  Value *Mask = II.getOperand(1);
1301
12
  Constant *ZeroVec = Constant::getNullValue(II.getType());
1302
12
1303
12
  // Special case a zero mask since that's not a ConstantDataVector.
1304
12
  // This masked load instruction creates a zero vector.
1305
12
  if (isa<ConstantAggregateZero>(Mask))
1306
1
    return IC.replaceInstUsesWith(II, ZeroVec);
1307
11
1308
11
  auto *ConstMask = dyn_cast<ConstantDataVector>(Mask);
1309
11
  if (!ConstMask)
1310
1
    return nullptr;
1311
10
1312
10
  // The mask is constant. Convert this x86 intrinsic to the LLVM intrinsic
1313
10
  // to allow target-independent optimizations.
1314
10
1315
10
  // First, cast the x86 intrinsic scalar pointer to a vector pointer to match
1316
10
  // the LLVM intrinsic definition for the pointer argument.
1317
10
  unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
1318
10
  PointerType *VecPtrTy = PointerType::get(II.getType(), AddrSpace);
1319
10
  Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");
1320
10
1321
10
  // Second, convert the x86 XMM integer vector mask to a vector of bools based
1322
10
  // on each element's most significant bit (the sign bit).
1323
10
  Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);
1324
10
1325
10
  // The pass-through vector for an x86 masked load is a zero vector.
1326
10
  CallInst *NewMaskedLoad =
1327
10
      IC.Builder.CreateMaskedLoad(PtrCast, 1, BoolMask, ZeroVec);
1328
10
  return IC.replaceInstUsesWith(II, NewMaskedLoad);
1329
10
}
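This rewrite (and the masked-store rewrite below) hinges on converting the x86 integer vector mask into the vector-of-bools form the generic masked intrinsics expect, keyed purely off each element's sign bit. A scalar model of that conversion, assuming four i32 lanes (hypothetical helper, not part of this file):

#include <array>
#include <cstdint>

// Each mask element selects its lane exactly when its sign bit is set,
// i.e. when the element is negative when read as a signed integer.
static std::array<bool, 4> maskToBools(const std::array<int32_t, 4> &Mask) {
  std::array<bool, 4> Out{};
  for (std::size_t I = 0; I < Mask.size(); ++I)
    Out[I] = Mask[I] < 0;
  return Out;
}

int main() {
  auto Bools = maskToBools({{-1, 0, INT32_MIN, 42}});
  // Lanes 0 and 2 are selected; lanes 1 and 3 read the zero pass-through.
  return (Bools[0] && !Bools[1] && Bools[2] && !Bools[3]) ? 0 : 1;
}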
1330
1331
// TODO: If the x86 backend knew how to convert a bool vector mask back to an
1332
// XMM register mask efficiently, we could transform all x86 masked intrinsics
1333
// to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
1334
22
static bool simplifyX86MaskedStore(IntrinsicInst &II, InstCombiner &IC) {
1335
22
  Value *Ptr = II.getOperand(0);
1336
22
  Value *Mask = II.getOperand(1);
1337
22
  Value *Vec = II.getOperand(2);
1338
22
1339
22
  // Special case a zero mask since that's not a ConstantDataVector:
1340
22
  // this masked store instruction does nothing.
1341
22
  if (isa<ConstantAggregateZero>(Mask)) {
1342
2
    IC.eraseInstFromFunction(II);
1343
2
    return true;
1344
2
  }
1345
20
1346
20
  // The SSE2 version is too weird (eg, unaligned but non-temporal) to do
1347
20
  // anything else at this level.
1348
20
  if (II.getIntrinsicID() == Intrinsic::x86_sse2_maskmov_dqu)
1349
9
    return false;
1350
11
1351
11
  auto *ConstMask = dyn_cast<ConstantDataVector>(Mask);
1352
11
  if (!ConstMask)
1353
1
    return false;
1354
10
1355
10
  // The mask is constant. Convert this x86 intrinsic to the LLVM intrinsic
1356
10
  // to allow target-independent optimizations.
1357
10
1358
10
  // First, cast the x86 intrinsic scalar pointer to a vector pointer to match
1359
10
  // the LLVM intrinsic definition for the pointer argument.
1360
10
  unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
1361
10
  PointerType *VecPtrTy = PointerType::get(Vec->getType(), AddrSpace);
1362
10
  Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");
1363
10
1364
10
  // Second, convert the x86 XMM integer vector mask to a vector of bools based
1365
10
  // on each element's most significant bit (the sign bit).
1366
10
  Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);
1367
10
1368
10
  IC.Builder.CreateMaskedStore(Vec, PtrCast, 1, BoolMask);
1369
10
1370
10
  // 'Replace uses' doesn't work for stores. Erase the original masked store.
1371
10
  IC.eraseInstFromFunction(II);
1372
10
  return true;
1373
10
}
1374
1375
// Constant fold llvm.amdgcn.fmed3 intrinsics for standard inputs.
1376
//
1377
// A single NaN input is folded to minnum, so we rely on that folding for
1378
// handling NaNs.
1379
static APFloat fmed3AMDGCN(const APFloat &Src0, const APFloat &Src1,
1380
6
                           const APFloat &Src2) {
1381
6
  APFloat Max3 = maxnum(maxnum(Src0, Src1), Src2);
1382
6
1383
6
  APFloat::cmpResult Cmp0 = Max3.compare(Src0);
1384
6
  assert(Cmp0 != APFloat::cmpUnordered && "nans handled separately");
1385
6
  if (Cmp0 == APFloat::cmpEqual)
1386
2
    return maxnum(Src1, Src2);
1387
4
1388
4
  APFloat::cmpResult Cmp1 = Max3.compare(Src1);
1389
4
  assert(Cmp1 != APFloat::cmpUnordered && "nans handled separately");
1390
4
  if (Cmp1 == APFloat::cmpEqual)
1391
2
    return maxnum(Src0, Src2);
1392
2
1393
2
  return maxnum(Src0, Src1);
1394
2
}
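The comparison chain above computes a median of three: the input equal to the overall maximum is discarded and the larger of the remaining two is returned. A plain-float sketch of the same idea, with std::max standing in for maxnum and therefore ignoring the NaN handling the real code defers to earlier folds:

#include <algorithm>
#include <cassert>

// Drop whichever input equals the overall maximum, return the larger of
// the other two: that is the median of the three.
static float med3(float A, float B, float C) {
  float Max3 = std::max(std::max(A, B), C);
  if (Max3 == A)
    return std::max(B, C);
  if (Max3 == B)
    return std::max(A, C);
  return std::max(A, B);
}

int main() {
  assert(med3(1.0f, 5.0f, 3.0f) == 3.0f);
  assert(med3(5.0f, 5.0f, 1.0f) == 5.0f); // ties still yield the median
  return 0;
}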
1395
1396
/// Convert a table lookup to shufflevector if the mask is constant.
1397
/// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in
1398
/// which case we could lower the shufflevector with rev64 instructions
1399
/// as it's actually a byte reverse.
1400
static Value *simplifyNeonTbl1(const IntrinsicInst &II,
1401
301
                               InstCombiner::BuilderTy &Builder) {
1402
301
  // Bail out if the mask is not a constant.
1403
301
  auto *C = dyn_cast<Constant>(II.getArgOperand(1));
1404
301
  if (!C)
1405
81
    return nullptr;
1406
220
1407
220
  auto *VecTy = cast<VectorType>(II.getType());
1408
220
  unsigned NumElts = VecTy->getNumElements();
1409
220
1410
220
  // Only perform this transformation for <8 x i8> vector types.
1411
220
  if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
1412
47
    return nullptr;
1413
173
1414
173
  uint32_t Indexes[8];
1415
173
1416
272
  for (unsigned I = 0; I < NumElts; ++I) {
1417
270
    Constant *COp = C->getAggregateElement(I);
1418
270
1419
270
    if (!COp || !isa<ConstantInt>(COp))
1420
0
      return nullptr;
1421
270
1422
270
    Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();
1423
270
1424
270
    // Make sure the mask indices are in range.
1425
270
    if (Indexes[I] >= NumElts)
1426
171
      return nullptr;
1427
270
  }
1428
173
1429
173
  auto *ShuffleMask = ConstantDataVector::get(II.getContext(),
1430
2
                                              makeArrayRef(Indexes));
1431
2
  auto *V1 = II.getArgOperand(0);
1432
2
  auto *V2 = Constant::getNullValue(V1->getType());
1433
2
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
1434
173
}
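A scalar model of the tbl1 semantics being rewritten may help: each result byte is the source byte selected by the constant index, which is exactly what the emitted shufflevector computes. tbl1 yields zero for out-of-range indices, a case the transform above side-steps by bailing out, so only in-range masks are converted (illustrative helper, not part of this file):

#include <array>
#include <cstdint>

static std::array<uint8_t, 8> tbl1(const std::array<uint8_t, 8> &Src,
                                   const std::array<uint8_t, 8> &Idx) {
  std::array<uint8_t, 8> Out{};
  for (std::size_t I = 0; I < 8; ++I)
    Out[I] = Idx[I] < 8 ? Src[Idx[I]] : 0; // out-of-range lanes read as zero
  return Out;
}

int main() {
  std::array<uint8_t, 8> Src = {10, 11, 12, 13, 14, 15, 16, 17};
  std::array<uint8_t, 8> Rev = {7, 6, 5, 4, 3, 2, 1, 0};
  // The { 7,6,5,4,3,2,1,0 } mask mentioned above is a byte reverse.
  return tbl1(Src, Rev)[0] == 17 ? 0 : 1;
}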
1435
1436
/// Convert a vector load intrinsic into a simple llvm load instruction.
1437
/// This is beneficial when the underlying object being addressed comes
1438
/// from a constant, since we get constant-folding for free.
1439
static Value *simplifyNeonVld1(const IntrinsicInst &II,
1440
                               unsigned MemAlign,
1441
10
                               InstCombiner::BuilderTy &Builder) {
1442
10
  auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));
1443
10
1444
10
  if (!IntrAlign)
1445
1
    return nullptr;
1446
9
1447
9
  unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign ?
1448
9
                       MemAlign : IntrAlign->getLimitedValue();
1449
9
1450
9
  if (!isPowerOf2_32(Alignment))
1451
1
    return nullptr;
1452
8
1453
8
  auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
1454
8
                                          PointerType::get(II.getType(), 0));
1455
8
  return Builder.CreateAlignedLoad(II.getType(), BCastInst, Alignment);
1456
8
}
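The alignment handling above reduces to a small decision: take the intrinsic's alignment operand unless it is below the natural memory alignment, and refuse the transform for non-power-of-two values. A standalone restatement, with 0 standing in for the nullptr bail-out (illustrative helper, not part of this file):

#include <cstdint>

static unsigned chooseAlignment(uint64_t IntrAlign, unsigned MemAlign) {
  // Never go below the natural alignment of the memory operation.
  unsigned Alignment =
      IntrAlign < MemAlign ? MemAlign : static_cast<unsigned>(IntrAlign);
  // Only power-of-two alignments are usable on the plain load we emit.
  bool IsPow2 = Alignment != 0 && (Alignment & (Alignment - 1)) == 0;
  return IsPow2 ? Alignment : 0; // 0 means "do not transform"
}

int main() {
  // A vld1 with alignment operand 1 but 8-byte natural alignment loads
  // with alignment 8; an operand of 12 would make the transform bail.
  return (chooseAlignment(1, 8) == 8 && chooseAlignment(12, 8) == 0) ? 0 : 1;
}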
1457
1458
// Returns true iff the 2 intrinsics have the same operands, limiting the
1459
// comparison to the first NumOperands.
1460
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
1461
30
                             unsigned NumOperands) {
1462
30
  assert(I.getNumArgOperands() >= NumOperands && "Not enough operands");
1463
30
  assert(E.getNumArgOperands() >= NumOperands && "Not enough operands");
1464
80
  for (unsigned i = 0; i < NumOperands; i++)
1465
54
    if (I.getArgOperand(i) != E.getArgOperand(i))
1466
4
      return false;
1467
30
  return true;
1468
30
}
1469
1470
// Remove trivially empty start/end intrinsic ranges, i.e. a start
1471
// immediately followed by an end (ignoring debuginfo or other
1472
// start/end intrinsics in between). As this handles only the most trivial
1473
// cases, tracking the nesting level is not needed:
1474
//
1475
//   call @llvm.foo.start(i1 0) ; &I
1476
//   call @llvm.foo.start(i1 0)
1477
//   call @llvm.foo.end(i1 0) ; This one will not be skipped: it will be removed
1478
//   call @llvm.foo.end(i1 0)
1479
static bool removeTriviallyEmptyRange(IntrinsicInst &I, unsigned StartID,
1480
1.06M
                                      unsigned EndID, InstCombiner &IC) {
1481
1.06M
  assert(I.getIntrinsicID() == StartID &&
1482
1.06M
         "Start intrinsic does not have expected ID");
1483
1.06M
  BasicBlock::iterator BI(I), BE(I.getParent()->end());
1484
1.09M
  for (++BI; BI != BE; ++BI) {
1485
1.09M
    if (auto *E = dyn_cast<IntrinsicInst>(BI)) {
1486
93.4k
      if (isa<DbgInfoIntrinsic>(E) || E->getIntrinsicID() == StartID)
1487
33.1k
        continue;
1488
60.3k
      if (E->getIntrinsicID() == EndID &&
1489
60.3k
          haveSameOperands(I, *E, E->getNumArgOperands())) {
1490
26
        IC.eraseInstFromFunction(*E);
1491
26
        IC.eraseInstFromFunction(I);
1492
26
        return true;
1493
26
      }
1494
1.06M
    }
1495
1.06M
    break;
1496
1.06M
  }
1497
1.06M
1498
1.06M
  return false;
1499
1.06M
}
1500
1501
// Convert NVVM intrinsics to target-generic LLVM code where possible.
1502
3.45M
static Instruction *SimplifyNVVMIntrinsic(IntrinsicInst *II, InstCombiner &IC) {
1503
3.45M
  // Each NVVM intrinsic we can simplify can be replaced with one of:
1504
3.45M
  //
1505
3.45M
  //  * an LLVM intrinsic,
1506
3.45M
  //  * an LLVM cast operation,
1507
3.45M
  //  * an LLVM binary operation, or
1508
3.45M
  //  * ad-hoc LLVM IR for the particular operation.
1509
3.45M
1510
3.45M
  // Some transformations are only valid when the module's
1511
3.45M
  // flush-denormals-to-zero (ftz) setting is true/false, whereas other
1512
3.45M
  // transformations are valid regardless of the module's ftz setting.
1513
3.45M
  enum FtzRequirementTy {
1514
3.45M
    FTZ_Any,       // Any ftz setting is ok.
1515
3.45M
    FTZ_MustBeOn,  // Transformation is valid only if ftz is on.
1516
3.45M
    FTZ_MustBeOff, // Transformation is valid only if ftz is off.
1517
3.45M
  };
1518
3.45M
  // Classes of NVVM intrinsics that can't be replaced one-to-one with a
1519
3.45M
  // target-generic intrinsic, cast op, or binary op but that we can nonetheless
1520
3.45M
  // simplify.
1521
3.45M
  enum SpecialCase {
1522
3.45M
    SPC_Reciprocal,
1523
3.45M
  };
1524
3.45M
1525
3.45M
  // SimplifyAction is a poor-man's variant (plus an additional flag) that
1526
3.45M
  // represents how to replace an NVVM intrinsic with target-generic LLVM IR.
1527
3.45M
  struct SimplifyAction {
1528
3.45M
    // Invariant: At most one of these Optionals has a value.
1529
3.45M
    Optional<Intrinsic::ID> IID;
1530
3.45M
    Optional<Instruction::CastOps> CastOp;
1531
3.45M
    Optional<Instruction::BinaryOps> BinaryOp;
1532
3.45M
    Optional<SpecialCase> Special;
1533
3.45M
1534
3.45M
    FtzRequirementTy FtzRequirement = FTZ_Any;
1535
3.45M
1536
3.45M
    SimplifyAction() = default;
1537
3.45M
1538
3.45M
    SimplifyAction(Intrinsic::ID IID, FtzRequirementTy FtzReq)
1539
3.45M
        : IID(IID), FtzRequirement(FtzReq) {}
1540
3.45M
1541
3.45M
    // Cast operations don't have anything to do with FTZ, so we skip that
1542
3.45M
    // argument.
1543
3.45M
    SimplifyAction(Instruction::CastOps CastOp) : CastOp(CastOp) {}
1544
3.45M
1545
3.45M
    SimplifyAction(Instruction::BinaryOps BinaryOp, FtzRequirementTy FtzReq)
1546
3.45M
        : BinaryOp(BinaryOp), FtzRequirement(FtzReq) {}
1547
3.45M
1548
3.45M
    SimplifyAction(SpecialCase Special, FtzRequirementTy FtzReq)
1549
3.45M
        : Special(Special), FtzRequirement(FtzReq) {}
1550
3.45M
  };
1551
3.45M
1552
3.45M
  // Try to generate a SimplifyAction describing how to replace our
1553
3.45M
  // IntrinsicInstr with target-generic LLVM IR.
1554
3.45M
  const SimplifyAction Action = [II]() -> SimplifyAction {
1555
3.45M
    switch (II->getIntrinsicID()) {
1556
3.45M
    // NVVM intrinsics that map directly to LLVM intrinsics.
1557
3.45M
    case Intrinsic::nvvm_ceil_d:
1558
2
      return {Intrinsic::ceil, FTZ_Any};
1559
3.45M
    case Intrinsic::nvvm_ceil_f:
1560
2
      return {Intrinsic::ceil, FTZ_MustBeOff};
1561
3.45M
    case Intrinsic::nvvm_ceil_ftz_f:
1562
2
      return {Intrinsic::ceil, FTZ_MustBeOn};
1563
3.45M
    case Intrinsic::nvvm_fabs_d:
1564
2
      return {Intrinsic::fabs, FTZ_Any};
1565
3.45M
    case Intrinsic::nvvm_fabs_f:
1566
2
      return {Intrinsic::fabs, FTZ_MustBeOff};
1567
3.45M
    case Intrinsic::nvvm_fabs_ftz_f:
1568
2
      return {Intrinsic::fabs, FTZ_MustBeOn};
1569
3.45M
    case Intrinsic::nvvm_floor_d:
1570
2
      return {Intrinsic::floor, FTZ_Any};
1571
3.45M
    case Intrinsic::nvvm_floor_f:
1572
2
      return {Intrinsic::floor, FTZ_MustBeOff};
1573
3.45M
    case Intrinsic::nvvm_floor_ftz_f:
1574
2
      return {Intrinsic::floor, FTZ_MustBeOn};
1575
3.45M
    case Intrinsic::nvvm_fma_rn_d:
1576
2
      return {Intrinsic::fma, FTZ_Any};
1577
3.45M
    case Intrinsic::nvvm_fma_rn_f:
1578
2
      return {Intrinsic::fma, FTZ_MustBeOff};
1579
3.45M
    case Intrinsic::nvvm_fma_rn_ftz_f:
1580
2
      return {Intrinsic::fma, FTZ_MustBeOn};
1581
3.45M
    case Intrinsic::nvvm_fmax_d:
1582
2
      return {Intrinsic::maxnum, FTZ_Any};
1583
3.45M
    case Intrinsic::nvvm_fmax_f:
1584
2
      return {Intrinsic::maxnum, FTZ_MustBeOff};
1585
3.45M
    case Intrinsic::nvvm_fmax_ftz_f:
1586
2
      return {Intrinsic::maxnum, FTZ_MustBeOn};
1587
3.45M
    case Intrinsic::nvvm_fmin_d:
1588
2
      return {Intrinsic::minnum, FTZ_Any};
1589
3.45M
    case Intrinsic::nvvm_fmin_f:
1590
2
      return {Intrinsic::minnum, FTZ_MustBeOff};
1591
3.45M
    case Intrinsic::nvvm_fmin_ftz_f:
1592
2
      return {Intrinsic::minnum, FTZ_MustBeOn};
1593
3.45M
    case Intrinsic::nvvm_round_d:
1594
2
      return {Intrinsic::round, FTZ_Any};
1595
3.45M
    case Intrinsic::nvvm_round_f:
1596
2
      return {Intrinsic::round, FTZ_MustBeOff};
1597
3.45M
    case Intrinsic::nvvm_round_ftz_f:
1598
2
      return {Intrinsic::round, FTZ_MustBeOn};
1599
3.45M
    case Intrinsic::nvvm_sqrt_rn_d:
1600
2
      return {Intrinsic::sqrt, FTZ_Any};
1601
3.45M
    case Intrinsic::nvvm_sqrt_f:
1602
2
      // nvvm_sqrt_f is a special case.  For  most intrinsics, foo_ftz_f is the
1603
2
      // ftz version, and foo_f is the non-ftz version.  But nvvm_sqrt_f adopts
1604
2
      // the ftz-ness of the surrounding code.  sqrt_rn_f and sqrt_rn_ftz_f are
1605
2
      // the versions with explicit ftz-ness.
1606
2
      return {Intrinsic::sqrt, FTZ_Any};
1607
3.45M
    case Intrinsic::nvvm_sqrt_rn_f:
1608
2
      return {Intrinsic::sqrt, FTZ_MustBeOff};
1609
3.45M
    case Intrinsic::nvvm_sqrt_rn_ftz_f:
1610
2
      return {Intrinsic::sqrt, FTZ_MustBeOn};
1611
3.45M
    case Intrinsic::nvvm_trunc_d:
1612
2
      return {Intrinsic::trunc, FTZ_Any};
1613
3.45M
    case Intrinsic::nvvm_trunc_f:
1614
2
      return {Intrinsic::trunc, FTZ_MustBeOff};
1615
3.45M
    case Intrinsic::nvvm_trunc_ftz_f:
1616
2
      return {Intrinsic::trunc, FTZ_MustBeOn};
1617
3.45M
1618
3.45M
    // NVVM intrinsics that map to LLVM cast operations.
1619
3.45M
    //
1620
3.45M
    // Note that llvm's target-generic conversion operators correspond to the rz
1621
3.45M
    // (round to zero) versions of the nvvm conversion intrinsics, even though
1622
3.45M
    // most everything else here uses the rn (round to nearest even) nvvm ops.
1623
3.45M
    case Intrinsic::nvvm_d2i_rz:
1624
8
    case Intrinsic::nvvm_f2i_rz:
1625
8
    case Intrinsic::nvvm_d2ll_rz:
1626
8
    case Intrinsic::nvvm_f2ll_rz:
1627
8
      return {Instruction::FPToSI};
1628
8
    case Intrinsic::nvvm_d2ui_rz:
1629
8
    case Intrinsic::nvvm_f2ui_rz:
1630
8
    case Intrinsic::nvvm_d2ull_rz:
1631
8
    case Intrinsic::nvvm_f2ull_rz:
1632
8
      return {Instruction::FPToUI};
1633
8
    case Intrinsic::nvvm_i2d_rz:
1634
8
    case Intrinsic::nvvm_i2f_rz:
1635
8
    case Intrinsic::nvvm_ll2d_rz:
1636
8
    case Intrinsic::nvvm_ll2f_rz:
1637
8
      return {Instruction::SIToFP};
1638
8
    case Intrinsic::nvvm_ui2d_rz:
1639
8
    case Intrinsic::nvvm_ui2f_rz:
1640
8
    case Intrinsic::nvvm_ull2d_rz:
1641
8
    case Intrinsic::nvvm_ull2f_rz:
1642
8
      return {Instruction::UIToFP};
1643
8
1644
8
    // NVVM intrinsics that map to LLVM binary ops.
1645
8
    case Intrinsic::nvvm_add_rn_d:
1646
2
      return {Instruction::FAdd, FTZ_Any};
1647
8
    case Intrinsic::nvvm_add_rn_f:
1648
2
      return {Instruction::FAdd, FTZ_MustBeOff};
1649
8
    case Intrinsic::nvvm_add_rn_ftz_f:
1650
2
      return {Instruction::FAdd, FTZ_MustBeOn};
1651
8
    case Intrinsic::nvvm_mul_rn_d:
1652
2
      return {Instruction::FMul, FTZ_Any};
1653
8
    case Intrinsic::nvvm_mul_rn_f:
1654
2
      return {Instruction::FMul, FTZ_MustBeOff};
1655
8
    case Intrinsic::nvvm_mul_rn_ftz_f:
1656
2
      return {Instruction::FMul, FTZ_MustBeOn};
1657
8
    case Intrinsic::nvvm_div_rn_d:
1658
2
      return {Instruction::FDiv, FTZ_Any};
1659
8
    case Intrinsic::nvvm_div_rn_f:
1660
2
      return {Instruction::FDiv, FTZ_MustBeOff};
1661
8
    case Intrinsic::nvvm_div_rn_ftz_f:
1662
2
      return {Instruction::FDiv, FTZ_MustBeOn};
1663
8
1664
8
    // The remainder of cases are NVVM intrinsics that map to LLVM idioms, but
1665
8
    // need special handling.
1666
8
    //
1667
8
    // We seem to be missing intrinsics for rcp.approx.{ftz.}f32, which is just
1668
8
    // as well.
1669
8
    case Intrinsic::nvvm_rcp_rn_d:
1670
0
      return {SPC_Reciprocal, FTZ_Any};
1671
8
    case Intrinsic::nvvm_rcp_rn_f:
1672
2
      return {SPC_Reciprocal, FTZ_MustBeOff};
1673
8
    case Intrinsic::nvvm_rcp_rn_ftz_f:
1674
2
      return {SPC_Reciprocal, FTZ_MustBeOn};
1675
8
1676
8
    // We do not currently simplify intrinsics that give an approximate answer.
1677
8
    // These include:
1678
8
    //
1679
8
    //   - nvvm_cos_approx_{f,ftz_f}
1680
8
    //   - nvvm_ex2_approx_{d,f,ftz_f}
1681
8
    //   - nvvm_lg2_approx_{d,f,ftz_f}
1682
8
    //   - nvvm_sin_approx_{f,ftz_f}
1683
8
    //   - nvvm_sqrt_approx_{f,ftz_f}
1684
8
    //   - nvvm_rsqrt_approx_{d,f,ftz_f}
1685
8
    //   - nvvm_div_approx_{ftz_d,ftz_f,f}
1686
8
    //   - nvvm_rcp_approx_ftz_d
1687
8
    //
1688
8
    // Ideally we'd encode them as e.g. "fast call @llvm.cos", where "fast"
1689
8
    // means that fastmath is enabled in the intrinsic.  Unfortunately only
1690
8
    // binary operators (currently) have a fastmath bit in SelectionDAG, so this
1691
8
    // information gets lost and we can't select on it.
1692
8
    //
1693
8
    // TODO: div and rcp are lowered to a binary op, so these we could in theory
1694
8
    // lower them to "fast fdiv".
1695
8
1696
3.45M
    default:
1697
3.45M
      return {};
1698
3.45M
    }
1699
3.45M
  }();
1700
3.45M
1701
3.45M
  // If Action.FtzRequirementTy is not satisfied by the module's ftz state, we
1702
3.45M
  // can bail out now.  (Notice that in the case that IID is not an NVVM
1703
3.45M
  // intrinsic, we don't have to look up any module metadata, as
1704
3.45M
  // FtzRequirementTy will be FTZ_Any.)
1705
3.45M
  if (Action.FtzRequirement != FTZ_Any) {
1706
52
    bool FtzEnabled =
1707
52
        II->getFunction()->getFnAttribute("nvptx-f32ftz").getValueAsString() ==
1708
52
        "true";
1709
52
1710
52
    if (FtzEnabled != (Action.FtzRequirement == FTZ_MustBeOn))
1711
26
      return nullptr;
1712
3.45M
  }
1713
3.45M
1714
3.45M
  // Simplify to target-generic intrinsic.
1715
3.45M
  if (Action.IID) {
1716
38
    SmallVector<Value *, 4> Args(II->arg_operands());
1717
38
    // All the target-generic intrinsics currently of interest to us have one
1718
38
    // type argument, equal to that of the nvvm intrinsic's argument.
1719
38
    Type *Tys[] = {II->getArgOperand(0)->getType()};
1720
38
    return CallInst::Create(
1721
38
        Intrinsic::getDeclaration(II->getModule(), *Action.IID, Tys), Args);
1722
38
  }
1723
3.45M
1724
3.45M
  // Simplify to target-generic binary op.
1725
3.45M
  if (Action.BinaryOp)
1726
12
    return BinaryOperator::Create(*Action.BinaryOp, II->getArgOperand(0),
1727
12
                                  II->getArgOperand(1), II->getName());
1728
3.45M
1729
3.45M
  // Simplify to target-generic cast op.
1730
3.45M
  if (Action.CastOp)
1731
32
    return CastInst::Create(*Action.CastOp, II->getArgOperand(0), II->getType(),
1732
32
                            II->getName());
1733
3.45M
1734
3.45M
  // All that's left are the special cases.
1735
3.45M
  if (!Action.Special)
1736
3.45M
    return nullptr;
1737
2
1738
2
  switch (*Action.Special) {
1739
2
  case SPC_Reciprocal:
1740
2
    // Simplify reciprocal.
1741
2
    return BinaryOperator::Create(
1742
2
        Instruction::FDiv, ConstantFP::get(II->getArgOperand(0)->getType(), 1),
1743
2
        II->getArgOperand(0), II->getName());
1744
0
  }
1745
0
  llvm_unreachable("All SpecialCase enumerators should be handled in switch.");
1746
0
}
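The module-level gate near the top of SimplifyNVVMIntrinsic reduces to a small predicate: a rewrite with an explicit FTZ requirement fires only when the function's "nvptx-f32ftz" setting agrees with it, while FTZ_Any rewrites always fire. A sketch (enum mirrored from above, helper name illustrative):

enum FtzRequirementTy { FTZ_Any, FTZ_MustBeOn, FTZ_MustBeOff };

// A transformation with an FTZ requirement is valid only when the
// module's flush-denormals-to-zero setting matches that requirement.
static bool ftzRequirementSatisfied(FtzRequirementTy Req, bool FtzEnabled) {
  if (Req == FTZ_Any)
    return true;
  return FtzEnabled == (Req == FTZ_MustBeOn);
}

int main() {
  // e.g. nvvm_sqrt_rn_ftz_f (FTZ_MustBeOn) is only rewritten in an ftz module.
  return (ftzRequirementSatisfied(FTZ_MustBeOn, true) &&
          !ftzRequirementSatisfied(FTZ_MustBeOn, false) &&
          ftzRequirementSatisfied(FTZ_Any, false)) ? 0 : 1;
}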
1747
1748
2.99k
Instruction *InstCombiner::visitVAStartInst(VAStartInst &I) {
1749
2.99k
  removeTriviallyEmptyRange(I, Intrinsic::vastart, Intrinsic::vaend, *this);
1750
2.99k
  return nullptr;
1751
2.99k
}
1752
1753
2.26k
Instruction *InstCombiner::visitVACopyInst(VACopyInst &I) {
1754
2.26k
  removeTriviallyEmptyRange(I, Intrinsic::vacopy, Intrinsic::vaend, *this);
1755
2.26k
  return nullptr;
1756
2.26k
}
1757
1758
15.6k
static Instruction *canonicalizeConstantArg0ToArg1(CallInst &Call) {
1759
15.6k
  assert(Call.getNumArgOperands() > 1 && "Need at least 2 args to swap");
1760
15.6k
  Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
1761
15.6k
  if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
1762
323
    Call.setArgOperand(0, Arg1);
1763
323
    Call.setArgOperand(1, Arg0);
1764
323
    return &Call;
1765
323
  }
1766
15.3k
  return nullptr;
1767
15.3k
}
1768
1769
12.7k
Instruction *InstCombiner::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
1770
12.7k
  WithOverflowInst *WO = cast<WithOverflowInst>(II);
1771
12.7k
  Value *OperationResult = nullptr;
1772
12.7k
  Constant *OverflowResult = nullptr;
1773
12.7k
  if (OptimizeOverflowCheck(WO->getBinaryOp(), WO->isSigned(), WO->getLHS(),
1774
12.7k
                            WO->getRHS(), *WO, OperationResult, OverflowResult))
1775
328
    return CreateOverflowTuple(WO, OperationResult, OverflowResult);
1776
12.4k
  return nullptr;
1777
12.4k
}
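For orientation, the value these *.with.overflow intrinsics produce is a {result, overflow-bit} pair; the common fold above asks OptimizeOverflowCheck whether both components are already known. A sketch of that pair for the unsigned-add case, unrelated to that routine's actual implementation:

#include <cstdint>
#include <utility>

// Unsigned addition wraps modulo 2^32; it overflowed exactly when the
// wrapped sum is smaller than one of the inputs.
static std::pair<uint32_t, bool> uaddWithOverflow(uint32_t A, uint32_t B) {
  uint32_t R = A + B;
  return {R, R < A};
}

int main() {
  // 0xFFFFFFFF + 1 wraps to 0 and reports overflow.
  auto [R, Ov] = uaddWithOverflow(0xFFFFFFFFu, 1u);
  return (R == 0 && Ov) ? 0 : 1;
}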
1778
1779
/// CallInst simplification. This mostly only handles folding of intrinsic
1780
/// instructions. For normal calls, it allows visitCallBase to do the heavy
1781
/// lifting.
1782
20.9M
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
1783
20.9M
  if (Value *V = SimplifyCall(&CI, SQ.getWithInstruction(&CI)))
1784
35
    return replaceInstUsesWith(CI, V);
1785
20.9M
1786
20.9M
  if (isFreeCall(&CI, &TLI))
1787
372k
    return visitFree(CI);
1788
20.5M
1789
20.5M
  // If the caller function is nounwind, mark the call as nounwind, even if the
1790
20.5M
  // callee isn't.
1791
20.5M
  if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) {
1792
177k
    CI.setDoesNotThrow();
1793
177k
    return &CI;
1794
177k
  }
1795
20.3M
1796
20.3M
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
1797
20.3M
  if (!II) return visitCallBase(CI);
1798
3.46M
1799
3.46M
  // Intrinsics cannot occur in an invoke or a callbr, so handle them here
1800
3.46M
  // instead of in visitCallBase.
1801
3.46M
  if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
1802
538k
    bool Changed = false;
1803
538k
1804
538k
    // memmove/cpy/set of zero bytes is a noop.
1805
538k
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
1806
473k
      if (NumBytes->isNullValue())
1807
3.11k
        return eraseInstFromFunction(CI);
1808
470k
1809
470k
      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
1810
470k
        if (CI->getZExtValue() == 1) {
1811
270
          // Replace the instruction with just byte operations.  We would
1812
270
          // transform other cases to loads/stores, but we don't know if
1813
270
          // alignment is sufficient.
1814
270
        }
1815
470k
    }
1816
538k
1817
538k
    // No other transformations apply to volatile transfers.
1818
538k
    if (auto *M = dyn_cast<MemIntrinsic>(MI))
1819
535k
      if (M->isVolatile())
1820
489
        return nullptr;
1821
534k
1822
534k
    // If we have a memmove and the source operation is a constant global,
1823
534k
    // then the source and dest pointers can't alias, so we can change this
1824
534k
    // into a call to memcpy.
1825
534k
    if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
1826
7.77k
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
1827
14
        if (GVSrc->isConstant()) {
1828
5
          Module *M = CI.getModule();
1829
5
          Intrinsic::ID MemCpyID =
1830
5
              isa<AtomicMemMoveInst>(MMI)
1831
5
                  ? Intrinsic::memcpy_element_unordered_atomic
1832
5
                  : Intrinsic::memcpy;
1833
5
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
1834
5
                           CI.getArgOperand(1)->getType(),
1835
5
                           CI.getArgOperand(2)->getType() };
1836
5
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
1837
5
          Changed = true;
1838
5
        }
1839
7.77k
    }
1840
534k
1841
534k
    if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1842
242k
      // memmove(x,x,size) -> noop.
1843
242k
      if (MTI->getSource() == MTI->getDest())
1844
18
        return eraseInstFromFunction(CI);
1845
534k
    }
1846
534k
1847
534k
    // If we can determine a pointer alignment that is bigger than currently
1848
534k
    // set, update the alignment.
1849
534k
    if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1850
242k
      if (Instruction *I = SimplifyAnyMemTransfer(MTI))
1851
4.21k
        return I;
1852
292k
    } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
1853
292k
      if (Instruction *I = SimplifyAnyMemSet(MSI))
1854
2.34k
        return I;
1855
528k
    }
1856
528k
1857
528k
    if (Changed) return II;
1858
3.45M
  }
1859
3.45M
1860
3.45M
  // For vector result intrinsics, use the generic demanded vector support.
1861
3.45M
  if (II->getType()->isVectorTy()) {
1862
50.5k
    auto VWidth = II->getType()->getVectorNumElements();
1863
50.5k
    APInt UndefElts(VWidth, 0);
1864
50.5k
    APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
1865
50.5k
    if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
1866
133
      if (V != II)
1867
36
        return replaceInstUsesWith(*II, V);
1868
97
      return II;
1869
97
    }
1870
50.5k
  }
1871
3.45M
1872
3.45M
  if (Instruction *I = SimplifyNVVMIntrinsic(II, *this))
1873
84
    return I;
1874
3.45M
1875
3.45M
  auto SimplifyDemandedVectorEltsLow = [this](Value *Op, unsigned Width,
1876
3.45M
                                              unsigned DemandedWidth) {
1877
468
    APInt UndefElts(Width, 0);
1878
468
    APInt DemandedElts = APInt::getLowBitsSet(Width, DemandedWidth);
1879
468
    return SimplifyDemandedVectorElts(Op, DemandedElts, UndefElts);
1880
468
  };
1881
3.45M
1882
3.45M
  Intrinsic::ID IID = II->getIntrinsicID();
1883
3.45M
  switch (IID) {
1884
3.45M
  default: break;
1885
3.45M
  case Intrinsic::objectsize:
1886
44.0k
    if (Value *V = lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/false))
1887
382
      return replaceInstUsesWith(CI, V);
1888
43.6k
    return nullptr;
1889
43.6k
  case Intrinsic::bswap: {
1890
5.35k
    Value *IIOperand = II->getArgOperand(0);
1891
5.35k
    Value *X = nullptr;
1892
5.35k
1893
5.35k
    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
1894
5.35k
    if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
1895
2
      unsigned C = X->getType()->getPrimitiveSizeInBits() -
1896
2
        IIOperand->getType()->getPrimitiveSizeInBits();
1897
2
      Value *CV = ConstantInt::get(X->getType(), C);
1898
2
      Value *V = Builder.CreateLShr(X, CV);
1899
2
      return new TruncInst(V, IIOperand->getType());
1900
2
    }
1901
5.34k
    break;
1902
5.34k
  }
1903
5.34k
  case Intrinsic::masked_load:
1904
91
    if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
1905
4
      return replaceInstUsesWith(CI, SimplifiedMaskedOp);
1906
87
    break;
1907
216
  case Intrinsic::masked_store:
1908
216
    return simplifyMaskedStore(*II);
1909
384
  case Intrinsic::masked_gather:
1910
384
    return simplifyMaskedGather(*II);
1911
100
  case Intrinsic::masked_scatter:
1912
100
    return simplifyMaskedScatter(*II);
1913
170
  case Intrinsic::launder_invariant_group:
1914
170
  case Intrinsic::strip_invariant_group:
1915
170
    if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this))
1916
18
      return replaceInstUsesWith(*II, SkippedBarrier);
1917
152
    break;
1918
152
  case Intrinsic::powi:
1919
22
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
1920
5
      // 0 and 1 are handled in instsimplify
1921
5
1922
5
      // powi(x, -1) -> 1/x
1923
5
      if (Power->isMinusOne())
1924
1
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
1925
1
                                          II->getArgOperand(0));
1926
4
      // powi(x, 2) -> x*x
1927
4
      if (Power->equalsInt(2))
1928
1
        return BinaryOperator::CreateFMul(II->getArgOperand(0),
1929
1
                                          II->getArgOperand(0));
1930
20
    }
1931
20
    break;
1932
20
1933
54.5k
  case Intrinsic::cttz:
1934
54.5k
  case Intrinsic::ctlz:
1935
54.5k
    if (auto *I = foldCttzCtlz(*II, *this))
1936
3.03k
      return I;
1937
51.4k
    break;
1938
51.4k
1939
51.4k
  case Intrinsic::ctpop:
1940
29.6k
    if (auto *I = foldCtpop(*II, *this))
1941
901
      return I;
1942
28.7k
    break;
1943
28.7k
1944
28.7k
  case Intrinsic::fshl:
1945
570
  case Intrinsic::fshr: {
1946
570
    Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
1947
570
    Type *Ty = II->getType();
1948
570
    unsigned BitWidth = Ty->getScalarSizeInBits();
1949
570
    Constant *ShAmtC;
1950
570
    if (match(II->getArgOperand(2), m_Constant(ShAmtC)) &&
1951
570
        !isa<ConstantExpr>(ShAmtC) && !ShAmtC->containsConstantExpression()) {
1952
383
      // Canonicalize a shift amount constant operand to modulo the bit-width.
1953
383
      Constant *WidthC = ConstantInt::get(Ty, BitWidth);
1954
383
      Constant *ModuloC = ConstantExpr::getURem(ShAmtC, WidthC);
1955
383
      if (ModuloC != ShAmtC) {
1956
13
        II->setArgOperand(2, ModuloC);
1957
13
        return II;
1958
13
      }
1959
370
      assert(ConstantExpr::getICmp(ICmpInst::ICMP_UGT, WidthC, ShAmtC) ==
1960
370
                 ConstantInt::getTrue(CmpInst::makeCmpResultType(Ty)) &&
1961
370
             "Shift amount expected to be modulo bitwidth");
1962
370
1963
370
      // Canonicalize funnel shift right by constant to funnel shift left. This
1964
370
      // is not entirely arbitrary. For historical reasons, the backend may
1965
370
      // recognize rotate left patterns but miss rotate right patterns.
1966
370
      if (IID == Intrinsic::fshr) {
1967
25
        // fshr X, Y, C --> fshl X, Y, (BitWidth - C)
1968
25
        Constant *LeftShiftC = ConstantExpr::getSub(WidthC, ShAmtC);
1969
25
        Module *Mod = II->getModule();
1970
25
        Function *Fshl = Intrinsic::getDeclaration(Mod, Intrinsic::fshl, Ty);
1971
25
        return CallInst::Create(Fshl, { Op0, Op1, LeftShiftC });
1972
25
      }
1973
345
      assert(IID == Intrinsic::fshl &&
1974
345
             "All funnel shifts by simple constants should go left");
1975
345
1976
345
      // fshl(X, 0, C) --> shl X, C
1977
345
      // fshl(X, undef, C) --> shl X, C
1978
345
      if (match(Op1, m_ZeroInt()) || match(Op1, m_Undef()))
1979
13
        return BinaryOperator::CreateShl(Op0, ShAmtC);
1980
332
1981
332
      // fshl(0, X, C) --> lshr X, (BW-C)
1982
332
      // fshl(undef, X, C) --> lshr X, (BW-C)
1983
332
      if (match(Op0, m_ZeroInt()) || match(Op0, m_Undef()))
1984
15
        return BinaryOperator::CreateLShr(Op1,
1985
15
                                          ConstantExpr::getSub(WidthC, ShAmtC));
1986
317
1987
317
      // fshl i16 X, X, 8 --> bswap i16 X (reduce to more-specific form)
1988
317
      if (Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8))) {
1989
3
        Module *Mod = II->getModule();
1990
3
        Function *Bswap = Intrinsic::getDeclaration(Mod, Intrinsic::bswap, Ty);
1991
3
        return CallInst::Create(Bswap, { Op0 });
1992
3
      }
1993
501
    }
1994
501
1995
501
    // Left or right might be masked.
1996
501
    if (SimplifyDemandedInstructionBits(*II))
1997
10
      return &CI;
1998
491
1999
491
    // The shift amount (operand 2) of a funnel shift is modulo the bitwidth,
2000
491
    // so only the low bits of the shift amount are demanded if the bitwidth is
2001
491
    // a power-of-2.
2002
491
    if (!isPowerOf2_32(BitWidth))
2003
25
      break;
2004
466
    APInt Op2Demanded = APInt::getLowBitsSet(BitWidth, Log2_32_Ceil(BitWidth));
2005
466
    KnownBits Op2Known(BitWidth);
2006
466
    if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known))
2007
16
      return &CI;
2008
450
    break;
2009
450
  }
2010
450
  case Intrinsic::uadd_with_overflow:
2011
375
  case Intrinsic::sadd_with_overflow: {
2012
375
    if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2013
5
      return I;
2014
370
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2015
15
      return I;
2016
355
2017
355
    // Given 2 constant operands whose sum does not overflow:
2018
355
    // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1
2019
355
    // saddo (X +nsw C0), C1 -> saddo X, C0 + C1
2020
355
    Value *X;
2021
355
    const APInt *C0, *C1;
2022
355
    Value *Arg0 = II->getArgOperand(0);
2023
355
    Value *Arg1 = II->getArgOperand(1);
2024
355
    bool IsSigned = IID == Intrinsic::sadd_with_overflow;
2025
355
    bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0)))
2026
355
                             : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0)));
2027
355
    if (HasNWAdd && match(Arg1, m_APInt(C1))) {
2028
16
      bool Overflow;
2029
16
      APInt NewC =
2030
16
          IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);
2031
16
      if (!Overflow)
2032
14
        return replaceInstUsesWith(
2033
14
            *II, Builder.CreateBinaryIntrinsic(
2034
14
                     IID, X, ConstantInt::get(Arg1->getType(), NewC)));
2035
341
    }
2036
341
    break;
2037
341
  }
2038
341
2039
12.3k
  case Intrinsic::umul_with_overflow:
2040
12.3k
  case Intrinsic::smul_with_overflow:
2041
12.3k
    if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2042
263
      return I;
2043
12.1k
    LLVM_FALLTHROUGH;
2044
12.1k
2045
12.3k
  case Intrinsic::usub_with_overflow:
2046
12.3k
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2047
308
      return I;
2048
12.0k
    break;
2049
12.0k
2050
12.0k
  case Intrinsic::ssub_with_overflow: {
2051
21
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2052
5
      return I;
2053
16
2054
16
    Constant *C;
2055
16
    Value *Arg0 = II->getArgOperand(0);
2056
16
    Value *Arg1 = II->getArgOperand(1);
2057
16
    // Given a constant C that is not the minimum signed value
2058
16
    // for an integer of a given bit width:
2059
16
    //
2060
16
    // ssubo X, C -> saddo X, -C
2061
16
    if (match(Arg1, m_Constant(C)) && C->isNotMinSignedValue()) {
2062
10
      Value *NegVal = ConstantExpr::getNeg(C);
2063
10
      // Build a saddo call that is equivalent to the discovered
2064
10
      // ssubo call.
2065
10
      return replaceInstUsesWith(
2066
10
          *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow,
2067
10
                                             Arg0, NegVal));
2068
10
    }
2069
6
2070
6
    break;
2071
6
  }
2072
6
2073
1.02k
  case Intrinsic::uadd_sat:
2074
1.02k
  case Intrinsic::sadd_sat:
2075
1.02k
    if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2076
22
      return I;
2077
1.00k
    LLVM_FALLTHROUGH;
2078
1.67k
  case Intrinsic::usub_sat:
2079
1.67k
  case Intrinsic::ssub_sat: {
2080
1.67k
    SaturatingInst *SI = cast<SaturatingInst>(II);
2081
1.67k
    Type *Ty = SI->getType();
2082
1.67k
    Value *Arg0 = SI->getLHS();
2083
1.67k
    Value *Arg1 = SI->getRHS();
2084
1.67k
2085
1.67k
    // Make use of known overflow information.
2086
1.67k
    OverflowResult OR = computeOverflow(SI->getBinaryOp(), SI->isSigned(),
2087
1.67k
                                        Arg0, Arg1, SI);
2088
1.67k
    switch (OR) {
2089
1.67k
      case OverflowResult::MayOverflow:
2090
1.63k
        break;
2091
1.67k
      case OverflowResult::NeverOverflows:
2092
27
        if (SI->isSigned())
2093
13
          return BinaryOperator::CreateNSW(SI->getBinaryOp(), Arg0, Arg1);
2094
14
        else
2095
14
          return BinaryOperator::CreateNUW(SI->getBinaryOp(), Arg0, Arg1);
2096
9
      case OverflowResult::AlwaysOverflowsLow: {
2097
9
        unsigned BitWidth = Ty->getScalarSizeInBits();
2098
9
        APInt Min = APSInt::getMinValue(BitWidth, !SI->isSigned());
2099
9
        return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Min));
2100
0
      }
2101
8
      case OverflowResult::AlwaysOverflowsHigh: {
2102
8
        unsigned BitWidth = Ty->getScalarSizeInBits();
2103
8
        APInt Max = APSInt::getMaxValue(BitWidth, !SI->isSigned());
2104
8
        return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Max));
2105
1.63k
      }
2106
1.63k
    }
2107
1.63k
2108
1.63k
    // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN
2109
1.63k
    Constant *C;
2110
1.63k
    if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) &&
2111
1.63k
        C->isNotMinSignedValue()) {
2112
19
      Value *NegVal = ConstantExpr::getNeg(C);
2113
19
      return replaceInstUsesWith(
2114
19
          *II, Builder.CreateBinaryIntrinsic(
2115
19
              Intrinsic::sadd_sat, Arg0, NegVal));
2116
19
    }
2117
1.61k
2118
1.61k
    // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2))
2119
1.61k
    // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2))
2120
1.61k
    // if Val and Val2 have the same sign
2121
1.61k
    if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {
2122
20
      Value *X;
2123
20
      const APInt *Val, *Val2;
2124
20
      APInt NewVal;
2125
20
      bool IsUnsigned =
2126
20
          IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
2127
20
      if (Other->getIntrinsicID() == IID &&
2128
20
          match(Arg1, m_APInt(Val)) &&
2129
20
          match(Other->getArgOperand(0), m_Value(X)) &&
2130
20
          match(Other->getArgOperand(1), m_APInt(Val2))) {
2131
18
        if (IsUnsigned)
2132
4
          NewVal = Val->uadd_sat(*Val2);
2133
14
        else if (Val->isNonNegative() == Val2->isNonNegative()) {
2134
12
          bool Overflow;
2135
12
          NewVal = Val->sadd_ov(*Val2, Overflow);
2136
12
          if (Overflow) {
2137
2
            // Both adds together may add more than SignedMaxValue
2138
2
            // without saturating the final result.
2139
2
            break;
2140
2
          }
2141
2
        } else {
2142
2
          // Cannot fold saturated addition with different signs.
2143
2
          break;
2144
2
        }
2145
14
2146
14
        return replaceInstUsesWith(
2147
14
            *II, Builder.CreateBinaryIntrinsic(
2148
14
                     IID, X, ConstantInt::get(II->getType(), NewVal)));
2149
14
      }
2150
20
    }
2151
1.59k
    break;
2152
1.59k
  }
2153
1.59k
2154
1.59k
  case Intrinsic::minnum:
2155
1.24k
  case Intrinsic::maxnum:
2156
1.24k
  case Intrinsic::minimum:
2157
1.24k
  case Intrinsic::maximum: {
2158
1.24k
    if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2159
18
      return I;
2160
1.22k
    Value *Arg0 = II->getArgOperand(0);
2161
1.22k
    Value *Arg1 = II->getArgOperand(1);
2162
1.22k
    Value *X, *Y;
2163
1.22k
    if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) &&
2164
1.22k
        (Arg0->hasOneUse() || Arg1->hasOneUse())) {
2165
31
      // If both operands are negated, invert the call and negate the result:
2166
31
      // min(-X, -Y) --> -(max(X, Y))
2167
31
      // max(-X, -Y) --> -(min(X, Y))
2168
31
      Intrinsic::ID NewIID;
2169
31
      switch (IID) {
2170
31
      case Intrinsic::maxnum:
2171
7
        NewIID = Intrinsic::minnum;
2172
7
        break;
2173
31
      case Intrinsic::minnum:
2174
8
        NewIID = Intrinsic::maxnum;
2175
8
        break;
2176
31
      case Intrinsic::maximum:
2177
8
        NewIID = Intrinsic::minimum;
2178
8
        break;
2179
31
      case Intrinsic::minimum:
2180
8
        NewIID = Intrinsic::maximum;
2181
8
        break;
2182
31
      default:
2183
0
        llvm_unreachable("unexpected intrinsic ID");
2184
31
      }
2185
31
      Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II);
2186
31
      Instruction *FNeg = BinaryOperator::CreateFNeg(NewCall);
2187
31
      FNeg->copyIRFlags(II);
2188
31
      return FNeg;
2189
31
    }
2190
1.19k
2191
1.19k
    // m(m(X, C2), C1) -> m(X, C)
2192
1.19k
    const APFloat *C1, *C2;
2193
1.19k
    if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
2194
884
      if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) &&
2195
884
          ((match(M->getArgOperand(0), m_Value(X)) &&
2196
24
            match(M->getArgOperand(1), m_APFloat(C2))) ||
2197
24
           (match(M->getArgOperand(1), m_Value(X)) &&
2198
24
            match(M->getArgOperand(0), m_APFloat(C2))))) {
2199
24
        APFloat Res(0.0);
2200
24
        switch (IID) {
2201
24
        case Intrinsic::maxnum:
2202
6
          Res = maxnum(*C1, *C2);
2203
6
          break;
2204
24
        case Intrinsic::minnum:
2205
6
          Res = minnum(*C1, *C2);
2206
6
          break;
2207
24
        case Intrinsic::maximum:
2208
6
          Res = maximum(*C1, *C2);
2209
6
          break;
2210
24
        case Intrinsic::minimum:
2211
6
          Res = minimum(*C1, *C2);
2212
6
          break;
2213
24
        default:
2214
0
          llvm_unreachable("unexpected intrinsic ID");
2215
24
        }
2216
24
        Instruction *NewCall = Builder.CreateBinaryIntrinsic(
2217
24
            IID, X, ConstantFP::get(Arg0->getType(), Res));
2218
24
        NewCall->copyIRFlags(II);
2219
24
        return replaceInstUsesWith(*II, NewCall);
2220
24
      }
2221
884
    }
2222
1.17k
2223
1.17k
    break;
2224
1.17k
  }
2225
1.17k
  case Intrinsic::fmuladd: {
2226
129
    // Canonicalize fast fmuladd to the separate fmul + fadd.
2227
129
    if (II->isFast()) {
2228
5
      BuilderTy::FastMathFlagGuard Guard(Builder);
2229
5
      Builder.setFastMathFlags(II->getFastMathFlags());
2230
5
      Value *Mul = Builder.CreateFMul(II->getArgOperand(0),
2231
5
                                      II->getArgOperand(1));
2232
5
      Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2));
2233
5
      Add->takeName(II);
2234
5
      return replaceInstUsesWith(*II, Add);
2235
5
    }
2236
124
2237
124
    LLVM_FALLTHROUGH;
2238
124
  }
2239
621
  case Intrinsic::fma: {
2240
621
    if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2241
15
      return I;
2242
606
2243
606
    // fma fneg(x), fneg(y), z -> fma x, y, z
2244
606
    Value *Src0 = II->getArgOperand(0);
2245
606
    Value *Src1 = II->getArgOperand(1);
2246
606
    Value *X, *Y;
2247
606
    if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) {
2248
17
      II->setArgOperand(0, X);
2249
17
      II->setArgOperand(1, Y);
2250
17
      return II;
2251
17
    }
2252
589
2253
589
    // fma fabs(x), fabs(x), z -> fma x, x, z
2254
589
    if (match(Src0, m_FAbs(m_Value(X))) &&
2255
589
        match(Src1, m_FAbs(m_Specific(X)))) {
2256
3
      II->setArgOperand(0, X);
2257
3
      II->setArgOperand(1, X);
2258
3
      return II;
2259
3
    }
2260
586
2261
586
    // fma x, 1, z -> fadd x, z
2262
586
    if (match(Src1, m_FPOne())) {
2263
5
      auto *FAdd = BinaryOperator::CreateFAdd(Src0, II->getArgOperand(2));
2264
5
      FAdd->copyFastMathFlags(II);
2265
5
      return FAdd;
2266
5
    }
2267
581
2268
581
    break;
2269
581
  }
2270
31.4k
  case Intrinsic::fabs: {
2271
31.4k
    Value *Cond;
2272
31.4k
    Constant *LHS, *RHS;
2273
31.4k
    if (match(II->getArgOperand(0),
2274
31.4k
              m_Select(m_Value(Cond), m_Constant(LHS), m_Constant(RHS)))) {
2275
4
      CallInst *Call0 = Builder.CreateCall(II->getCalledFunction(), {LHS});
2276
4
      CallInst *Call1 = Builder.CreateCall(II->getCalledFunction(), {RHS});
2277
4
      return SelectInst::Create(Cond, Call0, Call1);
2278
4
    }
2279
31.4k
2280
31.4k
    LLVM_FALLTHROUGH;
2281
31.4k
  }
2282
35.3k
  case Intrinsic::ceil:
2283
35.3k
  case Intrinsic::floor:
2284
35.3k
  case Intrinsic::round:
2285
35.3k
  case Intrinsic::nearbyint:
2286
35.3k
  case Intrinsic::rint:
2287
35.3k
  case Intrinsic::trunc: {
2288
35.3k
    Value *ExtSrc;
2289
35.3k
    if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) {
2290
192
      // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x)
2291
192
      Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II);
2292
192
      return new FPExtInst(NarrowII, II->getType());
2293
192
    }
2294
35.2k
    break;
2295
35.2k
  }
2296
35.2k
  case Intrinsic::cos:
2297
2.87k
  case Intrinsic::amdgcn_cos: {
2298
2.87k
    Value *X;
2299
2.87k
    Value *Src = II->getArgOperand(0);
2300
2.87k
    if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X)))) {
2301
28
      // cos(-x) -> cos(x)
2302
28
      // cos(fabs(x)) -> cos(x)
2303
28
      II->setArgOperand(0, X);
2304
28
      return II;
2305
28
    }
2306
2.85k
    break;
2307
2.85k
  }
2308
2.85k
  case Intrinsic::sin: {
2309
2.22k
    Value *X;
2310
2.22k
    if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) {
2311
6
      // sin(-x) --> -sin(x)
2312
6
      Value *NewSin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, II);
2313
6
      Instruction *FNeg = BinaryOperator::CreateFNeg(NewSin);
2314
6
      FNeg->copyFastMathFlags(II);
2315
6
      return FNeg;
2316
6
    }
2317
2.22k
    break;
2318
2.22k
  }
2319
2.22k
  case Intrinsic::ppc_altivec_lvx:
2320
4
  case Intrinsic::ppc_altivec_lvxl:
2321
4
    // Turn PPC lvx -> load if the pointer is known aligned.
2322
4
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
2323
4
                                   &DT) >= 16) {
2324
2
      Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2325
2
                                         PointerType::getUnqual(II->getType()));
2326
2
      return new LoadInst(II->getType(), Ptr);
2327
2
    }
2328
2
    break;
2329
2
  case Intrinsic::ppc_vsx_lxvw4x:
2330
2
  case Intrinsic::ppc_vsx_lxvd2x: {
2331
2
    // Turn PPC VSX loads into normal loads.
2332
2
    Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2333
2
                                       PointerType::getUnqual(II->getType()));
2334
2
    return new LoadInst(II->getType(), Ptr, Twine(""), false, 1);
2335
2
  }
2336
4
  case Intrinsic::ppc_altivec_stvx:
2337
4
  case Intrinsic::ppc_altivec_stvxl:
2338
4
    // Turn stvx -> store if the pointer is known aligned.
2339
4
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
2340
4
                                   &DT) >= 16) {
2341
2
      Type *OpPtrTy =
2342
2
        PointerType::getUnqual(II->getArgOperand(0)->getType());
2343
2
      Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2344
2
      return new StoreInst(II->getArgOperand(0), Ptr);
2345
2
    }
2346
2
    break;
2347
2
  case Intrinsic::ppc_vsx_stxvw4x:
2348
2
  case Intrinsic::ppc_vsx_stxvd2x: {
2349
2
    // Turn PPC VSX stores into normal stores.
2350
2
    Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType());
2351
2
    Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2352
2
    return new StoreInst(II->getArgOperand(0), Ptr, false, 1);
2353
2
  }
2354
3
  case Intrinsic::ppc_qpx_qvlfs:
2355
3
    // Turn PPC QPX qvlfs -> load if the pointer is known aligned.
2356
3
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
2357
3
                                   &DT) >= 16) {
2358
1
      Type *VTy = VectorType::get(Builder.getFloatTy(),
2359
1
                                  II->getType()->getVectorNumElements());
2360
1
      Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2361
1
                                         PointerType::getUnqual(VTy));
2362
1
      Value *Load = Builder.CreateLoad(VTy, Ptr);
2363
1
      return new FPExtInst(Load, II->getType());
2364
1
    }
2365
2
    break;
2366
3
  case Intrinsic::ppc_qpx_qvlfd:
2367
3
    // Turn PPC QPX qvlfd -> load if the pointer is known aligned.
2368
3
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, &AC,
2369
3
                                   &DT) >= 32) {
2370
1
      Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2371
1
                                         PointerType::getUnqual(II->getType()));
2372
1
      return new LoadInst(II->getType(), Ptr);
2373
1
    }
2374
2
    break;
2375
2
  case Intrinsic::ppc_qpx_qvstfs:
2376
2
    // Turn PPC QPX qvstfs -> store if the pointer is known aligned.
2377
2
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
2378
2
                                   &DT) >= 16) {
2379
1
      Type *VTy = VectorType::get(Builder.getFloatTy(),
2380
1
          II->getArgOperand(0)->getType()->getVectorNumElements());
2381
1
      Value *TOp = Builder.CreateFPTrunc(II->getArgOperand(0), VTy);
2382
1
      Type *OpPtrTy = PointerType::getUnqual(VTy);
2383
1
      Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2384
1
      return new StoreInst(TOp, Ptr);
2385
1
    }
2386
1
    break;
2387
3
  case Intrinsic::ppc_qpx_qvstfd:
2388
3
    // Turn PPC QPX qvstfd -> store if the pointer is known aligned.
2389
3
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, &AC,
2390
3
                                   &DT) >= 32) {
2391
1
      Type *OpPtrTy =
2392
1
        PointerType::getUnqual(II->getArgOperand(0)->getType());
2393
1
      Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2394
1
      return new StoreInst(II->getArgOperand(0), Ptr);
2395
1
    }
2396
2
    break;
2397
2
2398
24
  case Intrinsic::x86_bmi_bextr_32:
2399
24
  case Intrinsic::x86_bmi_bextr_64:
2400
24
  case Intrinsic::x86_tbm_bextri_u32:
2401
24
  case Intrinsic::x86_tbm_bextri_u64:
2402
24
    // If the RHS is a constant we can try some simplifications.
2403
24
    if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2404
24
      uint64_t Shift = C->getZExtValue();
2405
24
      uint64_t Length = (Shift >> 8) & 0xff;
2406
24
      Shift &= 0xff;
2407
24
      unsigned BitWidth = II->getType()->getIntegerBitWidth();
2408
24
      // If the length is 0 or the shift is out of range, replace with zero.
2409
24
      if (Length == 0 || Shift >= BitWidth)
2410
8
        return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), 0));
2411
16
      // If the LHS is also a constant, we can completely constant fold this.
2412
16
      if (auto *InC = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
2413
12
        uint64_t Result = InC->getZExtValue() >> Shift;
2414
12
        if (Length > BitWidth)
2415
4
          Length = BitWidth;
2416
12
        Result &= maskTrailingOnes<uint64_t>(Length);
2417
12
        return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Result));
2418
12
      }
2419
4
      // TODO should we turn this into 'and' if shift is 0? Or 'shl' if we
2420
4
      // are only masking bits that a shift already cleared?
2421
4
    }
2422
4
    break;
2423
4
2424
8
  case Intrinsic::x86_bmi_bzhi_32:
2425
8
  case Intrinsic::x86_bmi_bzhi_64:
2426
8
    // If the RHS is a constant we can try some simplifications.
2427
8
    if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2428
8
      uint64_t Index = C->getZExtValue() & 0xff;
2429
8
      unsigned BitWidth = II->getType()->getIntegerBitWidth();
2430
8
      if (Index >= BitWidth)
2431
2
        return replaceInstUsesWith(CI, II->getArgOperand(0));
2432
6
      if (Index == 0)
2433
2
        return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), 0));
2434
4
      // If the LHS is also a constant, we can completely constant fold this.
2435
4
      if (auto *InC = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
2436
2
        uint64_t Result = InC->getZExtValue();
2437
2
        Result &= maskTrailingOnes<uint64_t>(Index);
2438
2
        return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Result));
2439
2
      }
2440
2
      // TODO should we convert this to an AND if the RHS is constant?
2441
2
    }
2442
2
    break;
2443
2
2444
9
  case Intrinsic::x86_vcvtph2ps_128:
2445
9
  case Intrinsic::x86_vcvtph2ps_256: {
2446
9
    auto Arg = II->getArgOperand(0);
2447
9
    auto ArgType = cast<VectorType>(Arg->getType());
2448
9
    auto RetType = cast<VectorType>(II->getType());
2449
9
    unsigned ArgWidth = ArgType->getNumElements();
2450
9
    unsigned RetWidth = RetType->getNumElements();
2451
9
    assert(RetWidth <= ArgWidth && "Unexpected input/return vector widths");
2452
9
    assert(ArgType->isIntOrIntVectorTy() &&
2453
9
           ArgType->getScalarSizeInBits() == 16 &&
2454
9
           "CVTPH2PS input type should be 16-bit integer vector");
2455
9
    assert(RetType->getScalarType()->isFloatTy() &&
2456
9
           "CVTPH2PS output type should be 32-bit float vector");
2457
9
2458
9
    // Constant folding: Convert to generic half to single conversion.
2459
9
    if (isa<ConstantAggregateZero>(Arg))
2460
2
      return replaceInstUsesWith(*II, ConstantAggregateZero::get(RetType));
2461
7
2462
7
    if (isa<ConstantDataVector>(Arg)) {
2463
2
      auto VectorHalfAsShorts = Arg;
2464
2
      if (RetWidth < ArgWidth) {
2465
1
        SmallVector<uint32_t, 8> SubVecMask;
2466
5
        for (unsigned i = 0; i != RetWidth; ++i)
2467
4
          SubVecMask.push_back((int)i);
2468
1
        VectorHalfAsShorts = Builder.CreateShuffleVector(
2469
1
            Arg, UndefValue::get(ArgType), SubVecMask);
2470
1
      }
2471
2
2472
2
      auto VectorHalfType =
2473
2
          VectorType::get(Type::getHalfTy(II->getContext()), RetWidth);
2474
2
      auto VectorHalfs =
2475
2
          Builder.CreateBitCast(VectorHalfAsShorts, VectorHalfType);
2476
2
      auto VectorFloats = Builder.CreateFPExt(VectorHalfs, RetType);
2477
2
      return replaceInstUsesWith(*II, VectorFloats);
2478
2
    }
2479
5
2480
5
    // We only use the lowest lanes of the argument.
2481
5
    if (Value *V = SimplifyDemandedVectorEltsLow(Arg, ArgWidth, RetWidth)) {
2482
1
      II->setArgOperand(0, V);
2483
1
      return II;
2484
1
    }
2485
4
    break;
2486
4
  }
2487
4
2488
75
  case Intrinsic::x86_sse_cvtss2si:
2489
75
  case Intrinsic::x86_sse_cvtss2si64:
2490
75
  case Intrinsic::x86_sse_cvttss2si:
2491
75
  case Intrinsic::x86_sse_cvttss2si64:
2492
75
  case Intrinsic::x86_sse2_cvtsd2si:
2493
75
  case Intrinsic::x86_sse2_cvtsd2si64:
2494
75
  case Intrinsic::x86_sse2_cvttsd2si:
2495
75
  case Intrinsic::x86_sse2_cvttsd2si64:
2496
75
  case Intrinsic::x86_avx512_vcvtss2si32:
2497
75
  case Intrinsic::x86_avx512_vcvtss2si64:
2498
75
  case Intrinsic::x86_avx512_vcvtss2usi32:
2499
75
  case Intrinsic::x86_avx512_vcvtss2usi64:
2500
75
  case Intrinsic::x86_avx512_vcvtsd2si32:
2501
75
  case Intrinsic::x86_avx512_vcvtsd2si64:
2502
75
  case Intrinsic::x86_avx512_vcvtsd2usi32:
2503
75
  case Intrinsic::x86_avx512_vcvtsd2usi64:
2504
75
  case Intrinsic::x86_avx512_cvttss2si:
2505
75
  case Intrinsic::x86_avx512_cvttss2si64:
2506
75
  case Intrinsic::x86_avx512_cvttss2usi:
2507
75
  case Intrinsic::x86_avx512_cvttss2usi64:
2508
75
  case Intrinsic::x86_avx512_cvttsd2si:
2509
75
  case Intrinsic::x86_avx512_cvttsd2si64:
2510
75
  case Intrinsic::x86_avx512_cvttsd2usi:
2511
75
  case Intrinsic::x86_avx512_cvttsd2usi64: {
2512
75
    // These intrinsics only demand the 0th element of their input vectors. If
2513
75
    // we can simplify the input based on that, do so now.
2514
75
    Value *Arg = II->getArgOperand(0);
2515
75
    unsigned VWidth = Arg->getType()->getVectorNumElements();
2516
75
    if (Value *V = SimplifyDemandedVectorEltsLow(Arg, VWidth, 1)) {
2517
25
      II->setArgOperand(0, V);
2518
25
      return II;
2519
25
    }
2520
50
    break;
2521
50
  }
2522
50
2523
51
  case Intrinsic::x86_mmx_pmovmskb:
2524
51
  case Intrinsic::x86_sse_movmsk_ps:
2525
51
  case Intrinsic::x86_sse2_movmsk_pd:
2526
51
  case Intrinsic::x86_sse2_pmovmskb_128:
2527
51
  case Intrinsic::x86_avx_movmsk_pd_256:
2528
51
  case Intrinsic::x86_avx_movmsk_ps_256:
2529
51
  case Intrinsic::x86_avx2_pmovmskb:
2530
51
    if (Value *V = simplifyX86movmsk(*II, Builder))
2531
45
      return replaceInstUsesWith(*II, V);
2532
6
    break;
2533
6
2534
85
  case Intrinsic::x86_sse_comieq_ss:
2535
85
  case Intrinsic::x86_sse_comige_ss:
2536
85
  case Intrinsic::x86_sse_comigt_ss:
2537
85
  case Intrinsic::x86_sse_comile_ss:
2538
85
  case Intrinsic::x86_sse_comilt_ss:
2539
85
  case Intrinsic::x86_sse_comineq_ss:
2540
85
  case Intrinsic::x86_sse_ucomieq_ss:
2541
85
  case Intrinsic::x86_sse_ucomige_ss:
2542
85
  case Intrinsic::x86_sse_ucomigt_ss:
2543
85
  case Intrinsic::x86_sse_ucomile_ss:
2544
85
  case Intrinsic::x86_sse_ucomilt_ss:
2545
85
  case Intrinsic::x86_sse_ucomineq_ss:
2546
85
  case Intrinsic::x86_sse2_comieq_sd:
2547
85
  case Intrinsic::x86_sse2_comige_sd:
2548
85
  case Intrinsic::x86_sse2_comigt_sd:
2549
85
  case Intrinsic::x86_sse2_comile_sd:
2550
85
  case Intrinsic::x86_sse2_comilt_sd:
2551
85
  case Intrinsic::x86_sse2_comineq_sd:
2552
85
  case Intrinsic::x86_sse2_ucomieq_sd:
2553
85
  case Intrinsic::x86_sse2_ucomige_sd:
2554
85
  case Intrinsic::x86_sse2_ucomigt_sd:
2555
85
  case Intrinsic::x86_sse2_ucomile_sd:
2556
85
  case Intrinsic::x86_sse2_ucomilt_sd:
2557
85
  case Intrinsic::x86_sse2_ucomineq_sd:
2558
85
  case Intrinsic::x86_avx512_vcomi_ss:
2559
85
  case Intrinsic::x86_avx512_vcomi_sd:
2560
85
  case Intrinsic::x86_avx512_mask_cmp_ss:
2561
85
  case Intrinsic::x86_avx512_mask_cmp_sd: {
2562
85
    // These intrinsics only demand the 0th element of their input vectors. If
2563
85
    // we can simplify the input based on that, do so now.
2564
85
    bool MadeChange = false;
2565
85
    Value *Arg0 = II->getArgOperand(0);
2566
85
    Value *Arg1 = II->getArgOperand(1);
2567
85
    unsigned VWidth = Arg0->getType()->getVectorNumElements();
2568
85
    if (Value *V = SimplifyDemandedVectorEltsLow(Arg0, VWidth, 1)) {
2569
28
      II->setArgOperand(0, V);
2570
28
      MadeChange = true;
2571
28
    }
2572
85
    if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, 1)) {
2573
28
      II->setArgOperand(1, V);
2574
28
      MadeChange = true;
2575
28
    }
2576
85
    if (MadeChange)
2577
28
      return II;
2578
57
    break;
2579
57
  }
2580
57
  case Intrinsic::x86_avx512_cmp_pd_128:
2581
41
  case Intrinsic::x86_avx512_cmp_pd_256:
2582
41
  case Intrinsic::x86_avx512_cmp_pd_512:
2583
41
  case Intrinsic::x86_avx512_cmp_ps_128:
2584
41
  case Intrinsic::x86_avx512_cmp_ps_256:
2585
41
  case Intrinsic::x86_avx512_cmp_ps_512: {
2586
41
    // Folding cmp(sub(a,b),0) -> cmp(a,b) and cmp(0,sub(a,b)) -> cmp(b,a)
2587
41
    Value *Arg0 = II->getArgOperand(0);
2588
41
    Value *Arg1 = II->getArgOperand(1);
2589
41
    bool Arg0IsZero = match(Arg0, m_PosZeroFP());
2590
41
    if (Arg0IsZero)
2591
6
      std::swap(Arg0, Arg1);
2592
41
    Value *A, *B;
2593
41
    // This fold requires only the NINF(not +/- inf) since inf minus
2594
41
    // inf is nan.
2595
41
    // NSZ(No Signed Zeros) is not needed because zeros of any sign are
2596
41
    // equal for both compares.
2597
41
    // NNAN is not needed because nans compare the same for both compares.
2598
41
    // The compare intrinsic uses the above assumptions and therefore
2599
41
    // doesn't require additional flags.
2600
41
    if ((match(Arg0, m_OneUse(m_FSub(m_Value(A), m_Value(B)))) &&
2601
41
         match(Arg1, m_PosZeroFP()) && isa<Instruction>(Arg0) &&
2602
41
         cast<Instruction>(Arg0)->getFastMathFlags().noInfs())) {
2603
13
      if (Arg0IsZero)
2604
6
        std::swap(A, B);
2605
13
      II->setArgOperand(0, A);
2606
13
      II->setArgOperand(1, B);
2607
13
      return II;
2608
13
    }
2609
28
    break;
2610
28
  }
2611
28
2612
32
  case Intrinsic::x86_avx512_add_ps_512:
2613
32
  case Intrinsic::x86_avx512_div_ps_512:
2614
32
  case Intrinsic::x86_avx512_mul_ps_512:
2615
32
  case Intrinsic::x86_avx512_sub_ps_512:
2616
32
  case Intrinsic::x86_avx512_add_pd_512:
2617
32
  case Intrinsic::x86_avx512_div_pd_512:
2618
32
  case Intrinsic::x86_avx512_mul_pd_512:
2619
32
  case Intrinsic::x86_avx512_sub_pd_512:
2620
32
    // If the rounding mode is CUR_DIRECTION(4) we can turn these into regular
2621
32
    // IR operations.
2622
32
    if (auto *R = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
2623
32
      if (R->getValue() == 4) {
2624
16
        Value *Arg0 = II->getArgOperand(0);
2625
16
        Value *Arg1 = II->getArgOperand(1);
2626
16
2627
16
        Value *V;
2628
16
        switch (IID) {
2629
16
        default: llvm_unreachable("Case stmts out of sync!");
2630
16
        case Intrinsic::x86_avx512_add_ps_512:
2631
4
        case Intrinsic::x86_avx512_add_pd_512:
2632
4
          V = Builder.CreateFAdd(Arg0, Arg1);
2633
4
          break;
2634
4
        case Intrinsic::x86_avx512_sub_ps_512:
2635
4
        case Intrinsic::x86_avx512_sub_pd_512:
2636
4
          V = Builder.CreateFSub(Arg0, Arg1);
2637
4
          break;
2638
4
        case Intrinsic::x86_avx512_mul_ps_512:
2639
4
        case Intrinsic::x86_avx512_mul_pd_512:
2640
4
          V = Builder.CreateFMul(Arg0, Arg1);
2641
4
          break;
2642
4
        case Intrinsic::x86_avx512_div_ps_512:
2643
4
        case Intrinsic::x86_avx512_div_pd_512:
2644
4
          V = Builder.CreateFDiv(Arg0, Arg1);
2645
4
          break;
2646
16
        }
2647
16
2648
16
        return replaceInstUsesWith(*II, V);
2649
16
      }
2650
32
    }
2651
16
    break;
2652
16
2653
64
  case Intrinsic::x86_avx512_mask_add_ss_round:
2654
64
  case Intrinsic::x86_avx512_mask_div_ss_round:
2655
64
  case Intrinsic::x86_avx512_mask_mul_ss_round:
2656
64
  case Intrinsic::x86_avx512_mask_sub_ss_round:
2657
64
  case Intrinsic::x86_avx512_mask_add_sd_round:
2658
64
  case Intrinsic::x86_avx512_mask_div_sd_round:
2659
64
  case Intrinsic::x86_avx512_mask_mul_sd_round:
2660
64
  case Intrinsic::x86_avx512_mask_sub_sd_round:
2661
64
    // If the rounding mode is CUR_DIRECTION(4) we can turn these into regular
2662
64
    // IR operations.
2663
64
    if (auto *R = dyn_cast<ConstantInt>(II->getArgOperand(4))) {
2664
64
      if (R->getValue() == 4) {
2665
16
        // Extract the element as scalars.
2666
16
        Value *Arg0 = II->getArgOperand(0);
2667
16
        Value *Arg1 = II->getArgOperand(1);
2668
16
        Value *LHS = Builder.CreateExtractElement(Arg0, (uint64_t)0);
2669
16
        Value *RHS = Builder.CreateExtractElement(Arg1, (uint64_t)0);
2670
16
2671
16
        Value *V;
2672
16
        switch (IID) {
2673
16
        default: llvm_unreachable("Case stmts out of sync!");
2674
16
        case Intrinsic::x86_avx512_mask_add_ss_round:
2675
4
        case Intrinsic::x86_avx512_mask_add_sd_round:
2676
4
          V = Builder.CreateFAdd(LHS, RHS);
2677
4
          break;
2678
4
        case Intrinsic::x86_avx512_mask_sub_ss_round:
2679
4
        case Intrinsic::x86_avx512_mask_sub_sd_round:
2680
4
          V = Builder.CreateFSub(LHS, RHS);
2681
4
          break;
2682
4
        case Intrinsic::x86_avx512_mask_mul_ss_round:
2683
4
        case Intrinsic::x86_avx512_mask_mul_sd_round:
2684
4
          V = Builder.CreateFMul(LHS, RHS);
2685
4
          break;
2686
4
        case Intrinsic::x86_avx512_mask_div_ss_round:
2687
4
        case Intrinsic::x86_avx512_mask_div_sd_round:
2688
4
          V = Builder.CreateFDiv(LHS, RHS);
2689
4
          break;
2690
16
        }
2691
16
2692
16
        // Handle the masking aspect of the intrinsic.
2693
16
        Value *Mask = II->getArgOperand(3);
2694
16
        auto *C = dyn_cast<ConstantInt>(Mask);
2695
16
        // We don't need a select if we know the mask bit is a 1.
2696
16
        if (!C || !C->getValue()[0]) {
2697
8
          // Cast the mask to an i1 vector and then extract the lowest element.
2698
8
          auto *MaskTy = VectorType::get(Builder.getInt1Ty(),
2699
8
                             cast<IntegerType>(Mask->getType())->getBitWidth());
2700
8
          Mask = Builder.CreateBitCast(Mask, MaskTy);
2701
8
          Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
2702
8
          // Extract the lowest element from the passthru operand.
2703
8
          Value *Passthru = Builder.CreateExtractElement(II->getArgOperand(2),
2704
8
                                                          (uint64_t)0);
2705
8
          V = Builder.CreateSelect(Mask, V, Passthru);
2706
8
        }
2707
16
2708
16
        // Insert the result back into the original argument 0.
2709
16
        V = Builder.CreateInsertElement(Arg0, V, (uint64_t)0);
2710
16
2711
16
        return replaceInstUsesWith(*II, V);
2712
16
      }
2713
64
    }
2714
48
    break;
2715
48
2716
48
  // Constant fold ashr( <A x Bi>, Ci ).
2717
48
  // Constant fold lshr( <A x Bi>, Ci ).
2718
48
  // Constant fold shl( <A x Bi>, Ci ).
2719
129
  case Intrinsic::x86_sse2_psrai_d:
2720
129
  case Intrinsic::x86_sse2_psrai_w:
2721
129
  case Intrinsic::x86_avx2_psrai_d:
2722
129
  case Intrinsic::x86_avx2_psrai_w:
2723
129
  case Intrinsic::x86_avx512_psrai_q_128:
2724
129
  case Intrinsic::x86_avx512_psrai_q_256:
2725
129
  case Intrinsic::x86_avx512_psrai_d_512:
2726
129
  case Intrinsic::x86_avx512_psrai_q_512:
2727
129
  case Intrinsic::x86_avx512_psrai_w_512:
2728
129
  case Intrinsic::x86_sse2_psrli_d:
2729
129
  case Intrinsic::x86_sse2_psrli_q:
2730
129
  case Intrinsic::x86_sse2_psrli_w:
2731
129
  case Intrinsic::x86_avx2_psrli_d:
2732
129
  case Intrinsic::x86_avx2_psrli_q:
2733
129
  case Intrinsic::x86_avx2_psrli_w:
2734
129
  case Intrinsic::x86_avx512_psrli_d_512:
2735
129
  case Intrinsic::x86_avx512_psrli_q_512:
2736
129
  case Intrinsic::x86_avx512_psrli_w_512:
2737
129
  case Intrinsic::x86_sse2_pslli_d:
2738
129
  case Intrinsic::x86_sse2_pslli_q:
2739
129
  case Intrinsic::x86_sse2_pslli_w:
2740
129
  case Intrinsic::x86_avx2_pslli_d:
2741
129
  case Intrinsic::x86_avx2_pslli_q:
2742
129
  case Intrinsic::x86_avx2_pslli_w:
2743
129
  case Intrinsic::x86_avx512_pslli_d_512:
2744
129
  case Intrinsic::x86_avx512_pslli_q_512:
2745
129
  case Intrinsic::x86_avx512_pslli_w_512:
2746
129
    if (Value *V = simplifyX86immShift(*II, Builder))
2747
129
      return replaceInstUsesWith(*II, V);
2748
0
    break;
2749
0
2750
266
  case Intrinsic::x86_sse2_psra_d:
2751
266
  case Intrinsic::x86_sse2_psra_w:
2752
266
  case Intrinsic::x86_avx2_psra_d:
2753
266
  case Intrinsic::x86_avx2_psra_w:
2754
266
  case Intrinsic::x86_avx512_psra_q_128:
2755
266
  case Intrinsic::x86_avx512_psra_q_256:
2756
266
  case Intrinsic::x86_avx512_psra_d_512:
2757
266
  case Intrinsic::x86_avx512_psra_q_512:
2758
266
  case Intrinsic::x86_avx512_psra_w_512:
2759
266
  case Intrinsic::x86_sse2_psrl_d:
2760
266
  case Intrinsic::x86_sse2_psrl_q:
2761
266
  case Intrinsic::x86_sse2_psrl_w:
2762
266
  case Intrinsic::x86_avx2_psrl_d:
2763
266
  case Intrinsic::x86_avx2_psrl_q:
2764
266
  case Intrinsic::x86_avx2_psrl_w:
2765
266
  case Intrinsic::x86_avx512_psrl_d_512:
2766
266
  case Intrinsic::x86_avx512_psrl_q_512:
2767
266
  case Intrinsic::x86_avx512_psrl_w_512:
2768
266
  case Intrinsic::x86_sse2_psll_d:
2769
266
  case Intrinsic::x86_sse2_psll_q:
2770
266
  case Intrinsic::x86_sse2_psll_w:
2771
266
  case Intrinsic::x86_avx2_psll_d:
2772
266
  case Intrinsic::x86_avx2_psll_q:
2773
266
  case Intrinsic::x86_avx2_psll_w:
2774
266
  case Intrinsic::x86_avx512_psll_d_512:
2775
266
  case Intrinsic::x86_avx512_psll_q_512:
2776
266
  case Intrinsic::x86_avx512_psll_w_512: {
2777
266
    if (Value *V = simplifyX86immShift(*II, Builder))
2778
134
      return replaceInstUsesWith(*II, V);
2779
132
2780
132
    // SSE2/AVX2 uses only the first 64-bits of the 128-bit vector
2781
132
    // operand to compute the shift amount.
2782
132
    Value *Arg1 = II->getArgOperand(1);
2783
132
    assert(Arg1->getType()->getPrimitiveSizeInBits() == 128 &&
2784
132
           "Unexpected packed shift size");
2785
132
    unsigned VWidth = Arg1->getType()->getVectorNumElements();
2786
132
2787
132
    if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, VWidth / 2)) {
2788
33
      II->setArgOperand(1, V);
2789
33
      return II;
2790
33
    }
2791
99
    break;
2792
99
  }
2793
99
2794
126
  case Intrinsic::x86_avx2_psllv_d:
2795
126
  case Intrinsic::x86_avx2_psllv_d_256:
2796
126
  case Intrinsic::x86_avx2_psllv_q:
2797
126
  case Intrinsic::x86_avx2_psllv_q_256:
2798
126
  case Intrinsic::x86_avx512_psllv_d_512:
2799
126
  case Intrinsic::x86_avx512_psllv_q_512:
2800
126
  case Intrinsic::x86_avx512_psllv_w_128:
2801
126
  case Intrinsic::x86_avx512_psllv_w_256:
2802
126
  case Intrinsic::x86_avx512_psllv_w_512:
2803
126
  case Intrinsic::x86_avx2_psrav_d:
2804
126
  case Intrinsic::x86_avx2_psrav_d_256:
2805
126
  case Intrinsic::x86_avx512_psrav_q_128:
2806
126
  case Intrinsic::x86_avx512_psrav_q_256:
2807
126
  case Intrinsic::x86_avx512_psrav_d_512:
2808
126
  case Intrinsic::x86_avx512_psrav_q_512:
2809
126
  case Intrinsic::x86_avx512_psrav_w_128:
2810
126
  case Intrinsic::x86_avx512_psrav_w_256:
2811
126
  case Intrinsic::x86_avx512_psrav_w_512:
2812
126
  case Intrinsic::x86_avx2_psrlv_d:
2813
126
  case Intrinsic::x86_avx2_psrlv_d_256:
2814
126
  case Intrinsic::x86_avx2_psrlv_q:
2815
126
  case Intrinsic::x86_avx2_psrlv_q_256:
2816
126
  case Intrinsic::x86_avx512_psrlv_d_512:
2817
126
  case Intrinsic::x86_avx512_psrlv_q_512:
2818
126
  case Intrinsic::x86_avx512_psrlv_w_128:
2819
126
  case Intrinsic::x86_avx512_psrlv_w_256:
2820
126
  case Intrinsic::x86_avx512_psrlv_w_512:
2821
126
    if (Value *V = simplifyX86varShift(*II, Builder))
2822
108
      return replaceInstUsesWith(*II, V);
2823
18
    break;
2824
18
2825
38
  case Intrinsic::x86_sse2_packssdw_128:
2826
38
  case Intrinsic::x86_sse2_packsswb_128:
2827
38
  case Intrinsic::x86_avx2_packssdw:
2828
38
  case Intrinsic::x86_avx2_packsswb:
2829
38
  case Intrinsic::x86_avx512_packssdw_512:
2830
38
  case Intrinsic::x86_avx512_packsswb_512:
2831
38
    if (Value *V = simplifyX86pack(*II, Builder, true))
2832
9
      return replaceInstUsesWith(*II, V);
2833
29
    break;
2834
29
2835
29
  case Intrinsic::x86_sse2_packuswb_128:
2836
19
  case Intrinsic::x86_sse41_packusdw:
2837
19
  case Intrinsic::x86_avx2_packusdw:
2838
19
  case Intrinsic::x86_avx2_packuswb:
2839
19
  case Intrinsic::x86_avx512_packusdw_512:
2840
19
  case Intrinsic::x86_avx512_packuswb_512:
2841
19
    if (Value *V = simplifyX86pack(*II, Builder, false))
2842
9
      return replaceInstUsesWith(*II, V);
2843
10
    break;
2844
10
2845
54
  case Intrinsic::x86_pclmulqdq:
2846
54
  case Intrinsic::x86_pclmulqdq_256:
2847
54
  case Intrinsic::x86_pclmulqdq_512: {
2848
54
    if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
2849
54
      unsigned Imm = C->getZExtValue();
2850
54
2851
54
      bool MadeChange = false;
2852
54
      Value *Arg0 = II->getArgOperand(0);
2853
54
      Value *Arg1 = II->getArgOperand(1);
2854
54
      unsigned VWidth = Arg0->getType()->getVectorNumElements();
2855
54
2856
54
      APInt UndefElts1(VWidth, 0);
2857
54
      APInt DemandedElts1 = APInt::getSplat(VWidth,
2858
54
                                            APInt(2, (Imm & 0x01) ? 2 : 1));
2859
54
      if (Value *V = SimplifyDemandedVectorElts(Arg0, DemandedElts1,
2860
24
                                                UndefElts1)) {
2861
24
        II->setArgOperand(0, V);
2862
24
        MadeChange = true;
2863
24
      }
2864
54
2865
54
      APInt UndefElts2(VWidth, 0);
2866
54
      APInt DemandedElts2 = APInt::getSplat(VWidth,
2867
54
                                            APInt(2, (Imm & 0x10) ? 2 : 1));
2868
54
      if (Value *V = SimplifyDemandedVectorElts(Arg1, DemandedElts2,
2869
24
                                                UndefElts2)) {
2870
24
        II->setArgOperand(1, V);
2871
24
        MadeChange = true;
2872
24
      }
2873
54
2874
54
      // If either input elements are undef, the result is zero.
2875
54
      if (DemandedElts1.isSubsetOf(UndefElts1) ||
2876
54
          DemandedElts2.isSubsetOf(UndefElts2))
2877
12
        return replaceInstUsesWith(*II,
2878
12
                                   ConstantAggregateZero::get(II->getType()));
2879
42
2880
42
      if (MadeChange)
2881
12
        return II;
2882
30
    }
2883
30
    break;
2884
30
  }
2885
30
2886
30
  case Intrinsic::x86_sse41_insertps:
2887
14
    if (Value *V = simplifyX86insertps(*II, Builder))
2888
13
      return replaceInstUsesWith(*II, V);
2889
1
    break;
2890
1
2891
20
  case Intrinsic::x86_sse4a_extrq: {
2892
20
    Value *Op0 = II->getArgOperand(0);
2893
20
    Value *Op1 = II->getArgOperand(1);
2894
20
    unsigned VWidth0 = Op0->getType()->getVectorNumElements();
2895
20
    unsigned VWidth1 = Op1->getType()->getVectorNumElements();
2896
20
    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
2897
20
           Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
2898
20
           VWidth1 == 16 && "Unexpected operand sizes");
2899
20
2900
20
    // See if we're dealing with constant values.
2901
20
    Constant *C1 = dyn_cast<Constant>(Op1);
2902
20
    ConstantInt *CILength =
2903
20
        C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)0))
2904
20
           : nullptr;
2905
20
    ConstantInt *CIIndex =
2906
20
        C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)1))
2907
20
           : nullptr;
2908
20
2909
20
    // Attempt to simplify to a constant, shuffle vector or EXTRQI call.
2910
20
    if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, Builder))
2911
6
      return replaceInstUsesWith(*II, V);
2912
14
2913
14
    // EXTRQ only uses the lowest 64-bits of the first 128-bit vector
2914
14
    // operands and the lowest 16-bits of the second.
2915
14
    bool MadeChange = false;
2916
14
    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
2917
2
      II->setArgOperand(0, V);
2918
2
      MadeChange = true;
2919
2
    }
2920
14
    if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 2)) {
2921
2
      II->setArgOperand(1, V);
2922
2
      MadeChange = true;
2923
2
    }
2924
14
    if (MadeChange)
2925
3
      return II;
2926
11
    break;
2927
11
  }
2928
11
2929
15
  case Intrinsic::x86_sse4a_extrqi: {
2930
15
    // EXTRQI: Extract Length bits starting from Index. Zero pad the remaining
2931
15
    // bits of the lower 64-bits. The upper 64-bits are undefined.
2932
15
    Value *Op0 = II->getArgOperand(0);
2933
15
    unsigned VWidth = Op0->getType()->getVectorNumElements();
2934
15
    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
2935
15
           "Unexpected operand size");
2936
15
2937
15
    // See if we're dealing with constant values.
2938
15
    ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(1));
2939
15
    ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(2));
2940
15
2941
15
    // Attempt to simplify to a constant or shuffle vector.
2942
15
    if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, Builder))
2943
7
      return replaceInstUsesWith(*II, V);
2944
8
2945
8
    // EXTRQI only uses the lowest 64-bits of the first 128-bit vector
2946
8
    // operand.
2947
8
    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
2948
1
      II->setArgOperand(0, V);
2949
1
      return II;
2950
1
    }
2951
7
    break;
2952
7
  }
2953
7
2954
10
  case Intrinsic::x86_sse4a_insertq: {
2955
10
    Value *Op0 = II->getArgOperand(0);
2956
10
    Value *Op1 = II->getArgOperand(1);
2957
10
    unsigned VWidth = Op0->getType()->getVectorNumElements();
2958
10
    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
2959
10
           Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
2960
10
           Op1->getType()->getVectorNumElements() == 2 &&
2961
10
           "Unexpected operand size");
2962
10
2963
10
    // See if we're dealing with constant values.
2964
10
    Constant *C1 = dyn_cast<Constant>(Op1);
2965
10
    ConstantInt *CI11 =
2966
10
        C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)1))
2967
10
           : nullptr;
2968
10
2969
10
    // Attempt to simplify to a constant, shuffle vector or INSERTQI call.
2970
10
    if (CI11) {
2971
4
      const APInt &V11 = CI11->getValue();
2972
4
      APInt Len = V11.zextOrTrunc(6);
2973
4
      APInt Idx = V11.lshr(8).zextOrTrunc(6);
2974
4
      if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, Builder))
2975
4
        return replaceInstUsesWith(*II, V);
2976
6
    }
2977
6
2978
6
    // INSERTQ only uses the lowest 64-bits of the first 128-bit vector
2979
6
    // operand.
2980
6
    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
2981
1
      II->setArgOperand(0, V);
2982
1
      return II;
2983
1
    }
2984
5
    break;
2985
5
  }
2986
5
2987
30
  case Intrinsic::x86_sse4a_insertqi: {
2988
30
    // INSERTQI: Extract lowest Length bits from lower half of second source and
2989
30
    // insert over first source starting at Index bit. The upper 64-bits are
2990
30
    // undefined.
2991
30
    Value *Op0 = II->getArgOperand(0);
2992
30
    Value *Op1 = II->getArgOperand(1);
2993
30
    unsigned VWidth0 = Op0->getType()->getVectorNumElements();
2994
30
    unsigned VWidth1 = Op1->getType()->getVectorNumElements();
2995
30
    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
2996
30
           Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
2997
30
           VWidth1 == 2 && "Unexpected operand sizes");
2998
30
2999
30
    // See if we're dealing with constant values.
3000
30
    ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(2));
3001
30
    ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(3));
3002
30
3003
30
    // Attempt to simplify to a constant or shuffle vector.
3004
30
    if (CILength && CIIndex) {
3005
30
      APInt Len = CILength->getValue().zextOrTrunc(6);
3006
30
      APInt Idx = CIIndex->getValue().zextOrTrunc(6);
3007
30
      if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, Builder))
3008
8
        return replaceInstUsesWith(*II, V);
3009
22
    }
3010
22
3011
22
    // INSERTQI only uses the lowest 64-bits of the first two 128-bit vector
3012
22
    // operands.
3013
22
    bool MadeChange = false;
3014
22
    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
3015
2
      II->setArgOperand(0, V);
3016
2
      MadeChange = true;
3017
2
    }
3018
22
    if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 1)) {
3019
5
      II->setArgOperand(1, V);
3020
5
      MadeChange = true;
3021
5
    }
3022
22
    if (MadeChange)
3023
6
      return II;
3024
16
    break;
3025
16
  }
3026
16
3027
26
  case Intrinsic::x86_sse41_pblendvb:
3028
26
  case Intrinsic::x86_sse41_blendvps:
3029
26
  case Intrinsic::x86_sse41_blendvpd:
3030
26
  case Intrinsic::x86_avx_blendv_ps_256:
3031
26
  case Intrinsic::x86_avx_blendv_pd_256:
3032
26
  case Intrinsic::x86_avx2_pblendvb: {
3033
26
    // fold (blend A, A, Mask) -> A
3034
26
    Value *Op0 = II->getArgOperand(0);
3035
26
    Value *Op1 = II->getArgOperand(1);
3036
26
    Value *Mask = II->getArgOperand(2);
3037
26
    if (Op0 == Op1)
3038
6
      return replaceInstUsesWith(CI, Op0);
3039
20
3040
20
    // Zero Mask - select 1st argument.
3041
20
    if (isa<ConstantAggregateZero>(Mask))
3042
6
      return replaceInstUsesWith(CI, Op0);
3043
14
3044
14
    // Constant Mask - select 1st/2nd argument lane based on top bit of mask.
3045
14
    if (auto *ConstantMask = dyn_cast<ConstantDataVector>(Mask)) {
3046
6
      Constant *NewSelector = getNegativeIsTrueBoolVec(ConstantMask);
3047
6
      return SelectInst::Create(NewSelector, Op1, Op0, "blendv");
3048
6
    }
3049
8
3050
8
    // Convert to a vector select if we can bypass casts and find a boolean
3051
8
    // vector condition value.
3052
8
    Value *BoolVec;
3053
8
    Mask = peekThroughBitcast(Mask);
3054
8
    if (match(Mask, m_SExt(m_Value(BoolVec))) &&
3055
8
        BoolVec->getType()->isVectorTy() &&
3056
8
        BoolVec->getType()->getScalarSizeInBits() == 1) {
3057
8
      assert(Mask->getType()->getPrimitiveSizeInBits() ==
3058
8
             II->getType()->getPrimitiveSizeInBits() &&
3059
8
             "Not expecting mask and operands with different sizes");
3060
8
3061
8
      unsigned NumMaskElts = Mask->getType()->getVectorNumElements();
3062
8
      unsigned NumOperandElts = II->getType()->getVectorNumElements();
3063
8
      if (NumMaskElts == NumOperandElts)
3064
6
        return SelectInst::Create(BoolVec, Op1, Op0);
3065
2
3066
2
      // If the mask has less elements than the operands, each mask bit maps to
3067
2
      // multiple elements of the operands. Bitcast back and forth.
3068
2
      if (NumMaskElts < NumOperandElts) {
3069
2
        Value *CastOp0 = Builder.CreateBitCast(Op0, Mask->getType());
3070
2
        Value *CastOp1 = Builder.CreateBitCast(Op1, Mask->getType());
3071
2
        Value *Sel = Builder.CreateSelect(BoolVec, CastOp1, CastOp0);
3072
2
        return new BitCastInst(Sel, II->getType());
3073
2
      }
3074
0
    }
3075
0
3076
0
    break;
3077
0
  }
3078
0
3079
55
  case Intrinsic::x86_ssse3_pshuf_b_128:
3080
55
  case Intrinsic::x86_avx2_pshuf_b:
3081
55
  case Intrinsic::x86_avx512_pshuf_b_512:
3082
55
    if (Value *V = simplifyX86pshufb(*II, Builder))
3083
48
      return replaceInstUsesWith(*II, V);
3084
7
    break;
3085
7
3086
39
  case Intrinsic::x86_avx_vpermilvar_ps:
3087
39
  case Intrinsic::x86_avx_vpermilvar_ps_256:
3088
39
  case Intrinsic::x86_avx512_vpermilvar_ps_512:
3089
39
  case Intrinsic::x86_avx_vpermilvar_pd:
3090
39
  case Intrinsic::x86_avx_vpermilvar_pd_256:
3091
39
  case Intrinsic::x86_avx512_vpermilvar_pd_512:
3092
39
    if (Value *V = simplifyX86vpermilvar(*II, Builder))
3093
28
      return replaceInstUsesWith(*II, V);
3094
11
    break;
3095
11
3096
124
  case Intrinsic::x86_avx2_permd:
3097
124
  case Intrinsic::x86_avx2_permps:
3098
124
  case Intrinsic::x86_avx512_permvar_df_256:
3099
124
  case Intrinsic::x86_avx512_permvar_df_512:
3100
124
  case Intrinsic::x86_avx512_permvar_di_256:
3101
124
  case Intrinsic::x86_avx512_permvar_di_512:
3102
124
  case Intrinsic::x86_avx512_permvar_hi_128:
3103
124
  case Intrinsic::x86_avx512_permvar_hi_256:
3104
124
  case Intrinsic::x86_avx512_permvar_hi_512:
3105
124
  case Intrinsic::x86_avx512_permvar_qi_128:
3106
124
  case Intrinsic::x86_avx512_permvar_qi_256:
3107
124
  case Intrinsic::x86_avx512_permvar_qi_512:
3108
124
  case Intrinsic::x86_avx512_permvar_sf_512:
3109
124
  case Intrinsic::x86_avx512_permvar_si_512:
3110
124
    if (Value *V = simplifyX86vpermv(*II, Builder))
3111
121
      return replaceInstUsesWith(*II, V);
3112
3
    break;
3113
3
3114
12
  case Intrinsic::x86_avx_maskload_ps:
3115
12
  case Intrinsic::x86_avx_maskload_pd:
3116
12
  case Intrinsic::x86_avx_maskload_ps_256:
3117
12
  case Intrinsic::x86_avx_maskload_pd_256:
3118
12
  case Intrinsic::x86_avx2_maskload_d:
3119
12
  case Intrinsic::x86_avx2_maskload_q:
3120
12
  case Intrinsic::x86_avx2_maskload_d_256:
3121
12
  case Intrinsic::x86_avx2_maskload_q_256:
3122
12
    if (Instruction *I = simplifyX86MaskedLoad(*II, *this))
3123
11
      return I;
3124
1
    break;
3125
1
3126
22
  case Intrinsic::x86_sse2_maskmov_dqu:
3127
22
  case Intrinsic::x86_avx_maskstore_ps:
3128
22
  case Intrinsic::x86_avx_maskstore_pd:
3129
22
  case Intrinsic::x86_avx_maskstore_ps_256:
3130
22
  case Intrinsic::x86_avx_maskstore_pd_256:
3131
22
  case Intrinsic::x86_avx2_maskstore_d:
3132
22
  case Intrinsic::x86_avx2_maskstore_q:
3133
22
  case Intrinsic::x86_avx2_maskstore_d_256:
3134
22
  case Intrinsic::x86_avx2_maskstore_q_256:
3135
22
    if (simplifyX86MaskedStore(*II, *this))
3136
12
      return nullptr;
3137
10
    break;
3138
10
3139
10
  case Intrinsic::x86_addcarry_32:
3140
2
  case Intrinsic::x86_addcarry_64:
3141
2
    if (Value *V = simplifyX86addcarry(*II, Builder))
3142
2
      return replaceInstUsesWith(*II, V);
3143
0
    break;
3144
0
3145
2
  case Intrinsic::ppc_altivec_vperm:
3146
2
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
3147
2
    // Note that ppc_altivec_vperm has a big-endian bias, so when creating
3148
2
    // a vectorshuffle for little endian, we must undo the transformation
3149
2
    // performed on vec_perm in altivec.h.  That is, we must complement
3150
2
    // the permutation mask with respect to 31 and reverse the order of
3151
2
    // V1 and V2.
3152
2
    if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
3153
2
      assert(Mask->getType()->getVectorNumElements() == 16 &&
3154
2
             "Bad type for intrinsic!");
3155
2
3156
2
      // Check that all of the elements are integer constants or undefs.
3157
2
      bool AllEltsOk = true;
3158
34
      for (unsigned i = 0; i != 16; ++i) {
3159
32
        Constant *Elt = Mask->getAggregateElement(i);
3160
32
        if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
3161
0
          AllEltsOk = false;
3162
0
          break;
3163
0
        }
3164
32
      }
3165
2
3166
2
      if (AllEltsOk) {
3167
2
        // Cast the input vectors to byte vectors.
3168
2
        Value *Op0 = Builder.CreateBitCast(II->getArgOperand(0),
3169
2
                                           Mask->getType());
3170
2
        Value *Op1 = Builder.CreateBitCast(II->getArgOperand(1),
3171
2
                                           Mask->getType());
3172
2
        Value *Result = UndefValue::get(Op0->getType());
3173
2
3174
2
        // Only extract each element once.
3175
2
        Value *ExtractedElts[32];
3176
2
        memset(ExtractedElts, 0, sizeof(ExtractedElts));
3177
2
3178
34
        for (unsigned i = 0; i != 16; ++i) {
3179
32
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
3180
0
            continue;
3181
32
          unsigned Idx =
3182
32
            cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
3183
32
          Idx &= 31;  // Match the hardware behavior.
3184
32
          if (DL.isLittleEndian())
3185
32
            Idx = 31 - Idx;
3186
32
3187
32
          if (!ExtractedElts[Idx]) {
3188
17
            Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
3189
17
            Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
3190
17
            ExtractedElts[Idx] =
3191
17
              Builder.CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
3192
17
                                           Builder.getInt32(Idx&15));
3193
17
          }
3194
32
3195
32
          // Insert this value into the result vector.
3196
32
          Result = Builder.CreateInsertElement(Result, ExtractedElts[Idx],
3197
32
                                               Builder.getInt32(i));
3198
32
        }
3199
2
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
3200
2
      }
3201
0
    }
3202
0
    break;
3203
0
3204
10
  case Intrinsic::arm_neon_vld1: {
3205
10
    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0),
3206
10
                                          DL, II, &AC, &DT);
3207
10
    if (Value *V = simplifyNeonVld1(*II, MemAlign, Builder))
3208
8
      return replaceInstUsesWith(*II, V);
3209
2
    break;
3210
2
  }
3211
2
3212
6
  case Intrinsic::arm_neon_vld2:
3213
6
  case Intrinsic::arm_neon_vld3:
3214
6
  case Intrinsic::arm_neon_vld4:
3215
6
  case Intrinsic::arm_neon_vld2lane:
3216
6
  case Intrinsic::arm_neon_vld3lane:
3217
6
  case Intrinsic::arm_neon_vld4lane:
3218
6
  case Intrinsic::arm_neon_vst1:
3219
6
  case Intrinsic::arm_neon_vst2:
3220
6
  case Intrinsic::arm_neon_vst3:
3221
6
  case Intrinsic::arm_neon_vst4:
3222
6
  case Intrinsic::arm_neon_vst2lane:
3223
6
  case Intrinsic::arm_neon_vst3lane:
3224
6
  case Intrinsic::arm_neon_vst4lane: {
3225
6
    unsigned MemAlign =
3226
6
        getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT);
3227
6
    unsigned AlignArg = II->getNumArgOperands() - 1;
3228
6
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
3229
6
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
3230
2
      II->setArgOperand(AlignArg,
3231
2
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
3232
2
                                         MemAlign, false));
3233
2
      return II;
3234
2
    }
3235
4
    break;
3236
4
  }
3237
4
3238
301
  case Intrinsic::arm_neon_vtbl1:
3239
301
  case Intrinsic::aarch64_neon_tbl1:
3240
301
    if (Value *V = simplifyNeonTbl1(*II, Builder))
3241
2
      return replaceInstUsesWith(*II, V);
3242
299
    break;
3243
299
3244
371
  case Intrinsic::arm_neon_vmulls:
3245
371
  case Intrinsic::arm_neon_vmullu:
3246
371
  case Intrinsic::aarch64_neon_smull:
3247
371
  case Intrinsic::aarch64_neon_umull: {
3248
371
    Value *Arg0 = II->getArgOperand(0);
3249
371
    Value *Arg1 = II->getArgOperand(1);
3250
371
3251
371
    // Handle mul by zero first:
3252
371
    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
3253
9
      return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
3254
9
    }
3255
362
3256
362
    // Check for constant LHS & RHS - in this case we just simplify.
3257
362
    bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
3258
362
                 IID == Intrinsic::aarch64_neon_umull);
3259
362
    VectorType *NewVT = cast<VectorType>(II->getType());
3260
362
    if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
3261
107
      if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
3262
104
        CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
3263
104
        CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);
3264
104
3265
104
        return replaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
3266
104
      }
3267
3
3268
3
      // Couldn't simplify - canonicalize constant to the RHS.
3269
3
      std::swap(Arg0, Arg1);
3270
3
    }
3271
362
3272
362
    // Handle mul by one:
3273
362
    if (Constant *CV1 = dyn_cast<Constant>(Arg1))
3274
56
      if (ConstantInt *Splat =
3275
56
              dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
3276
56
        if (Splat->isOne())
3277
1
          return CastInst::CreateIntegerCast(Arg0, II->getType(),
3278
1
                                             /*isSigned=*/!Zext);
3279
257
3280
257
    break;
3281
257
  }
3282
257
  case Intrinsic::arm_neon_aesd:
3283
108
  case Intrinsic::arm_neon_aese:
3284
108
  case Intrinsic::aarch64_crypto_aesd:
3285
108
  case Intrinsic::aarch64_crypto_aese: {
3286
108
    Value *DataArg = II->getArgOperand(0);
3287
108
    Value *KeyArg  = II->getArgOperand(1);
3288
108
3289
108
    // Try to use the builtin XOR in AESE and AESD to eliminate a prior XOR
3290
108
    Value *Data, *Key;
3291
108
    if (match(KeyArg, m_ZeroInt()) &&
3292
108
        match(DataArg, m_Xor(m_Value(Data), m_Value(Key)))) {
3293
4
      II->setArgOperand(0, Data);
3294
4
      II->setArgOperand(1, Key);
3295
4
      return II;
3296
4
    }
3297
104
    break;
3298
104
  }
3299
104
  case Intrinsic::amdgcn_rcp: {
3300
55
    Value *Src = II->getArgOperand(0);
3301
55
3302
55
    // TODO: Move to ConstantFolding/InstSimplify?
3303
55
    if (isa<UndefValue>(Src))
3304
1
      return replaceInstUsesWith(CI, Src);
3305
54
3306
54
    if (const ConstantFP *C = dyn_cast<ConstantFP>(Src)) {
3307
6
      const APFloat &ArgVal = C->getValueAPF();
3308
6
      APFloat Val(ArgVal.getSemantics(), 1.0);
3309
6
      APFloat::opStatus Status = Val.divide(ArgVal,
3310
6
                                            APFloat::rmNearestTiesToEven);
3311
6
      // Only do this if it was exact and therefore not dependent on the
3312
6
      // rounding mode.
3313
6
      if (Status == APFloat::opOK)
3314
4
        return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
3315
50
    }
3316
50
3317
50
    break;
3318
50
  }
3319
50
  case Intrinsic::amdgcn_rsq: {
3320
49
    Value *Src = II->getArgOperand(0);
3321
49
3322
49
    // TODO: Move to ConstantFolding/InstSimplify?
3323
49
    if (isa<UndefValue>(Src))
3324
1
      return replaceInstUsesWith(CI, Src);
3325
48
    break;
3326
48
  }
3327
138
  case Intrinsic::amdgcn_frexp_mant:
3328
138
  case Intrinsic::amdgcn_frexp_exp: {
3329
138
    Value *Src = II->getArgOperand(0);
3330
138
    if (const ConstantFP *C = dyn_cast<ConstantFP>(Src)) {
3331
38
      int Exp;
3332
38
      APFloat Significand = frexp(C->getValueAPF(), Exp,
3333
38
                                  APFloat::rmNearestTiesToEven);
3334
38
3335
38
      if (IID == Intrinsic::amdgcn_frexp_mant) {
3336
18
        return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(),
3337
18
                                                       Significand));
3338
18
      }
3339
20
3340
20
      // Match instruction special case behavior.
3341
20
      if (Exp == APFloat::IEK_NaN || Exp == APFloat::IEK_Inf)
3342
6
        Exp = 0;
3343
20
3344
20
      return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Exp));
3345
20
    }
3346
100
3347
100
    if (isa<UndefValue>(Src))
3348
4
      return replaceInstUsesWith(CI, UndefValue::get(II->getType()));
3349
96
3350
96
    break;
3351
96
  }
3352
96
  case Intrinsic::amdgcn_class: {
3353
92
    enum  {
3354
92
      S_NAN = 1 << 0,        // Signaling NaN
3355
92
      Q_NAN = 1 << 1,        // Quiet NaN
3356
92
      N_INFINITY = 1 << 2,   // Negative infinity
3357
92
      N_NORMAL = 1 << 3,     // Negative normal
3358
92
      N_SUBNORMAL = 1 << 4,  // Negative subnormal
3359
92
      N_ZERO = 1 << 5,       // Negative zero
3360
92
      P_ZERO = 1 << 6,       // Positive zero
3361
92
      P_SUBNORMAL = 1 << 7,  // Positive subnormal
3362
92
      P_NORMAL = 1 << 8,     // Positive normal
3363
92
      P_INFINITY = 1 << 9    // Positive infinity
3364
92
    };
3365
92
3366
92
    const uint32_t FullMask = S_NAN | Q_NAN | N_INFINITY | N_NORMAL |
3367
92
      N_SUBNORMAL | N_ZERO | P_ZERO | P_SUBNORMAL | P_NORMAL | P_INFINITY;
3368
92
3369
92
    Value *Src0 = II->getArgOperand(0);
3370
92
    Value *Src1 = II->getArgOperand(1);
3371
92
    const ConstantInt *CMask = dyn_cast<ConstantInt>(Src1);
3372
92
    if (!CMask) {
3373
51
      if (isa<UndefValue>(Src0))
3374
1
        return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3375
50
3376
50
      if (isa<UndefValue>(Src1))
3377
1
        return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), false));
3378
49
      break;
3379
49
    }
3380
41
3381
41
    uint32_t Mask = CMask->getZExtValue();
3382
41
3383
41
    // If all tests are made, it doesn't matter what the value is.
3384
41
    if ((Mask & FullMask) == FullMask)
3385
2
      return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), true));
3386
39
3387
39
    if ((Mask & FullMask) == 0)
3388
4
      return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), false));
3389
35
3390
35
    if (Mask == (S_NAN | Q_NAN)) {
3391
2
      // Equivalent of isnan. Replace with standard fcmp.
3392
2
      Value *FCmp = Builder.CreateFCmpUNO(Src0, Src0);
3393
2
      FCmp->takeName(II);
3394
2
      return replaceInstUsesWith(*II, FCmp);
3395
2
    }
3396
33
3397
33
    if (Mask == (N_ZERO | P_ZERO)) {
3398
1
      // Equivalent of == 0.
3399
1
      Value *FCmp = Builder.CreateFCmpOEQ(
3400
1
        Src0, ConstantFP::get(Src0->getType(), 0.0));
3401
1
3402
1
      FCmp->takeName(II);
3403
1
      return replaceInstUsesWith(*II, FCmp);
3404
1
    }
3405
32
3406
32
    // fp_class (nnan x), qnan|snan|other -> fp_class (nnan x), other
3407
32
    if (((Mask & S_NAN) || (Mask & Q_NAN)) && isKnownNeverNaN(Src0, &TLI)) {
3408
3
      II->setArgOperand(1, ConstantInt::get(Src1->getType(),
3409
3
                                            Mask & ~(S_NAN | Q_NAN)));
3410
3
      return II;
3411
3
    }
3412
29
3413
29
    const ConstantFP *CVal = dyn_cast<ConstantFP>(Src0);
3414
29
    if (!CVal) {
3415
6
      if (isa<UndefValue>(Src0))
3416
1
        return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3417
5
3418
5
      // Clamp mask to used bits
3419
5
      if ((Mask & FullMask) != Mask) {
3420
1
        CallInst *NewCall = Builder.CreateCall(II->getCalledFunction(),
3421
1
          { Src0, ConstantInt::get(Src1->getType(), Mask & FullMask) }
3422
1
        );
3423
1
3424
1
        NewCall->takeName(II);
3425
1
        return replaceInstUsesWith(*II, NewCall);
3426
1
      }
3427
4
3428
4
      break;
3429
4
    }
3430
23
3431
23
    const APFloat &Val = CVal->getValueAPF();
3432
23
3433
23
    bool Result =
3434
23
      ((Mask & S_NAN) && Val.isNaN() && Val.isSignaling()) ||
3435
23
      ((Mask & Q_NAN) && Val.isNaN() && !Val.isSignaling()) ||
3436
23
      ((Mask & N_INFINITY) && Val.isInfinity() && Val.isNegative()) ||
3437
23
      ((Mask & N_NORMAL) && Val.isNormal() && Val.isNegative()) ||
3438
23
      ((Mask & N_SUBNORMAL) && Val.isDenormal() && Val.isNegative()) ||
3439
23
      ((Mask & N_ZERO) && Val.isZero() && Val.isNegative()) ||
3440
23
      ((Mask & P_ZERO) && Val.isZero() && !Val.isNegative()) ||
3441
23
      ((Mask & P_SUBNORMAL) && Val.isDenormal() && !Val.isNegative()) ||
3442
23
      ((Mask & P_NORMAL) && Val.isNormal() && !Val.isNegative()) ||
3443
23
      ((Mask & P_INFINITY) && Val.isInfinity() && !Val.isNegative());
3444
23
3445
23
    return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), Result));
3446
23
  }
3447
23
  case Intrinsic::amdgcn_cvt_pkrtz: {
3448
17
    Value *Src0 = II->getArgOperand(0);
3449
17
    Value *Src1 = II->getArgOperand(1);
3450
17
    if (const ConstantFP *C0 = dyn_cast<ConstantFP>(Src0)) {
3451
4
      if (const ConstantFP *C1 = dyn_cast<ConstantFP>(Src1)) {
3452
3
        const fltSemantics &HalfSem
3453
3
          = II->getType()->getScalarType()->getFltSemantics();
3454
3
        bool LosesInfo;
3455
3
        APFloat Val0 = C0->getValueAPF();
3456
3
        APFloat Val1 = C1->getValueAPF();
3457
3
        Val0.convert(HalfSem, APFloat::rmTowardZero, &LosesInfo);
3458
3
        Val1.convert(HalfSem, APFloat::rmTowardZero, &LosesInfo);
3459
3
3460
3
        Constant *Folded = ConstantVector::get({
3461
3
            ConstantFP::get(II->getContext(), Val0),
3462
3
            ConstantFP::get(II->getContext(), Val1) });
3463
3
        return replaceInstUsesWith(*II, Folded);
3464
3
      }
3465
14
    }
3466
14
3467
14
    if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1))
3468
1
      return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3469
13
3470
13
    break;
3471
13
  }
3472
44
  case Intrinsic::amdgcn_cvt_pknorm_i16:
3473
44
  case Intrinsic::amdgcn_cvt_pknorm_u16:
3474
44
  case Intrinsic::amdgcn_cvt_pk_i16:
3475
44
  case Intrinsic::amdgcn_cvt_pk_u16: {
3476
44
    Value *Src0 = II->getArgOperand(0);
3477
44
    Value *Src1 = II->getArgOperand(1);
3478
44
3479
44
    if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1))
3480
4
      return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3481
40
3482
40
    break;
3483
40
  }
3484
52
  case Intrinsic::amdgcn_ubfe:
3485
52
  case Intrinsic::amdgcn_sbfe: {
3486
52
    // Decompose simple cases into standard shifts.
3487
52
    Value *Src = II->getArgOperand(0);
3488
52
    if (isa<UndefValue>(Src))
3489
1
      return replaceInstUsesWith(*II, Src);
3490
51
3491
51
    unsigned Width;
3492
51
    Type *Ty = II->getType();
3493
51
    unsigned IntSize = Ty->getIntegerBitWidth();
3494
51
3495
51
    ConstantInt *CWidth = dyn_cast<ConstantInt>(II->getArgOperand(2));
3496
51
    if (CWidth) {
3497
19
      Width = CWidth->getZExtValue();
3498
19
      if ((Width & (IntSize - 1)) == 0)
3499
4
        return replaceInstUsesWith(*II, ConstantInt::getNullValue(Ty));
3500
15
3501
15
      if (Width >= IntSize) {
3502
2
        // Hardware ignores high bits, so remove those.
3503
2
        II->setArgOperand(2, ConstantInt::get(CWidth->getType(),
3504
2
                                              Width & (IntSize - 1)));
3505
2
        return II;
3506
2
      }
3507
45
    }
3508
45
3509
45
    unsigned Offset;
3510
45
    ConstantInt *COffset = dyn_cast<ConstantInt>(II->getArgOperand(1));
3511
45
    if (COffset) {
3512
21
      Offset = COffset->getZExtValue();
3513
21
      if (Offset >= IntSize) {
3514
3
        II->setArgOperand(1, ConstantInt::get(COffset->getType(),
3515
3
                                              Offset & (IntSize - 1)));
3516
3
        return II;
3517
3
      }
3518
42
    }
3519
42
3520
42
    bool Signed = IID == Intrinsic::amdgcn_sbfe;
3521
42
3522
42
    if (!CWidth || !COffset)
3523
34
      break;
3524
8
3525
8
    // The case of Width == 0 is handled above, which makes this tranformation
3526
8
    // safe.  If Width == 0, then the ashr and lshr instructions become poison
3527
8
    // value since the shift amount would be equal to the bit size.
3528
8
    assert(Width != 0);
3529
8
3530
8
    // TODO: This allows folding to undef when the hardware has specific
3531
8
    // behavior?
3532
8
    if (Offset + Width < IntSize) {
3533
6
      Value *Shl = Builder.CreateShl(Src, IntSize - Offset - Width);
3534
6
      Value *RightShift = Signed ? Builder.CreateAShr(Shl, IntSize - Width)
3535
6
                                 : Builder.CreateLShr(Shl, IntSize - Width);
3536
6
      RightShift->takeName(II);
3537
6
      return replaceInstUsesWith(*II, RightShift);
3538
6
    }
3539
2
3540
2
    Value *RightShift = Signed ? Builder.CreateAShr(Src, Offset)
3541
2
                               : Builder.CreateLShr(Src, Offset);
3542
2
3543
2
    RightShift->takeName(II);
3544
2
    return replaceInstUsesWith(*II, RightShift);
3545
2
  }
3546
67
  case Intrinsic::amdgcn_exp:
3547
67
  case Intrinsic::amdgcn_exp_compr: {
3548
67
    ConstantInt *En = cast<ConstantInt>(II->getArgOperand(1));
3549
67
    unsigned EnBits = En->getZExtValue();
3550
67
    if (EnBits == 0xf)
3551
4
      break; // All inputs enabled.
3552
63
3553
63
    bool IsCompr = IID == Intrinsic::amdgcn_exp_compr;
3554
63
    bool Changed = false;
3555
261
    for (int I = 0; I < (IsCompr ? 2 : 4); ++I) {
3556
198
      if ((!IsCompr && (EnBits & (1 << I)) == 0) ||
3557
198
          (IsCompr && ((EnBits & (0x3 << (2 * I))) == 0))) {
3558
135
        Value *Src = II->getArgOperand(I + 2);
3559
135
        if (!isa<UndefValue>(Src)) {
3560
45
          II->setArgOperand(I + 2, UndefValue::get(Src->getType()));
3561
45
          Changed = true;
3562
45
        }
3563
135
      }
3564
198
    }
3565
63
3566
63
    if (Changed)
3567
21
      return II;
3568
42
3569
42
    break;
3570
42
  }
3571
63
  case Intrinsic::amdgcn_fmed3: {
3572
63
    // Note this does not preserve proper sNaN behavior if IEEE-mode is enabled
3573
63
    // for the shader.
3574
63
3575
63
    Value *Src0 = II->getArgOperand(0);
3576
63
    Value *Src1 = II->getArgOperand(1);
3577
63
    Value *Src2 = II->getArgOperand(2);
3578
63
3579
63
    // Checking for NaN before canonicalization provides better fidelity when
3580
63
    // mapping other operations onto fmed3 since the order of operands is
3581
63
    // unchanged.
3582
63
    CallInst *NewCall = nullptr;
3583
63
    if (match(Src0, m_NaN()) || isa<UndefValue>(Src0)) {
3584
9
      NewCall = Builder.CreateMinNum(Src1, Src2);
3585
54
    } else if (match(Src1, m_NaN()) || isa<UndefValue>(Src1)) {
3586
5
      NewCall = Builder.CreateMinNum(Src0, Src2);
3587
49
    } else if (match(Src2, m_NaN()) || isa<UndefValue>(Src2)) {
3588
4
      NewCall = Builder.CreateMaxNum(Src0, Src1);
3589
4
    }
3590
63
3591
63
    if (NewCall) {
3592
18
      NewCall->copyFastMathFlags(II);
3593
18
      NewCall->takeName(II);
3594
18
      return replaceInstUsesWith(*II, NewCall);
3595
18
    }
3596
45
3597
45
    bool Swap = false;
3598
45
    // Canonicalize constants to RHS operands.
3599
45
    //
3600
45
    // fmed3(c0, x, c1) -> fmed3(x, c0, c1)
3601
45
    if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
3602
2
      std::swap(Src0, Src1);
3603
2
      Swap = true;
3604
2
    }
3605
45
3606
45
    if (isa<Constant>(Src1) && !isa<Constant>(Src2)) {
3607
3
      std::swap(Src1, Src2);
3608
3
      Swap = true;
3609
3
    }
3610
45
3611
45
    if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
3612
1
      std::swap(Src0, Src1);
3613
1
      Swap = true;
3614
1
    }
3615
45
3616
45
    if (Swap) {
3617
4
      II->setArgOperand(0, Src0);
3618
4
      II->setArgOperand(1, Src1);
3619
4
      II->setArgOperand(2, Src2);
3620
4
      return II;
3621
4
    }
3622
41
3623
41
    if (const ConstantFP *C0 = dyn_cast<ConstantFP>(Src0)) {
3624
6
      if (const ConstantFP *C1 = dyn_cast<ConstantFP>(Src1)) {
3625
6
        if (const ConstantFP *C2 = dyn_cast<ConstantFP>(Src2)) {
3626
6
          APFloat Result = fmed3AMDGCN(C0->getValueAPF(), C1->getValueAPF(),
3627
6
                                       C2->getValueAPF());
3628
6
          return replaceInstUsesWith(*II,
3629
6
            ConstantFP::get(Builder.getContext(), Result));
3630
6
        }
3631
35
      }
3632
6
    }
3633
35
3634
35
    break;
3635
35
  }
3636
190
  case Intrinsic::amdgcn_icmp:
3637
190
  case Intrinsic::amdgcn_fcmp: {
3638
190
    const ConstantInt *CC = cast<ConstantInt>(II->getArgOperand(2));
3639
190
    // Guard against invalid arguments.
3640
190
    int64_t CCVal = CC->getZExtValue();
3641
190
    bool IsInteger = IID == Intrinsic::amdgcn_icmp;
3642
190
    if ((IsInteger && (CCVal < CmpInst::FIRST_ICMP_PREDICATE ||
3643
153
                       CCVal > CmpInst::LAST_ICMP_PREDICATE)) ||
3644
190
        (!IsInteger && (CCVal < CmpInst::FIRST_FCMP_PREDICATE ||
3645
37
                        CCVal > CmpInst::LAST_FCMP_PREDICATE)))
3646
4
      break;
3647
186
3648
186
    Value *Src0 = II->getArgOperand(0);
3649
186
    Value *Src1 = II->getArgOperand(1);
3650
186
3651
186
    if (auto *CSrc0 = dyn_cast<Constant>(Src0)) {
3652
8
      if (auto *CSrc1 = dyn_cast<Constant>(Src1)) {
3653
4
        Constant *CCmp = ConstantExpr::getCompare(CCVal, CSrc0, CSrc1);
3654
4
        if (CCmp->isNullValue()) {
3655
2
          return replaceInstUsesWith(
3656
2
              *II, ConstantExpr::getSExt(CCmp, II->getType()));
3657
2
        }
3658
2
3659
2
        // The result of V_ICMP/V_FCMP assembly instructions (which this
3660
2
        // intrinsic exposes) is one bit per thread, masked with the EXEC
3661
2
        // register (which contains the bitmask of live threads). So a
3662
2
        // comparison that always returns true is the same as a read of the
3663
2
        // EXEC register.
3664
2
        Function *NewF = Intrinsic::getDeclaration(
3665
2
            II->getModule(), Intrinsic::read_register, II->getType());
3666
2
        Metadata *MDArgs[] = {MDString::get(II->getContext(), "exec")};
3667
2
        MDNode *MD = MDNode::get(II->getContext(), MDArgs);
3668
2
        Value *Args[] = {MetadataAsValue::get(II->getContext(), MD)};
3669
2
        CallInst *NewCall = Builder.CreateCall(NewF, Args);
3670
2
        NewCall->addAttribute(AttributeList::FunctionIndex,
3671
2
                              Attribute::Convergent);
3672
2
        NewCall->takeName(II);
3673
2
        return replaceInstUsesWith(*II, NewCall);
3674
2
      }
3675
4
3676
4
      // Canonicalize constants to RHS.
3677
4
      CmpInst::Predicate SwapPred
3678
4
        = CmpInst::getSwappedPredicate(static_cast<CmpInst::Predicate>(CCVal));
3679
4
      II->setArgOperand(0, Src1);
3680
4
      II->setArgOperand(1, Src0);
3681
4
      II->setArgOperand(2, ConstantInt::get(CC->getType(),
3682
4
                                            static_cast<int>(SwapPred)));
3683
4
      return II;
3684
4
    }
3685
178
3686
178
    if (CCVal != CmpInst::ICMP_EQ && CCVal != CmpInst::ICMP_NE)
3687
74
      break;
3688
104
3689
104
    // Canonicalize compare eq with true value to compare != 0
3690
104
    // llvm.amdgcn.icmp(zext (i1 x), 1, eq)
3691
104
    //   -> llvm.amdgcn.icmp(zext (i1 x), 0, ne)
3692
104
    // llvm.amdgcn.icmp(sext (i1 x), -1, eq)
3693
104
    //   -> llvm.amdgcn.icmp(sext (i1 x), 0, ne)
3694
104
    Value *ExtSrc;
3695
104
    if (CCVal == CmpInst::ICMP_EQ &&
3696
104
        ((match(Src1, m_One()) && match(Src0, m_ZExt(m_Value(ExtSrc)))) ||
3697
46
         (match(Src1, m_AllOnes()) && match(Src0, m_SExt(m_Value(ExtSrc))))) &&
3698
104
        ExtSrc->getType()->isIntegerTy(1)) {
3699
6
      II->setArgOperand(1, ConstantInt::getNullValue(Src1->getType()));
3700
6
      II->setArgOperand(2, ConstantInt::get(CC->getType(), CmpInst::ICMP_NE));
3701
6
      return II;
3702
6
    }
3703
98
3704
98
    CmpInst::Predicate SrcPred;
3705
98
    Value *SrcLHS;
3706
98
    Value *SrcRHS;
3707
98
3708
98
    // Fold compare eq/ne with 0 from a compare result as the predicate to the
3709
98
    // intrinsic. The typical use is a wave vote function in the library, which
3710
98
    // will be fed from a user code condition compared with 0. Fold in the
3711
98
    // redundant compare.
3712
98
3713
98
    // llvm.amdgcn.icmp([sz]ext ([if]cmp pred a, b), 0, ne)
3714
98
    //   -> llvm.amdgcn.[if]cmp(a, b, pred)
3715
98
    //
3716
98
    // llvm.amdgcn.icmp([sz]ext ([if]cmp pred a, b), 0, eq)
3717
98
    //   -> llvm.amdgcn.[if]cmp(a, b, inv pred)
3718
98
    if (match(Src1, m_Zero()) &&
3719
98
        match(Src0,
3720
59
              m_ZExtOrSExt(m_Cmp(SrcPred, m_Value(SrcLHS), m_Value(SrcRHS))))) {
3721
31
      if (CCVal == CmpInst::ICMP_EQ)
3722
5
        SrcPred = CmpInst::getInversePredicate(SrcPred);
3723
31
3724
31
      Intrinsic::ID NewIID = CmpInst::isFPPredicate(SrcPred) ?
3725
23
        Intrinsic::amdgcn_fcmp : Intrinsic::amdgcn_icmp;
3726
31
3727
31
      Type *Ty = SrcLHS->getType();
3728
31
      if (auto *CmpType = dyn_cast<IntegerType>(Ty)) {
3729
23
        // Promote to next legal integer type.
3730
23
        unsigned Width = CmpType->getBitWidth();
3731
23
        unsigned NewWidth = Width;
3732
23
3733
23
        // Don't do anything for i1 comparisons.
3734
23
        if (Width == 1)
3735
0
          break;
3736
23
3737
23
        if (Width <= 16)
3738
9
          NewWidth = 16;
3739
14
        else if (Width <= 32)
3740
10
          NewWidth = 32;
3741
4
        else if (Width <= 64)
3742
3
          NewWidth = 64;
3743
1
        else if (Width > 64)
3744
1
          break; // Can't handle this.
3745
22
3746
22
        if (Width != NewWidth) {
3747
7
          IntegerType *CmpTy = Builder.getIntNTy(NewWidth);
3748
7
          if (CmpInst::isSigned(SrcPred)) {
3749
2
            SrcLHS = Builder.CreateSExt(SrcLHS, CmpTy);
3750
2
            SrcRHS = Builder.CreateSExt(SrcRHS, CmpTy);
3751
5
          } else {
3752
5
            SrcLHS = Builder.CreateZExt(SrcLHS, CmpTy);
3753
5
            SrcRHS = Builder.CreateZExt(SrcRHS, CmpTy);
3754
5
          }
3755
7
        }
3756
22
      } else if (!Ty->isFloatTy() && !Ty->isDoubleTy() && !Ty->isHalfTy())
3757
1
        break;
3758
29
3759
29
      Function *NewF =
3760
29
          Intrinsic::getDeclaration(II->getModule(), NewIID,
3761
29
                                    { II->getType(),
3762
29
                                      SrcLHS->getType() });
3763
29
      Value *Args[] = { SrcLHS, SrcRHS,
3764
29
                        ConstantInt::get(CC->getType(), SrcPred) };
3765
29
      CallInst *NewCall = Builder.CreateCall(NewF, Args);
3766
29
      NewCall->takeName(II);
3767
29
      return replaceInstUsesWith(*II, NewCall);
3768
29
    }
3769
67
3770
67
    break;
3771
67
  }
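// Illustrative sketch (not part of the original file) of the width-promotion
// idiom used in the amdgcn.icmp fold above; the helper name is hypothetical.
// Both comparison operands are widened to the chosen legal width, using sext
// for signed predicates and zext otherwise.
static void promoteCmpOperands(IRBuilder<> &Builder, CmpInst::Predicate Pred,
                               Value *&LHS, Value *&RHS, unsigned NewWidth) {
  IntegerType *CmpTy = Builder.getIntNTy(NewWidth);
  if (CmpInst::isSigned(Pred)) {
    LHS = Builder.CreateSExt(LHS, CmpTy);
    RHS = Builder.CreateSExt(RHS, CmpTy);
  } else {
    LHS = Builder.CreateZExt(LHS, CmpTy);
    RHS = Builder.CreateZExt(RHS, CmpTy);
  }
}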
3772
67
  case Intrinsic::amdgcn_wqm_vote: {
3773
3
    // wqm_vote is identity when the argument is constant.
3774
3
    if (!isa<Constant>(II->getArgOperand(0)))
3775
0
      break;
3776
3
3777
3
    return replaceInstUsesWith(*II, II->getArgOperand(0));
3778
3
  }
3779
3
  case Intrinsic::amdgcn_kill: {
3780
1
    const ConstantInt *C = dyn_cast<ConstantInt>(II->getArgOperand(0));
3781
1
    if (!C || !C->getZExtValue())
3782
0
      break;
3783
1
3784
1
    // amdgcn.kill(i1 1) is a no-op
3785
1
    return eraseInstFromFunction(CI);
3786
1
  }
3787
69
  case Intrinsic::amdgcn_update_dpp: {
3788
69
    Value *Old = II->getArgOperand(0);
3789
69
3790
69
    auto BC = cast<ConstantInt>(II->getArgOperand(5));
3791
69
    auto RM = cast<ConstantInt>(II->getArgOperand(3));
3792
69
    auto BM = cast<ConstantInt>(II->getArgOperand(4));
3793
69
    if (BC->isZeroValue() ||
3794
69
        RM->getZExtValue() != 0xF ||
3795
69
        BM->getZExtValue() != 0xF ||
3796
69
        isa<UndefValue>(Old))
3797
68
      break;
3798
1
3799
1
    // If bound_ctrl = 1 and row mask = bank mask = 0xf, we can omit the old value.
3800
1
    II->setOperand(0, UndefValue::get(Old->getType()));
3801
1
    return II;
3802
1
  }
3803
56
  case Intrinsic::amdgcn_readfirstlane:
3804
56
  case Intrinsic::amdgcn_readlane: {
3805
56
    // A constant value is trivially uniform.
3806
56
    if (Constant *C = dyn_cast<Constant>(II->getArgOperand(0)))
3807
8
      return replaceInstUsesWith(*II, C);
3808
48
3809
48
    // The rest of these folds may not be safe if EXEC is not the same between
3810
48
    // the def and use.
3811
48
    Value *Src = II->getArgOperand(0);
3812
48
    Instruction *SrcInst = dyn_cast<Instruction>(Src);
3813
48
    if (SrcInst && SrcInst->getParent() != II->getParent())
3814
4
      break;
3815
44
3816
44
    // readfirstlane (readfirstlane x) -> readfirstlane x
3817
44
    // readlane (readfirstlane x), y -> readfirstlane x
3818
44
    if (match(Src, m_Intrinsic<Intrinsic::amdgcn_readfirstlane>()))
3819
4
      return replaceInstUsesWith(*II, Src);
3820
40
3821
40
    if (IID == Intrinsic::amdgcn_readfirstlane) {
3822
22
      // readfirstlane (readlane x, y) -> readlane x, y
3823
22
      if (match(Src, m_Intrinsic<Intrinsic::amdgcn_readlane>()))
3824
0
        return replaceInstUsesWith(*II, Src);
3825
18
    } else {
3826
18
      // readlane (readlane x, y), y -> readlane x, y
3827
18
      if (match(Src, m_Intrinsic<Intrinsic::amdgcn_readlane>(
3828
18
                  m_Value(), m_Specific(II->getArgOperand(1)))))
3829
1
        return replaceInstUsesWith(*II, Src);
3830
39
    }
3831
39
3832
39
    break;
3833
39
  }
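// A minimal sketch of the PatternMatch idiom used in the readlane folds above;
// the helper name is hypothetical. It matches readlane(readfirstlane(x), lane),
// which the fold above replaces with the inner readfirstlane result.
static bool isReadlaneOfReadFirstLane(Value *V) {
  using namespace PatternMatch;
  return match(V, m_Intrinsic<Intrinsic::amdgcn_readlane>(
                      m_Intrinsic<Intrinsic::amdgcn_readfirstlane>(m_Value()),
                      m_Value()));
}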
3834
1.42k
  case Intrinsic::stackrestore: {
3835
1.42k
    // If the save is right next to the restore, remove the restore.  This can
3836
1.42k
    // happen when variable allocas are DCE'd.
3837
1.42k
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
3838
1.37k
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
3839
1.37k
        // Skip over debug info.
3840
1.37k
        if (SS->getNextNonDebugInstruction() == II) {
3841
3
          return eraseInstFromFunction(CI);
3842
3
        }
3843
1.42k
      }
3844
1.37k
    }
3845
1.42k
3846
1.42k
    // Scan down this block to see if there is another stack restore in the
3847
1.42k
    // same block without an intervening call/alloca.
3848
1.42k
    BasicBlock::iterator BI(II);
3849
1.42k
    Instruction *TI = II->getParent()->getTerminator();
3850
1.42k
    bool CannotRemove = false;
3851
17.0k
    for (++BI; &*BI != TI; ++BI) {
3852
15.8k
      if (isa<AllocaInst>(BI)) {
3853
0
        CannotRemove = true;
3854
0
        break;
3855
0
      }
3856
15.8k
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
3857
203
        if (auto *II2 = dyn_cast<IntrinsicInst>(BCI)) {
3858
134
          // If there is a stackrestore below this one, remove this one.
3859
134
          if (II2->getIntrinsicID() == Intrinsic::stackrestore)
3860
4
            return eraseInstFromFunction(CI);
3861
130
3862
130
          // Bail if we cross over an intrinsic with side effects, such as
3863
130
          // llvm.stacksave, llvm.read_register, or llvm.setjmp.
3864
130
          if (II2->mayHaveSideEffects()) {
3865
130
            CannotRemove = true;
3866
130
            break;
3867
130
          }
3868
69
        } else {
3869
69
          // If we found a non-intrinsic call, we can't remove the stack
3870
69
          // restore.
3871
69
          CannotRemove = true;
3872
69
          break;
3873
69
        }
3874
203
      }
3875
15.8k
    }
3876
1.42k
3877
1.42k
    // If the stack restore is in a return, resume, or unwind block and if there
3878
1.42k
    // are no allocas or calls between the restore and the return, nuke the
3879
1.42k
    // restore.
3880
1.42k
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
3881
16
      return eraseInstFromFunction(CI);
3882
1.40k
    break;
3883
1.40k
  }
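// A simplified, hypothetical sketch of the forward scan above. Starting after
// I (which must not be the block terminator), it reports whether an alloca or
// a side-effecting call appears before the terminator; this is a slightly
// stronger check than the original, which also bails on any non-intrinsic call.
static bool hasBlockingInstBelow(Instruction *I) {
  Instruction *TI = I->getParent()->getTerminator();
  for (BasicBlock::iterator BI = std::next(I->getIterator()); &*BI != TI; ++BI) {
    if (isa<AllocaInst>(BI))
      return true;
    if (auto *CB = dyn_cast<CallInst>(BI))
      if (CB->mayHaveSideEffects())
        return true;
  }
  return false;
}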
3884
1.05M
  case Intrinsic::lifetime_start:
3885
1.05M
    // Asan needs to poison memory to detect invalid access which is possible
3886
1.05M
    // even for empty lifetime range.
3887
1.05M
    if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3888
1.05M
        II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3889
24
      break;
3890
1.05M
3891
1.05M
    if (removeTriviallyEmptyRange(*II, Intrinsic::lifetime_start,
3892
1.05M
                                  Intrinsic::lifetime_end, *this))
3893
21
      return nullptr;
3894
1.05M
    break;
3895
1.05M
  case Intrinsic::assume: {
3896
417
    Value *IIOperand = II->getArgOperand(0);
3897
417
    // Remove an assume if it is followed by an identical assume.
3898
417
    // TODO: Do we need this? Unless there are conflicting assumptions, the
3899
417
    // computeKnownBits(IIOperand) below here eliminates redundant assumes.
3900
417
    Instruction *Next = II->getNextNonDebugInstruction();
3901
417
    if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
3902
3
      return eraseInstFromFunction(CI);
3903
414
3904
414
    // Canonicalize assume(a && b) -> assume(a); assume(b);
3905
414
    // Note: New assumption intrinsics created here are registered by
3906
414
    // the InstCombineIRInserter object.
3907
414
    FunctionType *AssumeIntrinsicTy = II->getFunctionType();
3908
414
    Value *AssumeIntrinsic = II->getCalledValue();
3909
414
    Value *A, *B;
3910
414
    if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
3911
2
      Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, II->getName());
3912
2
      Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());
3913
2
      return eraseInstFromFunction(*II);
3914
2
    }
3915
412
    // assume(!(a || b)) -> assume(!a); assume(!b);
3916
412
    if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
3917
1
      Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
3918
1
                         Builder.CreateNot(A), II->getName());
3919
1
      Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
3920
1
                         Builder.CreateNot(B), II->getName());
3921
1
      return eraseInstFromFunction(*II);
3922
1
    }
3923
411
3924
411
    // assume( (load addr) != null ) -> add 'nonnull' metadata to load
3925
411
    // (if assume is valid at the load)
3926
411
    CmpInst::Predicate Pred;
3927
411
    Instruction *LHS;
3928
411
    if (match(IIOperand, m_ICmp(Pred, m_Instruction(LHS), m_Zero())) &&
3929
411
        Pred == ICmpInst::ICMP_NE && LHS->getOpcode() == Instruction::Load &&
3930
411
        LHS->getType()->isPointerTy() &&
3931
411
        isValidAssumeForContext(II, LHS, &DT)) {
3932
3
      MDNode *MD = MDNode::get(II->getContext(), None);
3933
3
      LHS->setMetadata(LLVMContext::MD_nonnull, MD);
3934
3
      return eraseInstFromFunction(*II);
3935
3
3936
3
      // TODO: apply nonnull return attributes to calls and invokes
3937
3
      // TODO: apply range metadata for range check patterns?
3938
3
    }
3939
408
3940
408
    // If there is a dominating assume with the same condition as this one,
3941
408
    // then this one is redundant, and should be removed.
3942
408
    KnownBits Known(1);
3943
408
    computeKnownBits(IIOperand, Known, 0, II);
3944
408
    if (Known.isAllOnes())
3945
3
      return eraseInstFromFunction(*II);
3946
405
3947
405
    // Update the cache of affected values for this assumption (we might be
3948
405
    // here because we just simplified the condition).
3949
405
    AC.updateAffectedValues(II);
3950
405
    break;
3951
405
  }
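// A hedged sketch of the redundancy check above, using the free function from
// llvm/Analysis/ValueTracking.h; the helper name is illustrative only. An i1
// assume condition whose known bits are all ones at the assume site is already
// proven true (typically by a dominating assume), so the assume can be dropped.
static bool assumeConditionKnownTrue(Value *Cond, const DataLayout &DL,
                                     AssumptionCache &AC, Instruction *CtxI,
                                     DominatorTree &DT) {
  KnownBits Known = computeKnownBits(Cond, DL, /*Depth=*/0, &AC, CtxI, &DT);
  return Known.isAllOnes();
}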
3952
405
  case Intrinsic::experimental_gc_relocate: {
3953
30
    // Translate facts known about a pointer before relocating into
3954
30
    // facts about the relocate value, while being careful to
3955
30
    // preserve relocation semantics.
3956
30
    Value *DerivedPtr = cast<GCRelocateInst>(II)->getDerivedPtr();
3957
30
3958
30
    // Remove the relocation if unused, note that this check is required
3959
30
    // to prevent the cases below from looping forever.
3960
30
    if (II->use_empty())
3961
0
      return eraseInstFromFunction(*II);
3962
30
3963
30
    // Undef is undef, even after relocation.
3964
30
    // TODO: provide a hook for this in GCStrategy.  This is clearly legal for
3965
30
    // most practical collectors, but there was discussion in the review thread
3966
30
    // about whether it was legal for all possible collectors.
3967
30
    if (isa<UndefValue>(DerivedPtr))
3968
1
      // Use undef of gc_relocate's type to replace it.
3969
1
      return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3970
29
3971
29
    if (auto *PT = dyn_cast<PointerType>(II->getType())) {
3972
27
      // The relocation of null will be null for most any collector.
3973
27
      // TODO: provide a hook for this in GCStrategy.  There might be some
3974
27
      // weird collector this property does not hold for.
3975
27
      if (isa<ConstantPointerNull>(DerivedPtr))
3976
1
        // Use null-pointer of gc_relocate's type to replace it.
3977
1
        return replaceInstUsesWith(*II, ConstantPointerNull::get(PT));
3978
26
3979
26
      // isKnownNonNull -> nonnull attribute
3980
26
      if (!II->hasRetAttr(Attribute::NonNull) &&
3981
26
          isKnownNonZero(DerivedPtr, DL, 0, &AC, II, &DT)) {
3982
3
        II->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
3983
3
        return II;
3984
3
      }
3985
25
    }
3986
25
3987
25
    // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
3988
25
    // Canonicalize on the type from the uses to the defs
3989
25
3990
25
    // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
3991
25
    break;
3992
25
  }
3993
25
3994
32
  case Intrinsic::experimental_guard: {
3995
32
    // Is this guard followed by another guard?  We scan forward over a small
3996
32
    // fixed window of instructions to handle common cases with conditions
3997
32
    // computed between guards.
3998
32
    Instruction *NextInst = II->getNextNode();
3999
40
    for (unsigned i = 0; i < GuardWideningWindow; i++) {
4000
39
      // Note: Using context-free form to avoid compile time blow up
4001
39
      if (!isSafeToSpeculativelyExecute(NextInst))
4002
31
        break;
4003
8
      NextInst = NextInst->getNextNode();
4004
8
    }
4005
32
    Value *NextCond = nullptr;
4006
32
    if (match(NextInst,
4007
32
              m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
4008
14
      Value *CurrCond = II->getArgOperand(0);
4009
14
4010
14
      // Remove a guard that is immediately preceded by an identical guard.
4011
14
      if (CurrCond == NextCond)
4012
9
        return eraseInstFromFunction(*NextInst);
4013
5
4014
5
      // Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
4015
5
      Instruction* MoveI = II->getNextNode();
4016
10
      while (MoveI != NextInst) {
4017
5
        auto *Temp = MoveI;
4018
5
        MoveI = MoveI->getNextNode();
4019
5
        Temp->moveBefore(II);
4020
5
      }
4021
5
      II->setArgOperand(0, Builder.CreateAnd(CurrCond, NextCond));
4022
5
      return eraseInstFromFunction(*NextInst);
4023
5
    }
4024
18
    break;
4025
18
  }
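// A reduced sketch of the guard-merging step above; the helper name is
// hypothetical. Given guard(a) immediately followed by guard(b) (after the
// windowed scan has moved any intervening speculatable instructions out of the
// way), the first condition is rewritten to a & b and the second guard erased.
static void mergeAdjacentGuards(IRBuilder<> &Builder, IntrinsicInst *First,
                                IntrinsicInst *Second) {
  Builder.SetInsertPoint(First);
  Value *Combined =
      Builder.CreateAnd(First->getArgOperand(0), Second->getArgOperand(0));
  First->setArgOperand(0, Combined);
  Second->eraseFromParent();
}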
4026
3.40M
  }
4027
3.40M
  return visitCallBase(*II);
4028
3.40M
}
4029
4030
// Fence instruction simplification
4031
52.3k
Instruction *InstCombiner::visitFenceInst(FenceInst &FI) {
4032
52.3k
  // Remove identical consecutive fences.
4033
52.3k
  Instruction *Next = FI.getNextNonDebugInstruction();
4034
52.3k
  if (auto *NFI = dyn_cast<FenceInst>(Next))
4035
11
    if (FI.isIdenticalTo(NFI))
4036
5
      return eraseInstFromFunction(FI);
4037
52.3k
  return nullptr;
4038
52.3k
}
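// A tiny illustration (hypothetical helper, not from the original source) of
// the input the fence fold above targets: two identical back-to-back fences,
// of which the second is removed.
static void emitRedundantFences(IRBuilder<> &Builder) {
  Builder.CreateFence(AtomicOrdering::SequentiallyConsistent);
  Builder.CreateFence(AtomicOrdering::SequentiallyConsistent); // folded away
}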
4039
4040
// InvokeInst simplification
4041
431k
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
4042
431k
  return visitCallBase(II);
4043
431k
}
4044
4045
// CallBrInst simplification
4046
0
Instruction *InstCombiner::visitCallBrInst(CallBrInst &CBI) {
4047
0
  return visitCallBase(CBI);
4048
0
}
4049
4050
/// If this cast does not affect the value passed through the varargs area, we
4051
/// can eliminate the use of the cast.
4052
static bool isSafeToEliminateVarargsCast(const CallBase &Call,
4053
                                         const DataLayout &DL,
4054
                                         const CastInst *const CI,
4055
71.5k
                                         const int ix) {
4056
71.5k
  if (!CI->isLosslessCast())
4057
71.4k
    return false;
4058
166
4059
166
  // If this is a GC intrinsic, avoid munging types.  We need types for
4060
166
  // statepoint reconstruction in SelectionDAG.
4061
166
  // TODO: This is probably something which should be expanded to all
4062
166
  // intrinsics since the entire point of intrinsics is that
4063
166
  // they are understandable by the optimizer.
4064
166
  if (isStatepoint(&Call) || isGCRelocate(&Call) || isGCResult(&Call))
4065
0
    return false;
4066
166
4067
166
  // The size of ByVal or InAlloca arguments is derived from the type, so we
4068
166
  // can't change to a type with a different size.  If the size were
4069
166
  // passed explicitly we could avoid this check.
4070
166
  if (!Call.isByValOrInAllocaArgument(ix))
4071
164
    return true;
4072
2
4073
2
  Type* SrcTy =
4074
2
            cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
4075
2
  Type *DstTy = Call.isByValArgument(ix)
4076
2
                    ? Call.getParamByValType(ix)
4077
2
                    : cast<PointerType>(CI->getType())->getElementType();
4078
2
  if (!SrcTy->isSized() || !DstTy->isSized())
4079
0
    return false;
4080
2
  if (DL.getTypeAllocSize(SrcTy) != DL.getTypeAllocSize(DstTy))
4081
1
    return false;
4082
1
  return true;
4083
1
}
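// A small sketch of the size invariant enforced above; the helper name is
// illustrative. A byval/inalloca pointee type may only be exchanged for
// another sized type whose alloc size matches, since the argument's size is
// derived from that type.
static bool haveSameAllocSize(Type *SrcTy, Type *DstTy, const DataLayout &DL) {
  return SrcTy->isSized() && DstTy->isSized() &&
         DL.getTypeAllocSize(SrcTy) == DL.getTypeAllocSize(DstTy);
}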
4084
4085
20.1M
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI) {
4086
20.1M
  if (!CI->getCalledFunction()) return nullptr;
4087
19.3M
4088
19.3M
  auto InstCombineRAUW = [this](Instruction *From, Value *With) {
4089
25
    replaceInstUsesWith(*From, With);
4090
25
  };
4091
19.3M
  auto InstCombineErase = [this](Instruction *I) {
4092
136
    eraseInstFromFunction(*I);
4093
136
  };
4094
19.3M
  LibCallSimplifier Simplifier(DL, &TLI, ORE, BFI, PSI, InstCombineRAUW,
4095
19.3M
                               InstCombineErase);
4096
19.3M
  if (Value *With = Simplifier.optimizeCall(CI)) {
4097
26.2k
    ++NumSimplified;
4098
26.2k
    return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
4099
26.2k
  }
4100
19.3M
4101
19.3M
  return nullptr;
4102
19.3M
}
4103
4104
10
static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
4105
10
  // Strip off at most one level of pointer casts, looking for an alloca.  This
4106
10
  // is good enough in practice and simpler than handling any number of casts.
4107
10
  Value *Underlying = TrampMem->stripPointerCasts();
4108
10
  if (Underlying != TrampMem &&
4109
10
      (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
4110
0
    return nullptr;
4111
10
  if (!isa<AllocaInst>(Underlying))
4112
5
    return nullptr;
4113
5
4114
5
  IntrinsicInst *InitTrampoline = nullptr;
4115
12
  for (User *U : TrampMem->users()) {
4116
12
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4117
12
    if (!II)
4118
0
      return nullptr;
4119
12
    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
4120
5
      if (InitTrampoline)
4121
0
        // More than one init_trampoline writes to this value.  Give up.
4122
0
        return nullptr;
4123
5
      InitTrampoline = II;
4124
5
      continue;
4125
5
    }
4126
7
    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
4127
7
      // Allow any number of calls to adjust.trampoline.
4128
7
      continue;
4129
0
    return nullptr;
4130
0
  }
4131
5
4132
5
  // No call to init.trampoline found.
4133
5
  if (!InitTrampoline)
4134
0
    return nullptr;
4135
5
4136
5
  // Check that the alloca is being used in the expected way.
4137
5
  if (InitTrampoline->getOperand(0) != TrampMem)
4138
0
    return nullptr;
4139
5
4140
5
  return InitTrampoline;
4141
5
}
4142
4143
static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
4144
5
                                               Value *TrampMem) {
4145
5
  // Visit all the previous instructions in the basic block, and try to find a
4146
5
  // init.trampoline which has a direct path to the adjust.trampoline.
4147
5
  for (BasicBlock::iterator I = AdjustTramp->getIterator(),
4148
5
                            E = AdjustTramp->getParent()->begin();
4149
5
       I != E;) {
4150
4
    Instruction *Inst = &*--I;
4151
4
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
4152
2
      if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
4153
2
          II->getOperand(0) == TrampMem)
4154
2
        return II;
4155
2
    if (Inst->mayWriteToMemory())
4156
2
      return nullptr;
4157
2
  }
4158
5
  return nullptr;
4159
5
}
4160
4161
// Given a call to llvm.adjust.trampoline, find and return the corresponding
4162
// call to llvm.init.trampoline if the call to the trampoline can be optimized
4163
// to a direct call to a function.  Otherwise return NULL.
4164
20.5M
static IntrinsicInst *findInitTrampoline(Value *Callee) {
4165
20.5M
  Callee = Callee->stripPointerCasts();
4166
20.5M
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
4167
20.5M
  if (!AdjustTramp ||
4168
20.5M
      AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
4169
20.5M
    return nullptr;
4170
9
4171
9
  Value *TrampMem = AdjustTramp->getOperand(0);
4172
9
4173
9
  if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem))
4174
5
    return IT;
4175
4
  if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
4176
2
    return IT;
4177
2
  return nullptr;
4178
2
}
4179
4180
/// Improvements for call, callbr and invoke instructions.
4181
20.7M
Instruction *InstCombiner::visitCallBase(CallBase &Call) {
4182
20.7M
  if (isAllocLikeFn(&Call, &TLI))
4183
179k
    return visitAllocSite(Call);
4184
20.5M
4185
20.5M
  bool Changed = false;
4186
20.5M
4187
20.5M
  // Mark any parameters that are known to be non-null with the nonnull
4188
20.5M
  // attribute.  This is helpful for inlining calls to functions with null
4189
20.5M
  // checks on their arguments.
4190
20.5M
  SmallVector<unsigned, 4> ArgNos;
4191
20.5M
  unsigned ArgNo = 0;
4192
20.5M
4193
46.8M
  for (Value *V : Call.args()) {
4194
46.8M
    if (V->getType()->isPointerTy() &&
4195
46.8M
        !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
4196
46.8M
        isKnownNonZero(V, DL, 0, &AC, &Call, &DT))
4197
1.02M
      ArgNos.push_back(ArgNo);
4198
46.8M
    ArgNo++;
4199
46.8M
  }
4200
20.5M
4201
20.5M
  assert(ArgNo == Call.arg_size() && "sanity check");
4202
20.5M
4203
20.5M
  if (!ArgNos.empty()) {
4204
893k
    AttributeList AS = Call.getAttributes();
4205
893k
    LLVMContext &Ctx = Call.getContext();
4206
893k
    AS = AS.addParamAttribute(Ctx, ArgNos,
4207
893k
                              Attribute::get(Ctx, Attribute::NonNull));
4208
893k
    Call.setAttributes(AS);
4209
893k
    Changed = true;
4210
893k
  }
4211
20.5M
4212
20.5M
  // If the callee is a pointer to a function, attempt to move any casts to the
4213
20.5M
  // arguments of the call/callbr/invoke.
4214
20.5M
  Value *Callee = Call.getCalledValue();
4215
20.5M
  if (!isa<Function>(Callee) && transformConstExprCastCall(Call))
4216
202
    return nullptr;
4217
20.5M
4218
20.5M
  if (Function *CalleeF = dyn_cast<Function>(Callee)) {
4219
19.7M
    // Remove the convergent attr on calls when the callee is not convergent.
4220
19.7M
    if (Call.isConvergent() && !CalleeF->isConvergent() &&
4221
19.7M
        !CalleeF->isIntrinsic()) {
4222
4
      LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
4223
4
                        << "\n");
4224
4
      Call.setNotConvergent();
4225
4
      return &Call;
4226
4
    }
4227
19.7M
4228
19.7M
    // If the call and callee calling conventions don't match, this call must
4229
19.7M
    // be unreachable, as the call is undefined.
4230
19.7M
    if (CalleeF->getCallingConv() != Call.getCallingConv() &&
4231
19.7M
        // Only do this for calls to a function with a body.  A prototype may
4232
19.7M
        // not actually end up matching the implementation's calling conv for a
4233
19.7M
        // variety of reasons (e.g. it may be written in assembly).
4234
19.7M
        
!CalleeF->isDeclaration()71
) {
4235
16
      Instruction *OldCall = &Call;
4236
16
      CreateNonTerminatorUnreachable(OldCall);
4237
16
      // If OldCall does not return void then replaceAllUsesWith undef.
4238
16
      // This allows ValueHandlers and custom metadata to adjust themselves.
4239
16
      if (!OldCall->getType()->isVoidTy())
4240
0
        replaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
4241
16
      if (isa<CallInst>(OldCall))
4242
16
        return eraseInstFromFunction(*OldCall);
4243
0
4244
0
      // We cannot remove an invoke or a callbr, because it would change the
4245
0
      // CFG, just change the callee to a null pointer.
4246
0
      cast<CallBase>(OldCall)->setCalledFunction(
4247
0
          CalleeF->getFunctionType(),
4248
0
          Constant::getNullValue(CalleeF->getType()));
4249
0
      return nullptr;
4250
0
    }
4251
19.7M
  }
4252
20.5M
4253
20.5M
  if ((isa<ConstantPointerNull>(Callee) &&
4254
20.5M
       !NullPointerIsDefined(Call.getFunction())) ||
4255
20.5M
      isa<UndefValue>(Callee)) {
4256
8
    // If Call does not return void then replaceAllUsesWith undef.
4257
8
    // This allows ValueHandlers and custom metadata to adjust themselves.
4258
8
    if (!Call.getType()->isVoidTy())
4259
8
      replaceInstUsesWith(Call, UndefValue::get(Call.getType()));
4260
8
4261
8
    if (Call.isTerminator()) {
4262
8
      // Can't remove an invoke or callbr because we cannot change the CFG.
4263
8
      return nullptr;
4264
8
    }
4265
0
4266
0
    // This instruction is not reachable, just remove it.
4267
0
    CreateNonTerminatorUnreachable(&Call);
4268
0
    return eraseInstFromFunction(Call);
4269
0
  }
4270
20.5M
4271
20.5M
  if (IntrinsicInst *II = findInitTrampoline(Callee))
4272
7
    return transformCallThroughTrampoline(Call, *II);
4273
20.5M
4274
20.5M
  PointerType *PTy = cast<PointerType>(Callee->getType());
4275
20.5M
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
4276
20.5M
  if (FTy->isVarArg()) {
4277
481k
    int ix = FTy->getNumParams();
4278
481k
    // See if we can optimize any arguments passed through the varargs area of
4279
481k
    // the call.
4280
481k
    for (auto I = Call.arg_begin() + FTy->getNumParams(), E = Call.arg_end();
4281
1.22M
         I != E; ++I, ++ix) {
4282
747k
      CastInst *CI = dyn_cast<CastInst>(*I);
4283
747k
      if (CI && isSafeToEliminateVarargsCast(Call, DL, CI, ix)) {
4284
165
        *I = CI->getOperand(0);
4285
165
4286
165
        // Update the byval type to match the argument type.
4287
165
        if (Call.isByValArgument(ix)) {
4288
1
          Call.removeParamAttr(ix, Attribute::ByVal);
4289
1
          Call.addParamAttr(
4290
1
              ix, Attribute::getWithByValType(
4291
1
                      Call.getContext(),
4292
1
                      CI->getOperand(0)->getType()->getPointerElementType()));
4293
1
        }
4294
165
        Changed = true;
4295
165
      }
4296
747k
    }
4297
481k
  }
4298
20.5M
4299
20.5M
  if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
4300
35
    // Inline asm calls cannot throw - mark them 'nounwind'.
4301
35
    Call.setDoesNotThrow();
4302
35
    Changed = true;
4303
35
  }
4304
20.5M
4305
20.5M
  // Try to optimize the call if possible, we require DataLayout for most of
4306
20.5M
  // this.  None of these calls are seen as possibly dead so go ahead and
4307
20.5M
  // delete the instruction now.
4308
20.5M
  if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
4309
20.1M
    Instruction *I = tryOptimizeCall(CI);
4310
20.1M
    // If we changed something return the result, etc. Otherwise let
4311
20.1M
    // the fallthrough check.
4312
20.1M
    if (I) return eraseInstFromFunction(*I);
4313
20.5M
  }
4314
20.5M
4315
20.5M
  return Changed ? &Call : nullptr;
4316
20.5M
}
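// A standalone sketch of the attribute update performed at the top of
// visitCallBase; the helper name is hypothetical. All proven-nonnull argument
// indices are added in a single AttributeList rewrite.
static void markParamsNonNull(CallBase &Call, ArrayRef<unsigned> ArgNos) {
  if (ArgNos.empty())
    return;
  LLVMContext &Ctx = Call.getContext();
  AttributeList AL = Call.getAttributes();
  AL = AL.addParamAttribute(Ctx, ArgNos,
                            Attribute::get(Ctx, Attribute::NonNull));
  Call.setAttributes(AL);
}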
4317
4318
/// If the callee is a constexpr cast of a function, attempt to move the cast to
4319
/// the arguments of the call/callbr/invoke.
4320
851k
bool InstCombiner::transformConstExprCastCall(CallBase &Call) {
4321
851k
  auto *Callee = dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts());
4322
851k
  if (!Callee)
4323
802k
    return false;
4324
49.0k
4325
49.0k
  // If this is a call to a thunk function, don't remove the cast. Thunks are
4326
49.0k
  // used to transparently forward all incoming parameters and outgoing return
4327
49.0k
  // values, so it's important to leave the cast in place.
4328
49.0k
  if (Callee->hasFnAttribute("thunk"))
4329
3
    return false;
4330
49.0k
4331
49.0k
  // If this is a musttail call, the callee's prototype must match the caller's
4332
49.0k
  // prototype with the exception of pointee types. The code below doesn't
4333
49.0k
  // implement that, so we can't do this transform.
4334
49.0k
  // TODO: Do the transform if it only requires adding pointer casts.
4335
49.0k
  if (Call.isMustTailCall())
4336
4
    return false;
4337
49.0k
4338
49.0k
  Instruction *Caller = &Call;
4339
49.0k
  const AttributeList &CallerPAL = Call.getAttributes();
4340
49.0k
4341
49.0k
  // Okay, this is a cast from a function to a different type.  Unless doing so
4342
49.0k
  // would cause a type conversion of one of our arguments, change this call to
4343
49.0k
  // be a direct call with arguments casted to the appropriate types.
4344
49.0k
  FunctionType *FT = Callee->getFunctionType();
4345
49.0k
  Type *OldRetTy = Caller->getType();
4346
49.0k
  Type *NewRetTy = FT->getReturnType();
4347
49.0k
4348
49.0k
  // Check to see if we are changing the return type...
4349
49.0k
  if (OldRetTy != NewRetTy) {
4350
2.02k
4351
2.02k
    if (NewRetTy->isStructTy())
4352
0
      return false; // TODO: Handle multiple return values.
4353
2.02k
4354
2.02k
    if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
4355
1.60k
      if (Callee->isDeclaration())
4356
1.58k
        return false;   // Cannot transform this return value.
4357
19
4358
19
      if (!Caller->use_empty() &&
4359
19
          // void -> non-void is handled specially
4360
19
          !NewRetTy->isVoidTy())
4361
11
        return false;   // Cannot transform this return value.
4362
426
    }
4363
426
4364
426
    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
4365
276
      AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
4366
276
      if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
4367
0
        return false;   // Attribute not compatible with transformed value.
4368
426
    }
4369
426
4370
426
    // If the callbase is an invoke/callbr instruction, and the return value is
4371
426
    // used by a PHI node in a successor, we cannot change the return type of
4372
426
    // the call because there is no place to put the cast instruction (without
4373
426
    // breaking the critical edge).  Bail out in this case.
4374
426
    if (!Caller->use_empty()) {
4375
423
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
4376
29
        for (User *U : II->users())
4377
49
          if (PHINode *PN = dyn_cast<PHINode>(U))
4378
20
            if (PN->getParent() == II->getNormalDest() ||
4379
20
                PN->getParent() == II->getUnwindDest())
4380
0
              return false;
4381
423
      // FIXME: Be conservative for callbr to avoid a quadratic search.
4382
423
      if (isa<CallBrInst>(Caller))
4383
0
        return false;
4384
47.4k
    }
4385
426
  }
4386
47.4k
4387
47.4k
  unsigned NumActualArgs = Call.arg_size();
4388
47.4k
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
4389
47.4k
4390
47.4k
  // Prevent us turning:
4391
47.4k
  // declare void @takes_i32_inalloca(i32* inalloca)
4392
47.4k
  //  call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
4393
47.4k
  //
4394
47.4k
  // into:
4395
47.4k
  //  call void @takes_i32_inalloca(i32* null)
4396
47.4k
  //
4397
47.4k
  //  Similarly, avoid folding away bitcasts of byval calls.
4398
47.4k
  if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
4399
47.4k
      Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
4400
2
    return false;
4401
47.4k
4402
47.4k
  auto AI = Call.arg_begin();
4403
50.4k
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
4404
3.22k
    Type *ParamTy = FT->getParamType(i);
4405
3.22k
    Type *ActTy = (*AI)->getType();
4406
3.22k
4407
3.22k
    if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
4408
173
      return false;   // Cannot transform this parameter value.
4409
3.05k
4410
3.05k
    if (AttrBuilder(CallerPAL.getParamAttributes(i))
4411
3.05k
            .overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
4412
3
      return false;   // Attribute not compatible with transformed value.
4413
3.05k
4414
3.05k
    if (Call.isInAllocaArgument(i))
4415
0
      return false;   // Cannot transform to and from inalloca.
4416
3.05k
4417
3.05k
    // If the parameter is passed as a byval argument, then we have to have a
4418
3.05k
    // sized type and the sized type has to have the same size as the old type.
4419
3.05k
    if (ParamTy != ActTy && CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
4420
3
      PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
4421
3
      if (!ParamPTy || !ParamPTy->getElementType()->isSized())
4422
2
        return false;
4423
1
4424
1
      Type *CurElTy = Call.getParamByValType(i);
4425
1
      if (DL.getTypeAllocSize(CurElTy) !=
4426
1
          DL.getTypeAllocSize(ParamPTy->getElementType()))
4427
0
        return false;
4428
1
    }
4429
3.05k
  }
4430
47.4k
4431
47.4k
  if (Callee->isDeclaration()) {
4432
47.0k
    // Do not delete arguments unless we have a function body.
4433
47.0k
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
4434
8
      return false;
4435
47.0k
4436
47.0k
    // If the callee is just a declaration, don't change the varargsness of the
4437
47.0k
    // call.  We don't want to introduce a varargs call where one doesn't
4438
47.0k
    // already exist.
4439
47.0k
    PointerType *APTy = cast<PointerType>(Call.getCalledValue()->getType());
4440
47.0k
    if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
4441
46.9k
      return false;
4442
83
4443
83
    // If both the callee and the cast type are varargs, we still have to make
4444
83
    // sure the number of fixed parameters are the same or we have the same
4445
83
    // ABI issues as if we introduce a varargs call.
4446
83
    if (FT->isVarArg() &&
4447
83
        cast<FunctionType>(APTy->getElementType())->isVarArg() &&
4448
83
        FT->getNumParams() !=
4449
67
        cast<FunctionType>(APTy->getElementType())->getNumParams())
4450
67
      return false;
4451
203
  }
4452
203
4453
203
  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
4454
203
      !CallerPAL.isEmpty()) {
4455
7
    // In this case we have more arguments than the new function type, but we
4456
7
    // won't be dropping them.  Check that these extra arguments have attributes
4457
7
    // that are compatible with being a vararg call argument.
4458
7
    unsigned SRetIdx;
4459
7
    if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
4460
7
        SRetIdx > FT->getNumParams())
4461
4
      return false;
4462
199
  }
4463
199
4464
199
  // Okay, we decided that this is a safe thing to do: go ahead and start
4465
199
  // inserting cast instructions as necessary.
4466
199
  SmallVector<Value *, 8> Args;
4467
199
  SmallVector<AttributeSet, 8> ArgAttrs;
4468
199
  Args.reserve(NumActualArgs);
4469
199
  ArgAttrs.reserve(NumActualArgs);
4470
199
4471
199
  // Get any return attributes.
4472
199
  AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
4473
199
4474
199
  // If the return value is not being used, the type may not be compatible
4475
199
  // with the existing attributes.  Wipe out any problematic attributes.
4476
199
  RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));
4477
199
4478
199
  LLVMContext &Ctx = Call.getContext();
4479
199
  AI = Call.arg_begin();
4480
383
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
4481
184
    Type *ParamTy = FT->getParamType(i);
4482
184
4483
184
    Value *NewArg = *AI;
4484
184
    if ((*AI)->getType() != ParamTy)
4485
154
      NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
4486
184
    Args.push_back(NewArg);
4487
184
4488
184
    // Add any parameter attributes.
4489
184
    if (CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
4490
1
      AttrBuilder AB(CallerPAL.getParamAttributes(i));
4491
1
      AB.addByValAttr(NewArg->getType()->getPointerElementType());
4492
1
      ArgAttrs.push_back(AttributeSet::get(Ctx, AB));
4493
1
    } else
4494
183
      ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
4495
184
  }
4496
199
4497
199
  // If the function takes more arguments than the call was taking, add them
4498
199
  // now.
4499
207
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
4500
8
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));
4501
8
    ArgAttrs.push_back(AttributeSet());
4502
8
  }
4503
199
4504
199
  // If we are removing arguments to the function, emit an obnoxious warning.
4505
199
  if (FT->getNumParams() < NumActualArgs) {
4506
9
    // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
4507
9
    if (FT->isVarArg()) {
4508
6
      // Add all of the arguments in their promoted form to the arg list.
4509
14
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
4510
8
        Type *PTy = getPromotedType((*AI)->getType());
4511
8
        Value *NewArg = *AI;
4512
8
        if (PTy != (*AI)->getType()) {
4513
1
          // Must promote to pass through va_arg area!
4514
1
          Instruction::CastOps opcode =
4515
1
            CastInst::getCastOpcode(*AI, false, PTy, false);
4516
1
          NewArg = Builder.CreateCast(opcode, *AI, PTy);
4517
1
        }
4518
8
        Args.push_back(NewArg);
4519
8
4520
8
        // Add any parameter attributes.
4521
8
        ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
4522
8
      }
4523
6
    }
4524
9
  }
4525
199
4526
199
  AttributeSet FnAttrs = CallerPAL.getFnAttributes();
4527
199
4528
199
  if (NewRetTy->isVoidTy())
4529
105
    Caller->setName("");   // Void type should not have a name.
4530
199
4531
199
  assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
4532
199
         "missing argument attributes");
4533
199
  AttributeList NewCallerPAL = AttributeList::get(
4534
199
      Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);
4535
199
4536
199
  SmallVector<OperandBundleDef, 1> OpBundles;
4537
199
  Call.getOperandBundlesAsDefs(OpBundles);
4538
199
4539
199
  CallBase *NewCall;
4540
199
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
4541
27
    NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
4542
27
                                   II->getUnwindDest(), Args, OpBundles);
4543
172
  } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
4544
0
    NewCall = Builder.CreateCallBr(Callee, CBI->getDefaultDest(),
4545
0
                                   CBI->getIndirectDests(), Args, OpBundles);
4546
172
  } else {
4547
172
    NewCall = Builder.CreateCall(Callee, Args, OpBundles);
4548
172
    cast<CallInst>(NewCall)->setTailCallKind(
4549
172
        cast<CallInst>(Caller)->getTailCallKind());
4550
172
  }
4551
199
  NewCall->takeName(Caller);
4552
199
  NewCall->setCallingConv(Call.getCallingConv());
4553
199
  NewCall->setAttributes(NewCallerPAL);
4554
199
4555
199
  // Preserve the weight metadata for the new call instruction. The metadata
4556
199
  // is used by SamplePGO to check callsite's hotness.
4557
199
  uint64_t W;
4558
199
  if (Caller->extractProfTotalWeight(W))
4559
2
    NewCall->setProfWeight(W);
4560
199
4561
199
  // Insert a cast of the return type as necessary.
4562
199
  Instruction *NC = NewCall;
4563
199
  Value *NV = NC;
4564
199
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
4565
24
    if (!NV->getType()->isVoidTy()) {
4566
20
      NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
4567
20
      NC->setDebugLoc(Caller->getDebugLoc());
4568
20
4569
20
      // If this is an invoke/callbr instruction, we should insert it after the
4570
20
      // first non-phi instruction in the normal successor block.
4571
20
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
4572
1
        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
4573
1
        InsertNewInstBefore(NC, *I);
4574
19
      } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
4575
0
        BasicBlock::iterator I = CBI->getDefaultDest()->getFirstInsertionPt();
4576
0
        InsertNewInstBefore(NC, *I);
4577
19
      } else {
4578
19
        // Otherwise, it's a call, just insert cast right after the call.
4579
19
        InsertNewInstBefore(NC, *Caller);
4580
19
      }
4581
20
      Worklist.AddUsersToWorkList(*Caller);
4582
20
    } else {
4583
4
      NV = UndefValue::get(Caller->getType());
4584
4
    }
4585
24
  }
4586
199
4587
199
  if (!Caller->use_empty())
4588
65
    replaceInstUsesWith(*Caller, NV);
4589
134
  else if (Caller->hasValueHandle()) {
4590
73
    if (OldRetTy == NV->getType())
4591
73
      ValueHandleBase::ValueIsRAUWd(Caller, NV);
4592
0
    else
4593
0
      // We cannot call ValueIsRAUWd with a different type, and the
4594
0
      // actual tracked value will disappear.
4595
0
      ValueHandleBase::ValueIsDeleted(Caller);
4596
73
  }
4597
199
4598
199
  eraseInstFromFunction(*Caller);
4599
199
  return true;
4600
199
}
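// A hedged sketch of the per-argument coercion used above; the helper name is
// illustrative. When the callee's declared parameter type differs from the
// argument's type but the two are bitcast- or no-op-pointer-cast compatible,
// the cast is materialized and the casted value is passed instead.
static Value *coerceCallArg(IRBuilder<> &Builder, Value *Arg, Type *ParamTy,
                            const DataLayout &DL) {
  if (Arg->getType() == ParamTy)
    return Arg;
  assert(CastInst::isBitOrNoopPointerCastable(Arg->getType(), ParamTy, DL) &&
         "caller must have checked castability");
  return Builder.CreateBitOrPointerCast(Arg, ParamTy);
}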
4601
4602
/// Turn a call to a function created by init_trampoline / adjust_trampoline
4603
/// intrinsic pair into a direct call to the underlying function.
4604
Instruction *
4605
InstCombiner::transformCallThroughTrampoline(CallBase &Call,
4606
7
                                             IntrinsicInst &Tramp) {
4607
7
  Value *Callee = Call.getCalledValue();
4608
7
  Type *CalleeTy = Callee->getType();
4609
7
  FunctionType *FTy = Call.getFunctionType();
4610
7
  AttributeList Attrs = Call.getAttributes();
4611
7
4612
7
  // If the call already has the 'nest' attribute somewhere then give up -
4613
7
  // otherwise 'nest' would occur twice after splicing in the chain.
4614
7
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
4615
0
    return nullptr;
4616
7
4617
7
  Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts());
4618
7
  FunctionType *NestFTy = NestF->getFunctionType();
4619
7
4620
7
  AttributeList NestAttrs = NestF->getAttributes();
4621
7
  if (!NestAttrs.isEmpty()) {
4622
7
    unsigned NestArgNo = 0;
4623
7
    Type *NestTy = nullptr;
4624
7
    AttributeSet NestAttr;
4625
7
4626
7
    // Look for a parameter marked with the 'nest' attribute.
4627
7
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
4628
7
                                      E = NestFTy->param_end();
4629
7
         I != E; ++NestArgNo, ++I) {
4630
7
      AttributeSet AS = NestAttrs.getParamAttributes(NestArgNo);
4631
7
      if (AS.hasAttribute(Attribute::Nest)) {
4632
7
        // Record the parameter type and any other attributes.
4633
7
        NestTy = *I;
4634
7
        NestAttr = AS;
4635
7
        break;
4636
7
      }
4637
7
    }
4638
7
4639
7
    if (NestTy) {
4640
7
      std::vector<Value*> NewArgs;
4641
7
      std::vector<AttributeSet> NewArgAttrs;
4642
7
      NewArgs.reserve(Call.arg_size() + 1);
4643
7
      NewArgAttrs.reserve(Call.arg_size());
4644
7
4645
7
      // Insert the nest argument into the call argument list, which may
4646
7
      // mean appending it.  Likewise for attributes.
4647
7
4648
7
      {
4649
7
        unsigned ArgNo = 0;
4650
7
        auto I = Call.arg_begin(), E = Call.arg_end();
4651
14
        do {
4652
14
          if (ArgNo == NestArgNo) {
4653
7
            // Add the chain argument and attributes.
4654
7
            Value *NestVal = Tramp.getArgOperand(2);
4655
7
            if (NestVal->getType() != NestTy)
4656
1
              NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
4657
7
            NewArgs.push_back(NestVal);
4658
7
            NewArgAttrs.push_back(NestAttr);
4659
7
          }
4660
14
4661
14
          if (I == E)
4662
7
            break;
4663
7
4664
7
          // Add the original argument and attributes.
4665
7
          NewArgs.push_back(*I);
4666
7
          NewArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
4667
7
4668
7
          ++ArgNo;
4669
7
          ++I;
4670
7
        } while (true);
4671
7
      }
4672
7
4673
7
      // The trampoline may have been bitcast to a bogus type (FTy).
4674
7
      // Handle this by synthesizing a new function type, equal to FTy
4675
7
      // with the chain parameter inserted.
4676
7
4677
7
      std::vector<Type*> NewTypes;
4678
7
      NewTypes.reserve(FTy->getNumParams()+1);
4679
7
4680
7
      // Insert the chain's type into the list of parameter types, which may
4681
7
      // mean appending it.
4682
7
      {
4683
7
        unsigned ArgNo = 0;
4684
7
        FunctionType::param_iterator I = FTy->param_begin(),
4685
7
          E = FTy->param_end();
4686
7
4687
13
        do {
4688
13
          if (ArgNo == NestArgNo)
4689
7
            // Add the chain's type.
4690
7
            NewTypes.push_back(NestTy);
4691
13
4692
13
          if (I == E)
4693
7
            break;
4694
6
4695
6
          // Add the original type.
4696
6
          NewTypes.push_back(*I);
4697
6
4698
6
          ++ArgNo;
4699
6
          ++I;
4700
6
        } while (true);
4701
7
      }
4702
7
4703
7
      // Replace the trampoline call with a direct call.  Let the generic
4704
7
      // code sort out any function type mismatches.
4705
7
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
4706
7
                                                FTy->isVarArg());
4707
7
      Constant *NewCallee =
4708
7
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
4709
7
        NestF : ConstantExpr::getBitCast(NestF,
4710
0
                                         PointerType::getUnqual(NewFTy));
4711
7
      AttributeList NewPAL =
4712
7
          AttributeList::get(FTy->getContext(), Attrs.getFnAttributes(),
4713
7
                             Attrs.getRetAttributes(), NewArgAttrs);
4714
7
4715
7
      SmallVector<OperandBundleDef, 1> OpBundles;
4716
7
      Call.getOperandBundlesAsDefs(OpBundles);
4717
7
4718
7
      Instruction *NewCaller;
4719
7
      if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
4720
0
        NewCaller = InvokeInst::Create(NewFTy, NewCallee,
4721
0
                                       II->getNormalDest(), II->getUnwindDest(),
4722
0
                                       NewArgs, OpBundles);
4723
0
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
4724
0
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
4725
7
      } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
4726
0
        NewCaller =
4727
0
            CallBrInst::Create(NewFTy, NewCallee, CBI->getDefaultDest(),
4728
0
                               CBI->getIndirectDests(), NewArgs, OpBundles);
4729
0
        cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
4730
0
        cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
4731
7
      } else {
4732
7
        NewCaller = CallInst::Create(NewFTy, NewCallee, NewArgs, OpBundles);
4733
7
        cast<CallInst>(NewCaller)->setTailCallKind(
4734
7
            cast<CallInst>(Call).getTailCallKind());
4735
7
        cast<CallInst>(NewCaller)->setCallingConv(
4736
7
            cast<CallInst>(Call).getCallingConv());
4737
7
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
4738
7
      }
4739
7
      NewCaller->setDebugLoc(Call.getDebugLoc());
4740
7
4741
7
      return NewCaller;
4742
7
    }
4743
0
  }
4744
0
4745
0
  // Replace the trampoline call with a direct call.  Since there is no 'nest'
4746
0
  // parameter, there is no need to adjust the argument list.  Let the generic
4747
0
  // code sort out any function type mismatches.
4748
0
  Constant *NewCallee = ConstantExpr::getBitCast(NestF, CalleeTy);
4749
0
  Call.setCalledFunction(FTy, NewCallee);
4750
0
  return &Call;
4751
0
}
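// A reduced sketch of the type rebuild in transformCallThroughTrampoline; the
// helper name is hypothetical. It returns a FunctionType equal to FTy but with
// the nest chain's type inserted at position NestArgNo.
static FunctionType *buildTypeWithNestParam(FunctionType *FTy, Type *NestTy,
                                            unsigned NestArgNo) {
  std::vector<Type *> Params;
  Params.reserve(FTy->getNumParams() + 1);
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
    if (i == NestArgNo)
      Params.push_back(NestTy);
    Params.push_back(FTy->getParamType(i));
  }
  if (NestArgNo >= FTy->getNumParams()) // nest follows the last fixed param
    Params.push_back(NestTy);
  return FunctionType::get(FTy->getReturnType(), Params, FTy->isVarArg());
}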