Coverage Report

Created: 2023-09-21 18:56

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp
Line
Count
Source (jump to first uncovered line)
1
//===- Store.cpp - Interface for maps from Locations to Values ------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
//  This file defined the types Store and StoreManager.
10
//
11
//===----------------------------------------------------------------------===//
12
13
#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
14
#include "clang/AST/ASTContext.h"
15
#include "clang/AST/CXXInheritance.h"
16
#include "clang/AST/CharUnits.h"
17
#include "clang/AST/Decl.h"
18
#include "clang/AST/DeclCXX.h"
19
#include "clang/AST/DeclObjC.h"
20
#include "clang/AST/Expr.h"
21
#include "clang/AST/Type.h"
22
#include "clang/Basic/LLVM.h"
23
#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
24
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
25
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
26
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
27
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
28
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
29
#include "clang/StaticAnalyzer/Core/PathSensitive/StoreRef.h"
30
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
31
#include "llvm/ADT/APSInt.h"
32
#include "llvm/ADT/SmallVector.h"
33
#include "llvm/Support/Casting.h"
34
#include "llvm/Support/ErrorHandling.h"
35
#include <cassert>
36
#include <cstdint>
37
#include <optional>
38
39
using namespace clang;
40
using namespace ento;
41
42
StoreManager::StoreManager(ProgramStateManager &stateMgr)
43
16.2k
    : svalBuilder(stateMgr.getSValBuilder()), StateMgr(stateMgr),
44
16.2k
      MRMgr(svalBuilder.getRegionManager()), Ctx(stateMgr.getContext()) {}
45
46
StoreRef StoreManager::enterStackFrame(Store OldStore,
47
                                       const CallEvent &Call,
48
35.1k
                                       const StackFrameContext *LCtx) {
49
35.1k
  StoreRef Store = StoreRef(OldStore, *this);
50
51
35.1k
  SmallVector<CallEvent::FrameBindingTy, 16> InitialBindings;
52
35.1k
  Call.getInitialStackFrameContents(LCtx, InitialBindings);
53
54
35.1k
  for (const auto &I : InitialBindings)
55
40.3k
    Store = Bind(Store.getStore(), I.first.castAs<Loc>(), I.second);
56
57
35.1k
  return Store;
58
35.1k
}
59
60
const ElementRegion *StoreManager::MakeElementRegion(const SubRegion *Base,
61
                                                     QualType EleTy,
62
7.01k
                                                     uint64_t index) {
63
7.01k
  NonLoc idx = svalBuilder.makeArrayIndex(index);
64
7.01k
  return MRMgr.getElementRegion(EleTy, idx, Base, svalBuilder.getContext());
65
7.01k
}
66
67
const ElementRegion *StoreManager::GetElementZeroRegion(const SubRegion *R,
68
15.1k
                                                        QualType T) {
69
15.1k
  NonLoc idx = svalBuilder.makeZeroArrayIndex();
70
15.1k
  assert(!T.isNull());
71
15.1k
  return MRMgr.getElementRegion(T, idx, R, Ctx);
72
15.1k
}
73
74
std::optional<const MemRegion *> StoreManager::castRegion(const MemRegion *R,
75
67.2k
                                                          QualType CastToTy) {
76
67.2k
  ASTContext &Ctx = StateMgr.getContext();
77
78
  // Handle casts to Objective-C objects.
79
67.2k
  if (CastToTy->isObjCObjectPointerType())
80
1.26k
    return R->StripCasts();
81
82
65.9k
  if (CastToTy->isBlockPointerType()) {
83
    // FIXME: We may need different solutions, depending on the symbol
84
    // involved.  Blocks can be casted to/from 'id', as they can be treated
85
    // as Objective-C objects.  This could possibly be handled by enhancing
86
    // our reasoning of downcasts of symbolic objects.
87
3
    if (isa<CodeTextRegion, SymbolicRegion>(R))
88
2
      return R;
89
90
    // We don't know what to make of it.  Return a NULL region, which
91
    // will be interpreted as UnknownVal.
92
1
    return std::nullopt;
93
3
  }
94
95
  // Now assume we are casting from pointer to pointer. Other cases should
96
  // already be handled.
97
65.9k
  QualType PointeeTy = CastToTy->getPointeeType();
98
65.9k
  QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
99
65.9k
  CanonPointeeTy = CanonPointeeTy.getLocalUnqualifiedType();
100
101
  // Handle casts to void*.  We just pass the region through.
102
65.9k
  if (CanonPointeeTy == Ctx.VoidTy)
103
488
    return R;
104
105
65.7k
  
const auto IsSameRegionType = [&Ctx](const MemRegion *R, QualType OtherTy) 65.4k
{
106
65.7k
    if (const auto *TR = dyn_cast<TypedValueRegion>(R)) {
107
59.7k
      QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
108
59.7k
      if (OtherTy == ObjTy.getLocalUnqualifiedType())
109
58.4k
        return true;
110
59.7k
    }
111
7.23k
    return false;
112
65.7k
  };
113
114
  // Handle casts from compatible types.
115
65.4k
  if (R->isBoundable() && 
IsSameRegionType(R, CanonPointeeTy)65.4k
)
116
58.4k
    return R;
117
118
  // Process region cast according to the kind of the region being cast.
119
7.00k
  switch (R->getKind()) {
120
0
    case MemRegion::CXXThisRegionKind:
121
0
    case MemRegion::CodeSpaceRegionKind:
122
0
    case MemRegion::StackLocalsSpaceRegionKind:
123
0
    case MemRegion::StackArgumentsSpaceRegionKind:
124
0
    case MemRegion::HeapSpaceRegionKind:
125
0
    case MemRegion::UnknownSpaceRegionKind:
126
0
    case MemRegion::StaticGlobalSpaceRegionKind:
127
0
    case MemRegion::GlobalInternalSpaceRegionKind:
128
0
    case MemRegion::GlobalSystemSpaceRegionKind:
129
0
    case MemRegion::GlobalImmutableSpaceRegionKind: {
130
0
      llvm_unreachable("Invalid region cast");
131
0
    }
132
133
2
    case MemRegion::FunctionCodeRegionKind:
134
2
    case MemRegion::BlockCodeRegionKind:
135
2
    case MemRegion::BlockDataRegionKind:
136
2
    case MemRegion::StringRegionKind:
137
      // FIXME: Need to handle arbitrary downcasts.
138
5.81k
    case MemRegion::SymbolicRegionKind:
139
5.83k
    case MemRegion::AllocaRegionKind:
140
5.83k
    case MemRegion::CompoundLiteralRegionKind:
141
5.93k
    case MemRegion::FieldRegionKind:
142
5.94k
    case MemRegion::ObjCIvarRegionKind:
143
5.94k
    case MemRegion::ObjCStringRegionKind:
144
6.48k
    case MemRegion::NonParamVarRegionKind:
145
6.48k
    case MemRegion::ParamVarRegionKind:
146
6.48k
    case MemRegion::CXXTempObjectRegionKind:
147
6.48k
    case MemRegion::CXXLifetimeExtendedObjectRegionKind:
148
6.49k
    case MemRegion::CXXBaseObjectRegionKind:
149
6.49k
    case MemRegion::CXXDerivedObjectRegionKind:
150
6.49k
      return MakeElementRegion(cast<SubRegion>(R), PointeeTy);
151
152
515
    case MemRegion::ElementRegionKind: {
153
      // If we are casting from an ElementRegion to another type, the
154
      // algorithm is as follows:
155
      //
156
      // (1) Compute the "raw offset" of the ElementRegion from the
157
      //     base region.  This is done by calling 'getAsRawOffset()'.
158
      //
159
      // (2a) If we get a 'RegionRawOffset' after calling
160
      //      'getAsRawOffset()', determine if the absolute offset
161
      //      can be exactly divided into chunks of the size of the
162
      //      casted-pointee type.  If so, create a new ElementRegion with
163
      //      the pointee-cast type as the new ElementType and the index
164
      //      being the offset divded by the chunk size.  If not, create
165
      //      a new ElementRegion at offset 0 off the raw offset region.
166
      //
167
      // (2b) If we don't a get a 'RegionRawOffset' after calling
168
      //      'getAsRawOffset()', it means that we are at offset 0.
169
      //
170
      // FIXME: Handle symbolic raw offsets.
171
172
515
      const ElementRegion *elementR = cast<ElementRegion>(R);
173
515
      const RegionRawOffset &rawOff = elementR->getAsArrayOffset();
174
515
      const MemRegion *baseR = rawOff.getRegion();
175
176
      // If we cannot compute a raw offset, throw up our hands and return
177
      // a NULL MemRegion*.
178
515
      if (!baseR)
179
13
        return std::nullopt;
180
181
502
      CharUnits off = rawOff.getOffset();
182
183
502
      if (off.isZero()) {
184
        // Edge case: we are at 0 bytes off the beginning of baseR. We check to
185
        // see if the type we are casting to is the same as the type of the base
186
        // region. If so, just return the base region.
187
231
        if (IsSameRegionType(baseR, CanonPointeeTy))
188
6
          return baseR;
189
        // Otherwise, create a new ElementRegion at offset 0.
190
225
        return MakeElementRegion(cast<SubRegion>(baseR), PointeeTy);
191
231
      }
192
193
      // We have a non-zero offset from the base region.  We want to determine
194
      // if the offset can be evenly divided by sizeof(PointeeTy).  If so,
195
      // we create an ElementRegion whose index is that value.  Otherwise, we
196
      // create two ElementRegions, one that reflects a raw offset and the other
197
      // that reflects the cast.
198
199
      // Compute the index for the new ElementRegion.
200
271
      int64_t newIndex = 0;
201
271
      const MemRegion *newSuperR = nullptr;
202
203
      // We can only compute sizeof(PointeeTy) if it is a complete type.
204
271
      if (!PointeeTy->isIncompleteType()) {
205
        // Compute the size in **bytes**.
206
269
        CharUnits pointeeTySize = Ctx.getTypeSizeInChars(PointeeTy);
207
269
        if (!pointeeTySize.isZero()) {
208
          // Is the offset a multiple of the size?  If so, we can layer the
209
          // ElementRegion (with elementType == PointeeTy) directly on top of
210
          // the base region.
211
267
          if (off % pointeeTySize == 0) {
212
243
            newIndex = off / pointeeTySize;
213
243
            newSuperR = baseR;
214
243
          }
215
267
        }
216
269
      }
217
218
271
      if (!newSuperR) {
219
        // Create an intermediate ElementRegion to represent the raw byte.
220
        // This will be the super region of the final ElementRegion.
221
28
        newSuperR = MakeElementRegion(cast<SubRegion>(baseR), Ctx.CharTy,
222
28
                                      off.getQuantity());
223
28
      }
224
225
271
      return MakeElementRegion(cast<SubRegion>(newSuperR), PointeeTy, newIndex);
226
502
    }
227
7.00k
  }
228
229
0
  llvm_unreachable("unreachable");
230
0
}
231
232
856
static bool regionMatchesCXXRecordType(SVal V, QualType Ty) {
233
856
  const MemRegion *MR = V.getAsRegion();
234
856
  if (!MR)
235
7
    return true;
236
237
849
  const auto *TVR = dyn_cast<TypedValueRegion>(MR);
238
849
  if (!TVR)
239
245
    return true;
240
241
604
  const CXXRecordDecl *RD = TVR->getValueType()->getAsCXXRecordDecl();
242
604
  if (!RD)
243
0
    return true;
244
245
604
  const CXXRecordDecl *Expected = Ty->getPointeeCXXRecordDecl();
246
604
  if (!Expected)
247
440
    Expected = Ty->getAsCXXRecordDecl();
248
249
604
  return Expected->getCanonicalDecl() == RD->getCanonicalDecl();
250
604
}
251
252
856
SVal StoreManager::evalDerivedToBase(SVal Derived, const CastExpr *Cast) {
253
  // Early return to avoid doing the wrong thing in the face of
254
  // reinterpret_cast.
255
856
  if (!regionMatchesCXXRecordType(Derived, Cast->getSubExpr()->getType()))
256
1
    return UnknownVal();
257
258
  // Walk through the cast path to create nested CXXBaseRegions.
259
855
  SVal Result = Derived;
260
952
  for (const CXXBaseSpecifier *Base : Cast->path()) {
261
952
    Result = evalDerivedToBase(Result, Base->getType(), Base->isVirtual());
262
952
  }
263
855
  return Result;
264
856
}
265
266
10
SVal StoreManager::evalDerivedToBase(SVal Derived, const CXXBasePath &Path) {
267
  // Walk through the path to create nested CXXBaseRegions.
268
10
  SVal Result = Derived;
269
10
  for (const auto &I : Path)
270
12
    Result = evalDerivedToBase(Result, I.Base->getType(),
271
12
                               I.Base->isVirtual());
272
10
  return Result;
273
10
}
274
275
SVal StoreManager::evalDerivedToBase(SVal Derived, QualType BaseType,
276
1.85k
                                     bool IsVirtual) {
277
1.85k
  const MemRegion *DerivedReg = Derived.getAsRegion();
278
1.85k
  if (!DerivedReg)
279
11
    return Derived;
280
281
1.84k
  const CXXRecordDecl *BaseDecl = BaseType->getPointeeCXXRecordDecl();
282
1.84k
  if (!BaseDecl)
283
1.84k
    BaseDecl = BaseType->getAsCXXRecordDecl();
284
1.84k
  assert(BaseDecl && "not a C++ object?");
285
286
1.84k
  if (const auto *AlreadyDerivedReg =
287
1.84k
          dyn_cast<CXXDerivedObjectRegion>(DerivedReg)) {
288
13
    if (const auto *SR =
289
13
            dyn_cast<SymbolicRegion>(AlreadyDerivedReg->getSuperRegion()))
290
13
      if (SR->getSymbol()->getType()->getPointeeCXXRecordDecl() == BaseDecl)
291
10
        return loc::MemRegionVal(SR);
292
293
3
    DerivedReg = AlreadyDerivedReg->getSuperRegion();
294
3
  }
295
296
1.83k
  const MemRegion *BaseReg = MRMgr.getCXXBaseObjectRegion(
297
1.83k
      BaseDecl, cast<SubRegion>(DerivedReg), IsVirtual);
298
299
1.83k
  return loc::MemRegionVal(BaseReg);
300
1.84k
}
301
302
/// Returns the static type of the given region, if it represents a C++ class
303
/// object.
304
///
305
/// This handles both fully-typed regions, where the dynamic type is known, and
306
/// symbolic regions, where the dynamic type is merely bounded (and even then,
307
/// only ostensibly!), but does not take advantage of any dynamic type info.
308
272
static const CXXRecordDecl *getCXXRecordType(const MemRegion *MR) {
309
272
  if (const auto *TVR = dyn_cast<TypedValueRegion>(MR))
310
179
    return TVR->getValueType()->getAsCXXRecordDecl();
311
93
  if (const auto *SR = dyn_cast<SymbolicRegion>(MR))
312
93
    return SR->getSymbol()->getType()->getPointeeCXXRecordDecl();
313
0
  return nullptr;
314
93
}
315
316
std::optional<SVal> StoreManager::evalBaseToDerived(SVal Base,
317
144
                                                    QualType TargetType) {
318
144
  const MemRegion *MR = Base.getAsRegion();
319
144
  if (!MR)
320
0
    return UnknownVal();
321
322
  // Assume the derived class is a pointer or a reference to a CXX record.
323
144
  TargetType = TargetType->getPointeeType();
324
144
  assert(!TargetType.isNull());
325
144
  const CXXRecordDecl *TargetClass = TargetType->getAsCXXRecordDecl();
326
144
  if (!TargetClass && 
!TargetType->isVoidType()1
)
327
0
    return UnknownVal();
328
329
  // Drill down the CXXBaseObject chains, which represent upcasts (casts from
330
  // derived to base).
331
272
  
while (const CXXRecordDecl *144
MRClass = getCXXRecordType(MR)) {
332
    // If found the derived class, the cast succeeds.
333
270
    if (MRClass == TargetClass)
334
92
      return loc::MemRegionVal(MR);
335
336
    // We skip over incomplete types. They must be the result of an earlier
337
    // reinterpret_cast, as one can only dynamic_cast between types in the same
338
    // class hierarchy.
339
178
    if (!TargetType->isVoidType() && 
MRClass->hasDefinition()177
) {
340
      // Static upcasts are marked as DerivedToBase casts by Sema, so this will
341
      // only happen when multiple or virtual inheritance is involved.
342
174
      CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/true,
343
174
                         /*DetectVirtual=*/false);
344
174
      if (MRClass->isDerivedFrom(TargetClass, Paths))
345
9
        return evalDerivedToBase(loc::MemRegionVal(MR), Paths.front());
346
174
    }
347
348
169
    if (const auto *BaseR = dyn_cast<CXXBaseObjectRegion>(MR)) {
349
      // Drill down the chain to get the derived classes.
350
119
      MR = BaseR->getSuperRegion();
351
119
      continue;
352
119
    }
353
354
    // If this is a cast to void*, return the region.
355
50
    if (TargetType->isVoidType())
356
1
      return loc::MemRegionVal(MR);
357
358
    // Strange use of reinterpret_cast can give us paths we don't reason
359
    // about well, by putting in ElementRegions where we'd expect
360
    // CXXBaseObjectRegions. If it's a valid reinterpret_cast (i.e. if the
361
    // derived class has a zero offset from the base class), then it's safe
362
    // to strip the cast; if it's invalid, -Wreinterpret-base-class should
363
    // catch it. In the interest of performance, the analyzer will silently
364
    // do the wrong thing in the invalid case (because offsets for subregions
365
    // will be wrong).
366
49
    const MemRegion *Uncasted = MR->StripCasts(/*IncludeBaseCasts=*/false);
367
49
    if (Uncasted == MR) {
368
      // We reached the bottom of the hierarchy and did not find the derived
369
      // class. We must be casting the base to derived, so the cast should
370
      // fail.
371
40
      break;
372
40
    }
373
374
9
    MR = Uncasted;
375
9
  }
376
377
  // If we're casting a symbolic base pointer to a derived class, use
378
  // CXXDerivedObjectRegion to represent the cast. If it's a pointer to an
379
  // unrelated type, it must be a weird reinterpret_cast and we have to
380
  // be fine with ElementRegion. TODO: Should we instead make
381
  // Derived{TargetClass, Element{SourceClass, SR}}?
382
42
  if (const auto *SR = dyn_cast<SymbolicRegion>(MR)) {
383
30
    QualType T = SR->getSymbol()->getType();
384
30
    const CXXRecordDecl *SourceClass = T->getPointeeCXXRecordDecl();
385
30
    if (TargetClass && SourceClass && 
TargetClass->isDerivedFrom(SourceClass)29
)
386
25
      return loc::MemRegionVal(
387
25
          MRMgr.getCXXDerivedObjectRegion(TargetClass, SR));
388
5
    return loc::MemRegionVal(GetElementZeroRegion(SR, TargetType));
389
30
  }
390
391
  // We failed if the region we ended up with has perfect type info.
392
12
  if (isa<TypedValueRegion>(MR))
393
12
    return std::nullopt;
394
395
0
  return UnknownVal();
396
12
}
397
398
57.5k
SVal StoreManager::getLValueFieldOrIvar(const Decl *D, SVal Base) {
399
57.5k
  if (Base.isUnknownOrUndef())
400
13
    return Base;
401
402
57.5k
  Loc BaseL = Base.castAs<Loc>();
403
57.5k
  const SubRegion* BaseR = nullptr;
404
405
57.5k
  switch (BaseL.getSubKind()) {
406
57.4k
  case loc::MemRegionValKind:
407
57.4k
    BaseR = cast<SubRegion>(BaseL.castAs<loc::MemRegionVal>().getRegion());
408
57.4k
    break;
409
410
0
  case loc::GotoLabelKind:
411
    // These are anormal cases. Flag an undefined value.
412
0
    return UndefinedVal();
413
414
65
  case loc::ConcreteIntKind:
415
    // While these seem funny, this can happen through casts.
416
    // FIXME: What we should return is the field offset, not base. For example,
417
    //  add the field offset to the integer value.  That way things
418
    //  like this work properly:  &(((struct foo *) 0xa)->f)
419
    //  However, that's not easy to fix without reducing our abilities
420
    //  to catch null pointer dereference. Eg., ((struct foo *)0x0)->f = 7
421
    //  is a null dereference even though we're dereferencing offset of f
422
    //  rather than null. Coming up with an approach that computes offsets
423
    //  over null pointers properly while still being able to catch null
424
    //  dereferences might be worth it.
425
65
    return Base;
426
427
0
  default:
428
0
    llvm_unreachable("Unhandled Base.");
429
57.5k
  }
430
431
  // NOTE: We must have this check first because ObjCIvarDecl is a subclass
432
  // of FieldDecl.
433
57.4k
  if (const auto *ID = dyn_cast<ObjCIvarDecl>(D))
434
1.46k
    return loc::MemRegionVal(MRMgr.getObjCIvarRegion(ID, BaseR));
435
436
56.0k
  return loc::MemRegionVal(MRMgr.getFieldRegion(cast<FieldDecl>(D), BaseR));
437
57.4k
}
438
439
1.48k
SVal StoreManager::getLValueIvar(const ObjCIvarDecl *decl, SVal base) {
440
1.48k
  return getLValueFieldOrIvar(decl, base);
441
1.48k
}
442
443
SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
444
18.2k
                                    SVal Base) {
445
446
  // Special case, if index is 0, return the same type as if
447
  // this was not an array dereference.
448
18.2k
  if (Offset.isZeroConstant()) {
449
2.17k
    QualType BT = Base.getType(this->Ctx);
450
2.17k
    if (!BT.isNull() && 
!elementType.isNull()2.17k
) {
451
2.17k
      QualType PointeeTy = BT->getPointeeType();
452
2.17k
      if (!PointeeTy.isNull() &&
453
2.17k
          
PointeeTy.getCanonicalType() == elementType.getCanonicalType()2.15k
)
454
1.69k
        return Base;
455
2.17k
    }
456
2.17k
  }
457
458
  // If the base is an unknown or undefined value, just return it back.
459
  // FIXME: For absolute pointer addresses, we just return that value back as
460
  //  well, although in reality we should return the offset added to that
461
  //  value. See also the similar FIXME in getLValueFieldOrIvar().
462
16.5k
  if (Base.isUnknownOrUndef() || 
isa<loc::ConcreteInt>(Base)16.5k
)
463
69
    return Base;
464
465
16.5k
  if (isa<loc::GotoLabel>(Base))
466
2
    return UnknownVal();
467
468
16.5k
  const SubRegion *BaseRegion =
469
16.5k
      Base.castAs<loc::MemRegionVal>().getRegionAs<SubRegion>();
470
471
  // Pointer of any type can be cast and used as array base.
472
16.5k
  const auto *ElemR = dyn_cast<ElementRegion>(BaseRegion);
473
474
  // Convert the offset to the appropriate size and signedness.
475
16.5k
  Offset = svalBuilder.convertToArrayIndex(Offset).castAs<NonLoc>();
476
477
16.5k
  if (!ElemR) {
478
    // If the base region is not an ElementRegion, create one.
479
    // This can happen in the following example:
480
    //
481
    //   char *p = __builtin_alloc(10);
482
    //   p[1] = 8;
483
    //
484
    //  Observe that 'p' binds to an AllocaRegion.
485
11.5k
    return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
486
11.5k
                                                    BaseRegion, Ctx));
487
11.5k
  }
488
489
4.98k
  SVal BaseIdx = ElemR->getIndex();
490
491
4.98k
  if (!isa<nonloc::ConcreteInt>(BaseIdx))
492
28
    return UnknownVal();
493
494
4.96k
  const llvm::APSInt &BaseIdxI =
495
4.96k
      BaseIdx.castAs<nonloc::ConcreteInt>().getValue();
496
497
  // Only allow non-integer offsets if the base region has no offset itself.
498
  // FIXME: This is a somewhat arbitrary restriction. We should be using
499
  // SValBuilder here to add the two offsets without checking their types.
500
4.96k
  if (!isa<nonloc::ConcreteInt>(Offset)) {
501
335
    if (isa<ElementRegion>(BaseRegion->StripCasts()))
502
0
      return UnknownVal();
503
504
335
    return loc::MemRegionVal(MRMgr.getElementRegion(
505
335
        elementType, Offset, cast<SubRegion>(ElemR->getSuperRegion()), Ctx));
506
335
  }
507
508
4.62k
  const llvm::APSInt& OffI = Offset.castAs<nonloc::ConcreteInt>().getValue();
509
4.62k
  assert(BaseIdxI.isSigned());
510
511
  // Compute the new index.
512
4.62k
  nonloc::ConcreteInt NewIdx(svalBuilder.getBasicValueFactory().getValue(BaseIdxI +
513
4.62k
                                                                    OffI));
514
515
  // Construct the new ElementRegion.
516
4.62k
  const SubRegion *ArrayR = cast<SubRegion>(ElemR->getSuperRegion());
517
4.62k
  return loc::MemRegionVal(MRMgr.getElementRegion(elementType, NewIdx, ArrayR,
518
4.62k
                                                  Ctx));
519
4.62k
}
520
521
75.6k
StoreManager::BindingsHandler::~BindingsHandler() = default;
522
523
bool StoreManager::FindUniqueBinding::HandleBinding(StoreManager& SMgr,
524
                                                    Store store,
525
                                                    const MemRegion* R,
526
17.8k
                                                    SVal val) {
527
17.8k
  SymbolRef SymV = val.getAsLocSymbol();
528
17.8k
  if (!SymV || 
SymV != Sym11.4k
)
529
10.7k
    return true;
530
531
7.15k
  if (Binding) {
532
638
    First = false;
533
638
    return false;
534
638
  }
535
6.51k
  else
536
6.51k
    Binding = R;
537
538
6.51k
  return true;
539
7.15k
}