Coverage Report

Created: 2019-07-24 05:18

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/lib/Support/ThreadPool.cpp
Line
Count
Source
1
//==-- llvm/Support/ThreadPool.cpp - A ThreadPool implementation -*- C++ -*-==//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This file implements a crude C++11 based thread pool.
10
//
11
//===----------------------------------------------------------------------===//
12
13
#include "llvm/Support/ThreadPool.h"
14
15
#include "llvm/Config/llvm-config.h"
16
#include "llvm/Support/Threading.h"
17
#include "llvm/Support/raw_ostream.h"
18
19
using namespace llvm;
20
21
#if LLVM_ENABLE_THREADS
22
23
// Default to hardware_concurrency
24
6
ThreadPool::ThreadPool() : ThreadPool(hardware_concurrency()) {}
Unexecuted instantiation: llvm::ThreadPool::ThreadPool()
llvm::ThreadPool::ThreadPool()
Line
Count
Source
24
6
ThreadPool::ThreadPool() : ThreadPool(hardware_concurrency()) {}
25
26
ThreadPool::ThreadPool(unsigned ThreadCount)
27
528
    : ActiveThreads(0), EnableFlag(true) {
28
528
  // Create ThreadCount threads that will loop forever, wait on QueueCondition
29
528
  // for tasks to be queued or the Pool to be destroyed.
30
528
  Threads.reserve(ThreadCount);
31
1.48k
  for (unsigned ThreadID = 0; ThreadID < ThreadCount; ++ThreadID) {  [region '++ThreadID': 956]
32
956
    Threads.emplace_back([&] {
33
1.87k
      while (true) {  [region 'true': 1.87k]
34
1.87k
        PackagedTaskTy Task;
35
1.87k
        {
36
1.87k
          std::unique_lock<std::mutex> LockGuard(QueueLock);
37
1.87k
          // Wait for tasks to be pushed in the queue
38
1.87k
          QueueCondition.wait(LockGuard,
39
3.26k
                              [&] { return !EnableFlag || !Tasks.empty(); });  [region '!Tasks.empty()': 2.28k]
40
1.87k
          // Exit condition
41
1.87k
          if (!EnableFlag && Tasks.empty())  [region 'Tasks.empty()': 984]
42
956
            return;
43
923
          // Yeah, we have a task, grab it and release the lock on the queue
44
923
45
923
          // We first need to signal that we are active before popping the queue
46
923
          // in order for wait() to properly detect that even if the queue is
47
923
          // empty, there is still a task in flight.
48
923
          {
49
923
            std::unique_lock<std::mutex> LockGuard(CompletionLock);
50
923
            ++ActiveThreads;
51
923
          }
52
923
          Task = std::move(Tasks.front());
53
923
          Tasks.pop();
54
923
        }
55
923
        // Run the task we just grabbed
56
923
        Task();
57
923
58
923
        {
59
923
          // Adjust `ActiveThreads`, in case someone waits on ThreadPool::wait()
60
923
          std::unique_lock<std::mutex> LockGuard(CompletionLock);
61
923
          --ActiveThreads;
62
923
        }
63
923
64
923
        // Notify task completion, in case someone waits on ThreadPool::wait()
65
923
        CompletionCondition.notify_all();
66
923
      }
67
954
    });
68
956
  }
69
528
}
70
71
501
void ThreadPool::wait() {
72
501
  // Wait for all threads to complete and the queue to be empty
73
501
  std::unique_lock<std::mutex> LockGuard(CompletionLock);
74
501
  // The order of the checks for ActiveThreads and Tasks.empty() matters because
75
501
  // any active threads might be modifying the Tasks queue, and this would be a
76
501
  // race.
77
501
  CompletionCondition.wait(LockGuard,
78
1.09k
                           [&] { return !ActiveThreads && Tasks.empty(); });  [region 'Tasks.empty()': 622]
79
501
}
80
81
925
std::shared_future<void> ThreadPool::asyncImpl(TaskTy Task) {
82
925
  /// Wrap the Task in a packaged_task to return a future object.
83
925
  PackagedTaskTy PackagedTask(std::move(Task));
84
925
  auto Future = PackagedTask.get_future();
85
925
  {
86
925
    // Lock the queue and push the new task
87
925
    std::unique_lock<std::mutex> LockGuard(QueueLock);
88
925
89
925
    // Don't allow enqueueing after disabling the pool
90
925
    assert(EnableFlag && "Queuing a thread during ThreadPool destruction");
91
925
92
925
    Tasks.push(std::move(PackagedTask));
93
925
  }
94
925
  QueueCondition.notify_one();
95
925
  return Future.share();
96
925
}
97
98
// The destructor joins all threads, waiting for completion.
99
528
ThreadPool::~ThreadPool() {
100
528
  {
101
528
    std::unique_lock<std::mutex> LockGuard(QueueLock);
102
528
    EnableFlag = false;
103
528
  }
104
528
  QueueCondition.notify_all();
105
528
  for (auto &Worker : Threads)
106
956
    Worker.join();
107
528
}
108
109
#else // LLVM_ENABLE_THREADS Disabled
110
111
ThreadPool::ThreadPool() : ThreadPool(0) {}
112
113
// No threads are launched, issue a warning if ThreadCount is not 0
114
ThreadPool::ThreadPool(unsigned ThreadCount)
115
    : ActiveThreads(0) {
116
  if (ThreadCount) {
117
    errs() << "Warning: request a ThreadPool with " << ThreadCount
118
           << " threads, but LLVM_ENABLE_THREADS has been turned off\n";
119
  }
120
}
121
122
void ThreadPool::wait() {
123
  // Sequential implementation running the tasks
124
  while (!Tasks.empty()) {
125
    auto Task = std::move(Tasks.front());
126
    Tasks.pop();
127
    Task();
128
  }
129
}
130
131
std::shared_future<void> ThreadPool::asyncImpl(TaskTy Task) {
132
  // Get a Future with launch::deferred execution using std::async
133
  auto Future = std::async(std::launch::deferred, std::move(Task)).share();
134
  // Wrap the future so that both ThreadPool::wait() can operate and the
135
  // returned future can be sync'ed on.
136
  PackagedTaskTy PackagedTask([Future]() { Future.get(); });
137
  Tasks.push(std::move(PackagedTask));
138
  return Future;
139
}
140
141
ThreadPool::~ThreadPool() {
142
  wait();
143
}
144
145
#endif