1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
|
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/gn/scheduler.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/strings/string_number_conversions.h"
#include "tools/gn/standard_out.h"
// Process-wide scheduler singleton. Set by the Scheduler constructor and
// cleared (to NULL) by its destructor; NULL when no Scheduler is alive.
Scheduler* g_scheduler = NULL;
namespace {

// Returns the number of worker threads for the pool. Honors the "--threads"
// command-line switch; falls back to the default (32) when the switch is
// absent, unparseable, or not a positive number. Previously a value of 0 or
// a negative number passed through unvalidated, which would configure the
// SequencedWorkerPool with an invalid thread count.
int GetThreadCount() {
  std::string thread_count =
      CommandLine::ForCurrentProcess()->GetSwitchValueASCII("threads");
  int result;
  if (thread_count.empty() || !base::StringToInt(thread_count, &result) ||
      result < 1)
    return 32;
  return result;
}

}  // namespace
// Creates the worker pool (sized by the --threads switch, see
// GetThreadCount()) and registers this instance as the process-wide
// singleton via |g_scheduler|.
Scheduler::Scheduler()
    : pool_(new base::SequencedWorkerPool(GetThreadCount(), "worker_")),
      input_file_manager_(new InputFileManager),
      verbose_logging_(false),
      work_count_(0),
      is_failed_(false),
      has_been_shutdown_(false) {
  g_scheduler = this;
}
Scheduler::~Scheduler() {
  // Run() normally shuts the pool down (and sets has_been_shutdown_); only
  // shut down here if that never happened, so Shutdown() isn't called twice.
  if (!has_been_shutdown_)
    pool_->Shutdown();
  // Unregister the singleton so g_scheduler never dangles.
  g_scheduler = NULL;
}
// Runs the main message loop until all work completes (or a failure quits
// it), then shuts down the worker pool. Returns true on success, false if
// any error was reported via FailWithError().
bool Scheduler::Run() {
  // Blocks until OnComplete() or FailWithErrorOnMainThread() calls Quit().
  runner_.Run();
  bool local_is_failed;
  {
    base::AutoLock lock(lock_);
    // Snapshot the failure flag and mark shutdown under the lock so racing
    // workers see a consistent state.
    local_is_failed = is_failed();
    has_been_shutdown_ = true;
  }
  // Don't do this inside the lock since it will block on the workers, which
  // may be in turn waiting on the lock.
  pool_->Shutdown();
  return !local_is_failed;
}
// Logs a highlighted verb plus message, callable from any thread. Output is
// always emitted on the main thread.
void Scheduler::Log(const std::string& verb, const std::string& msg) {
  if (base::MessageLoop::current() != &main_loop_) {
    // Off the main thread: bounce the output over to it. The run loop joins
    // all sub threads before this object is destroyed, so the raw pointer in
    // Unretained can't dangle.
    main_loop_.PostTask(FROM_HERE,
                        base::Bind(&Scheduler::LogOnMainThread,
                                   base::Unretained(this), verb, msg));
    return;
  }
  LogOnMainThread(verb, msg);
}
// Records a build failure and arranges for |err| to be printed on the main
// thread. Only the first failure is acted on; subsequent ones (and any
// arriving after shutdown) are dropped. Callable from any thread.
void Scheduler::FailWithError(const Err& err) {
  DCHECK(err.has_error());
  {
    base::AutoLock lock(lock_);
    if (is_failed_ || has_been_shutdown_)
      return;  // Already failed or shutting down: keep only the first error.
    is_failed_ = true;
  }

  if (base::MessageLoop::current() != &main_loop_) {
    // Off the main thread: hand the error over to it. The run loop joins all
    // sub threads before this object dies, so Unretained is safe here.
    main_loop_.PostTask(FROM_HERE,
                        base::Bind(&Scheduler::FailWithErrorOnMainThread,
                                   base::Unretained(this), err));
    return;
  }
  FailWithErrorOnMainThread(err);
}
// Queues |work| onto the worker pool. Each scheduled item bumps the pending
// work count; DoWork() decrements it after the closure runs.
void Scheduler::ScheduleWork(const base::Closure& work) {
  IncrementWorkCount();
  // Wrap the caller's closure so completion accounting happens in DoWork().
  base::Closure wrapped =
      base::Bind(&Scheduler::DoWork, base::Unretained(this), work);
  pool_->PostWorkerTaskWithShutdownBehavior(
      FROM_HERE, wrapped, base::SequencedWorkerPool::BLOCK_SHUTDOWN);
}
// Records |file| as an input the generation step depends on. Safe to call
// from any thread (the list is guarded by |lock_|).
void Scheduler::AddGenDependency(const base::FilePath& file) {
  base::AutoLock lock(lock_);
  gen_dependencies_.push_back(file);
}
std::vector<base::FilePath> Scheduler::GetGenDependencies() const {
base::AutoLock lock(lock_);
return gen_dependencies_;
}
// Notes one more unit of outstanding work. Thread-safe (atomic counter);
// paired with DecrementWorkCount().
void Scheduler::IncrementWorkCount() {
  base::AtomicRefCountInc(&work_count_);
}
// Notes one unit of work finished. When the count drops to zero, signals
// completion on the main thread. Callable from any thread.
void Scheduler::DecrementWorkCount() {
  if (base::AtomicRefCountDec(&work_count_))
    return;  // Work still outstanding.

  if (base::MessageLoop::current() == &main_loop_) {
    OnComplete();
    return;
  }
  // Completion must be signaled from the main thread; post it over.
  main_loop_.PostTask(FROM_HERE,
                      base::Bind(&Scheduler::OnComplete,
                                 base::Unretained(this)));
}
// Emits one log line: the verb highlighted in yellow, followed by the
// message. Must run on the main thread.
void Scheduler::LogOnMainThread(const std::string& verb,
                                const std::string& msg) {
  OutputString(verb, DECORATION_YELLOW);
  const std::string remainder = " " + msg + "\n";
  OutputString(remainder);
}
// Prints |err| to stdout and quits the main run loop, ending Run(). Must be
// invoked on the main thread (see FailWithError()).
void Scheduler::FailWithErrorOnMainThread(const Err& err) {
  err.PrintToStdout();
  runner_.Quit();
}
// Worker-pool trampoline: runs the caller's |closure|, then balances the
// IncrementWorkCount() done in ScheduleWork() (possibly triggering
// completion).
void Scheduler::DoWork(const base::Closure& closure) {
  closure.Run();
  DecrementWorkCount();
}
// Called when the outstanding work count hits zero; quits the run loop so
// Run() can return.
void Scheduler::OnComplete() {
  // Should be called on the main thread.
  DCHECK(base::MessageLoop::current() == main_loop());
  runner_.Quit();
}
|