// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/message_loop/message_pump_glib.h"

#include <fcntl.h>
#include <glib.h>
#include <math.h>

#include "base/logging.h"
#include "base/memory/raw_ptr.h"
#include "base/notreached.h"
#include "base/numerics/safe_conversions.h"
#include "base/posix/eintr_wrapper.h"
#include "base/synchronization/lock.h"
#include "base/threading/platform_thread.h"

namespace base {

namespace {

// Priorities of event sources are important to let everything be processed.
// In particular, the GTK event source should have the highest priority
// (because UI events come from it), then Wayland events (the ones coming from
// the FD watcher), and the lowest priority is GLib events (our base message
// pump).
//
// The g_source API uses ints to denote priorities, and the lower the value,
// the higher the priority (i.e., they are ordered backwards).
constexpr int kPriorityWork = G_PRIORITY_DEFAULT_IDLE;
constexpr int kPriorityFdWatch = G_PRIORITY_DEFAULT_IDLE - 10;

// See the explanation above.
static_assert(G_PRIORITY_DEFAULT < kPriorityFdWatch &&
                  kPriorityFdWatch < kPriorityWork,
              "Wrong priorities are set for event sources!");
// Return a timeout suitable for the glib loop according to |next_task_time|,
// -1 to block forever, 0 to return right away, or a timeout in milliseconds
// from now.
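//
// Illustrative examples of the mapping (derived from the body below; "now" is
// treated as fixed while the function runs):
//   next_task_time.is_null()   -> 0   (immediate work: do not block)
//   next_task_time.is_max()    -> -1  (no delayed work: block indefinitely)
//   now + 2.5 ms               -> 3   (InMillisecondsRoundedUp rounds up)
//   a time already in the past -> 0   (negative timeouts are clamped to 0)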
int GetTimeIntervalMilliseconds(TimeTicks next_task_time) {
  if (next_task_time.is_null())
    return 0;
  else if (next_task_time.is_max())
    return -1;

  auto timeout_ms =
      (next_task_time - TimeTicks::Now()).InMillisecondsRoundedUp();

  return timeout_ms < 0 ? 0 : saturated_cast<int>(timeout_ms);
}

bool RunningOnMainThread() {
  auto pid = getpid();
  auto tid = PlatformThread::CurrentId();
  return pid > 0 && tid > 0 && pid == tid;
}

// A brief refresher on GLib:
// GLib sources have four callbacks: Prepare, Check, Dispatch and Finalize.
// On each iteration of the GLib pump, it calls each source's Prepare function.
// This function should return TRUE if it wants GLib to call its Dispatch, and
// FALSE otherwise. It can also set a timeout in this case for the next time
// Prepare should be called again (it may be called sooner).
// After the Prepare calls, GLib does a poll to check for events from the
// system. File descriptors can be attached to the sources. The poll may block
// if none of the Prepare calls returned TRUE. It will block indefinitely, or
// for at most the minimum timeout returned by a source in Prepare.
// After the poll, GLib calls Check for each source that returned FALSE
// from Prepare. The return value of Check has the same meaning as for Prepare,
// making Check a second chance to tell GLib we are ready for Dispatch.
// Finally, GLib calls Dispatch for each source that is ready. If Dispatch
// returns FALSE, GLib will destroy the source. Dispatch calls may be recursive
// (i.e., you can call Run from them), but Prepare and Check cannot.
// Finalize is called when the source is destroyed.
// NOTE: It is common for subsystems to want to process pending events while
// doing intensive work, for example the flash plugin. They usually use the
// following pattern (recommended by the GTK docs):
// while (gtk_events_pending()) {
//   gtk_main_iteration();
// }
//
// gtk_events_pending just calls g_main_context_pending, which does the
// following:
// - Call prepare on all the sources.
// - Do the poll with a timeout of 0 (not blocking).
// - Call check on all the sources.
// - *Does not* call dispatch on the sources.
// - Return true if any of prepare() or check() returned true.
//
// gtk_main_iteration just calls g_main_context_iteration, which does the whole
// thing, respecting the timeout for the poll (and may block, although it is
// not expected to if gtk_events_pending returned true), and calls dispatch.
//
// Thus it is important to only return true from prepare or check if we
// actually have events or work to do. We also need to make sure we keep
// internal state consistent so that if prepare/check return true when called
// from gtk_events_pending, they will still return true when called right
// after, from gtk_main_iteration.
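//
// As a condensed sketch of the above (illustrative pseudocode only, not the
// actual GLib implementation), one g_main_context_iteration() roughly does:
//
//   for each source: ready |= source->prepare(&source_timeout);
//   poll(attached fds, ready ? 0 : min(source_timeouts));  // may block
//   for each source not yet ready: ready |= source->check();
//   for each ready source (highest priority first):
//     if (!source->dispatch()) destroy the source;
//
// MessagePumpGlib's own sources below map these phases onto HandlePrepare(),
// HandleCheck() and HandleDispatch().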
//
// For the GLib pump we try to follow the Windows UI pump model:
// - Whenever we receive a wakeup event or the timer for delayed work expires,
//   we run DoWork. That part will also run in the other event pumps.
// - We also run DoWork, and possibly DoIdleWork, in the main loop,
//   around event handling.
//
// ---------------------------------------------------------------------------
//
// An overview on the way that we track work items:
//
// ScopedDoWorkItems are used by this pump to track native work. They are
// stored by value in |state_| and are set/cleared as the pump runs. Their
// setting and clearing is done in the functions
// {Set,Clear,EnsureSet,EnsureCleared}ScopedWorkItem. Control flow in GLib is
// quite non-obvious because Chrome is not notified when a nested loop is
// entered/exited. To detect nested loops, MessagePumpGlib uses
// |state_->do_work_depth|, which is incremented when DoWork is entered, and a
// GLib library function, g_main_depth(), which indicates the current number of
// Dispatch() calls on the stack. To react to them, two separate
// ScopedDoWorkItems are used (a standard one used for all native work, and a
// second one used exclusively for forcing nesting when there is a native loop
// spinning). Note that `ThreadController` flags all nesting as
// `Phase::kNested`, so separating native and application work while nested
// isn't supported, nor is it a goal.
//
// It should also be noted that a second GSource has been added to GLib,
// referred to as the "observer" source. It is used because, in the case where
// native work that is higher priority than Chrome (all of GTK) occurs on
// wakeup, Chrome won't even get notified that the pump is awake.
//
// There are several cases to consider wrt. nesting level and order. In
// order, we have:
// A. [root] -> MessagePump::Run() -> native event -> g_main_context_iteration
// B. [root] -> MessagePump::Run() -> DoWork -> g_main_context_iteration
// C. [root] -> native -> DoWork -> MessagePump -> [...]
// The second two cases are identical for our purposes, and the last one turns
// out to be handled without any extra headache.
//
// Consider nesting case A, where native work is called from
// |g_main_context_iteration()| from the pump, and that native work spins up a
// loop. For our purposes, this is a nested loop, because control is not
// returned to the pump once one iteration of the pump is complete. In this
// case, the pump needs to enter nesting without DoWork being involved at
// all. This is accomplished using |MessagePumpGlib::NestIfRequired()|, which is
// called during the Prepare() phase of GLib. As the pump records state on entry
// and exit from GLib using |OnEntryToGlib| and |OnExitFromGlib|, we can compare
// |g_main_depth| at |HandlePrepare| with the one before we entered
// |g_main_context_iteration|. If it is higher, there is a native loop being
// spun, and |RegisterNested| is called, forcing nesting by initializing two
// work items at once. These are destroyed after the exit from
// |g_main_context_iteration| using |OnExitFromGlib|.
//
// Then, considering nesting case B, |state_->do_work_depth| is incremented
// during any Chrome work, to allow the pump to detect re-entrancy during a
// Chrome work item. This is required because `g_main_depth` is not incremented
// in any `DoWork` call not occurring during `Dispatch()` (i.e. during
// `MessagePumpGlib::Run()`). In this case, a nested loop is recorded, and the
// pump sets-and-clears scoped work items during Prepare, Check, and Dispatch. A
// work item can never be active when control flow returns to GLib (i.e. on
// return) during a nested loop, because the nested loop could exit at any
// point. This is fine because TimeKeeper is only concerned with the fact that a
// nested loop is in progress, as opposed to the various phases of the nested
// loop.
//
// Finally, consider nesting case C, where a native loop is spinning entirely
// outside of Chrome, such as inside a signal handler. The pump might create
// and destroy DoWorkItems during Prepare() and Check(), but these work items
// will always get cleared during Dispatch(), before the pump enters a
// DoWork(). This leaves the pump showing non-nested native work without the
// thread controller being active, which is the correct situation (and one that
// won't occur outside of startup or shutdown). Once Dispatch() is called, the
// pump's nesting tracking works correctly, as state_->do_work_depth is
// increased, and upon re-entrancy we detect the nested loop, which is correct,
// as this is the only point at which the loop actually becomes "nested".
//
// -----------------------------------------------------------------------------
//
// As an overview of the steps taken by MessagePumpGlib to ensure that nested
// loops are detected adequately during each phase of the GLib loop:
//
// 0: Before entering GLib:
//   0.1: Record state about the current state of GLib (g_main_depth()) for
//        case 1.1.2.
//
// 1: Prepare.
//   1.1: Detection of nested loops
//
//   1.1.1: If |state_->do_work_depth| > 0, we are in nesting case B detailed
//          above. A work item must be newly created during this function to
//          trigger nesting, and is destroyed to ensure proper destruction
//          order in the case where GLib quits after Prepare().
//
//   1.1.2: Otherwise, check if we are in nesting case A above. If yes, trigger
//          nesting using ScopedDoWorkItems. The nesting will be cleared at
//          exit from GLib.
//
//          This check occurs only in |HandleObserverPrepare|, not in
//          |HandlePrepare|.
//
//          A third party is running a glib message loop. Since Chrome work is
//          registered with GLib at |G_PRIORITY_DEFAULT_IDLE|, a relatively low
//          priority, sources of default-or-higher priority will be
//          Dispatch()ed first. Since only one source is Dispatched per loop
//          iteration, |HandlePrepare| can get called several times in a row in
//          the case that there are any other events in the queue. A
//          ScopedDoWorkItem is created and destroyed to record this. That work
//          item triggers nesting.
//
//   1.2: Other considerations
//   1.2.1: Sleep occurs between Prepare() and Check(). If Chrome will pass a
//          nonzero poll time to GLib, the inner ScopedDoWorkItem is cleared
//          and BeforeWait() is called. In nesting case A, the nesting work
//          item will not be cleared. A nested loop will typically not block.
//
//          Since Prepare() is called before Check() in all cases, the bulk of
//          nesting detection is done in Prepare().
//
// 2: Check.
//   2.1: Detection of nested loops:
//   2.1.1: In nesting case B, |ClearScopedWorkItem()| on exit. A third party
//          is running a glib message loop. It is possible that at any point
//          the nested message loop will quit. In this case, we don't want to
//          leave a nested DoWorkItem on the stack.
//
//   2.2: Other considerations
//   2.2.1: A ScopedDoWorkItem may be created (if it was not already present)
//          at the entry to Check() to record a wakeup in the case that the
//          pump slept. It is important to note that this occurs both in
//          |HandleObserverCheck| and |HandleCheck| to ensure that at every
//          point as the pump enters the Dispatch phase it is awake. In the
//          case it is already awake, this is a very cheap operation.
//
// 3: Dispatch
//   3.1: Detection of nested loops
//   3.1.1: |state_->do_work_depth| is incremented on entry and decremented on
//          exit. This is used to detect nesting case B.
//
//   3.1.2: Nested loops can be quit at any point, and so ScopedDoWorkItems
//          can't be left on the stack for the same reasons as in 1.1.1/2.1.1.
//
//   3.2: Other considerations
//   3.2.1: Since DoWork creates its own work items, ScopedDoWorkItems are not
//          used as this would trigger nesting in all cases.
//
// 4: Post GLib
//   4.1: Detection of nested loops
//   4.1.1: |state_->do_work_depth| is also increased during the DoWork in
//          Run() as nesting in that case [calling glib from third party code]
//          needs to clear all work items after return to avoid improper
//          destruction order.
//
//   4.2: Other considerations:
//   4.2.1: DoWork uses its own work item, so no ScopedDoWorkItems are active
//          in this case.
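//
// Condensed into illustrative pseudocode (this adds no extra logic; it merely
// restates what HandleObserverPrepare(), NestIfRequired() and HandleDispatch()
// below implement):
//
//   // Case A: a native loop was spun up inside g_main_context_iteration().
//   if (g_main_depth() != g_depth_on_iteration) RegisterNested();
//
//   // Case B: the pump was re-entered from within a Chrome work item.
//   if (do_work_depth > 0) { SetScopedWorkItem(); ClearScopedWorkItem(); }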

struct WorkSource : public GSource {
  raw_ptr<MessagePumpGlib> pump;
};

gboolean WorkSourcePrepare(GSource* source, gint* timeout_ms) {
  *timeout_ms = static_cast<WorkSource*>(source)->pump->HandlePrepare();
  // We always return FALSE, so that our timeout is honored. If we were
  // to return TRUE, the timeout would be considered to be 0 and the poll
  // would never block. Once the poll is finished, Check will be called.
  return FALSE;
}

gboolean WorkSourceCheck(GSource* source) {
  // Only return TRUE if Dispatch should be called.
  return static_cast<WorkSource*>(source)->pump->HandleCheck();
}

gboolean WorkSourceDispatch(GSource* source,
                            GSourceFunc unused_func,
                            gpointer unused_data) {
  static_cast<WorkSource*>(source)->pump->HandleDispatch();
  // Always return TRUE so our source stays registered.
  return TRUE;
}

void WorkSourceFinalize(GSource* source) {
  // Since the WorkSource object's memory is managed by GLib, WorkSource's
  // implicit destructor is never called, and thus WorkSource's raw_ptr never
  // releases its internal reference on the pump pointer. This adds pressure
  // to the BRP quarantine.
  static_cast<WorkSource*>(source)->pump = nullptr;
}

// I wish these could be const, but g_source_new wants non-const.
GSourceFuncs g_work_source_funcs = {WorkSourcePrepare, WorkSourceCheck,
                                    WorkSourceDispatch, WorkSourceFinalize};

struct ObserverSource : public GSource {
  raw_ptr<MessagePumpGlib> pump;
};

gboolean ObserverPrepare(GSource* gsource, gint* timeout_ms) {
  auto* source = static_cast<ObserverSource*>(gsource);
  source->pump->HandleObserverPrepare();
  *timeout_ms = -1;
  // We always want to poll.
  return FALSE;
}

gboolean ObserverCheck(GSource* gsource) {
  auto* source = static_cast<ObserverSource*>(gsource);
  return source->pump->HandleObserverCheck();
}

void ObserverFinalize(GSource* source) {
  // Read the comment in `WorkSourceFinalize`, the issue is exactly the same.
  static_cast<ObserverSource*>(source)->pump = nullptr;
}

GSourceFuncs g_observer_funcs = {ObserverPrepare, ObserverCheck, nullptr,
                                 ObserverFinalize};

struct FdWatchSource : public GSource {
  raw_ptr<MessagePumpGlib> pump;
  raw_ptr<MessagePumpGlib::FdWatchController> controller;
};

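// The fd watch callbacks below rely entirely on the poll phase: Prepare never
// reports readiness (and requests no timeout of its own), while Check inspects
// the revents that the poll filled in for the watched descriptor.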
gboolean FdWatchSourcePrepare(GSource* source, gint* timeout_ms) {
  *timeout_ms = -1;
  return FALSE;
}

gboolean FdWatchSourceCheck(GSource* gsource) {
  auto* source = static_cast<FdWatchSource*>(gsource);
  return source->pump->HandleFdWatchCheck(source->controller) ? TRUE : FALSE;
}

gboolean FdWatchSourceDispatch(GSource* gsource,
                               GSourceFunc unused_func,
                               gpointer unused_data) {
  auto* source = static_cast<FdWatchSource*>(gsource);
  source->pump->HandleFdWatchDispatch(source->controller);
  return TRUE;
}

void FdWatchSourceFinalize(GSource* gsource) {
  // Read the comment in `WorkSourceFinalize`, the issue is exactly the same.
  auto* source = static_cast<FdWatchSource*>(gsource);
  source->pump = nullptr;
  source->controller = nullptr;
}

GSourceFuncs g_fd_watch_source_funcs = {
    FdWatchSourcePrepare, FdWatchSourceCheck, FdWatchSourceDispatch,
    FdWatchSourceFinalize};

}  // namespace

struct MessagePumpGlib::RunState {
  explicit RunState(Delegate* delegate) : delegate(delegate) {
    CHECK(delegate);
  }

  const raw_ptr<Delegate> delegate;

  // Used to flag that the current Run() invocation should return ASAP.
  bool should_quit = false;

  // Keeps track of the number of calls to DoWork() on the stack for the
  // current Run() invocation. Used to detect reentrancy from DoWork in order
  // to make decisions about tracking nested work.
  int do_work_depth = 0;

  // Value of g_main_depth() captured before the call to
  // g_main_context_iteration() in Run(). nullopt if Run() is not calling
  // g_main_context_iteration(). Used to track whether the pump has forced a
  // nested state due to a native pump.
  std::optional<int> g_depth_on_iteration;

  // Used to keep track of the native event work items processed by the
  // message pump.
  Delegate::ScopedDoWorkItem scoped_do_work_item;

  // Used to force the pump into a nested state when a native runloop was
  // dispatched from main.
  Delegate::ScopedDoWorkItem native_loop_do_work_item;

  // The information of the next task available at this run-level. Stored in
  // RunState because a different set of tasks can be accessible at various
  // run-levels (e.g. non-nestable tasks).
  Delegate::NextWorkInfo next_work_info;
};

MessagePumpGlib::MessagePumpGlib()
    : state_(nullptr), wakeup_gpollfd_(std::make_unique<GPollFD>()) {
  DCHECK(!g_main_context_get_thread_default());
  if (RunningOnMainThread()) {
    context_ = g_main_context_default();
  } else {
    owned_context_ = std::unique_ptr<GMainContext, GMainContextDeleter>(
        g_main_context_new());
    context_ = owned_context_.get();
    g_main_context_push_thread_default(context_);
  }

  // Create our wakeup pipe, which is used to flag when work was scheduled.
  int fds[2];
  [[maybe_unused]] int ret = pipe2(fds, O_CLOEXEC);
  DCHECK_EQ(ret, 0);

  wakeup_pipe_read_ = fds[0];
  wakeup_pipe_write_ = fds[1];
  wakeup_gpollfd_->fd = wakeup_pipe_read_;
  wakeup_gpollfd_->events = G_IO_IN;

  observer_source_ = std::unique_ptr<GSource, GSourceDeleter>(
      g_source_new(&g_observer_funcs, sizeof(ObserverSource)));
  static_cast<ObserverSource*>(observer_source_.get())->pump = this;
  g_source_attach(observer_source_.get(), context_);

  work_source_ = std::unique_ptr<GSource, GSourceDeleter>(
      g_source_new(&g_work_source_funcs, sizeof(WorkSource)));
  static_cast<WorkSource*>(work_source_.get())->pump = this;
  g_source_add_poll(work_source_.get(), wakeup_gpollfd_.get());
  g_source_set_priority(work_source_.get(), kPriorityWork);
  // This is needed to allow Run calls inside Dispatch.
  g_source_set_can_recurse(work_source_.get(), TRUE);
  g_source_attach(work_source_.get(), context_);
}

MessagePumpGlib::~MessagePumpGlib() {
  work_source_.reset();
  close(wakeup_pipe_read_);
  close(wakeup_pipe_write_);
  context_ = nullptr;
  owned_context_.reset();
}

MessagePumpGlib::FdWatchController::FdWatchController(const Location& location)
    : FdWatchControllerInterface(location) {}

MessagePumpGlib::FdWatchController::~FdWatchController() {
  if (IsInitialized()) {
    auto* source = static_cast<FdWatchSource*>(source_);
    source->controller = nullptr;

    CHECK(StopWatchingFileDescriptor());
  }
  if (was_destroyed_) {
    DCHECK(!*was_destroyed_);
    *was_destroyed_ = true;
  }
}

bool MessagePumpGlib::FdWatchController::StopWatchingFileDescriptor() {
  if (!IsInitialized())
    return false;

  g_source_destroy(source_);
  g_source_unref(source_.ExtractAsDangling());
  watcher_ = nullptr;
  return true;
}

bool MessagePumpGlib::FdWatchController::IsInitialized() const {
  return !!source_;
}

bool MessagePumpGlib::FdWatchController::InitOrUpdate(int fd,
                                                      int mode,
                                                      FdWatcher* watcher) {
  gushort event_flags = 0;
  if (mode & WATCH_READ) {
    event_flags |= G_IO_IN;
  }
  if (mode & WATCH_WRITE) {
    event_flags |= G_IO_OUT;
  }

  if (!IsInitialized()) {
    poll_fd_ = std::make_unique<GPollFD>();
    poll_fd_->fd = fd;
  } else {
    if (poll_fd_->fd != fd)
      return false;
    // Combine old/new event masks.
    event_flags |= poll_fd_->events;
    // Destroy previous source.
    bool stopped = StopWatchingFileDescriptor();
    DCHECK(stopped);
  }
  poll_fd_->events = event_flags;
  poll_fd_->revents = 0;

  source_ = g_source_new(&g_fd_watch_source_funcs, sizeof(FdWatchSource));
  DCHECK(source_);
  g_source_add_poll(source_, poll_fd_.get());
  g_source_set_can_recurse(source_, TRUE);
  g_source_set_callback(source_, nullptr, nullptr, nullptr);
  g_source_set_priority(source_, kPriorityFdWatch);

  watcher_ = watcher;
  return true;
}

bool MessagePumpGlib::FdWatchController::Attach(MessagePumpGlib* pump) {
  DCHECK(pump);
  if (!IsInitialized()) {
    return false;
  }
  auto* source = static_cast<FdWatchSource*>(source_);
  source->controller = this;
  source->pump = pump;
  g_source_attach(source_, pump->context_);
  return true;
}

void MessagePumpGlib::FdWatchController::NotifyCanRead() {
  if (!watcher_)
    return;
  DCHECK(poll_fd_);
  watcher_->OnFileCanReadWithoutBlocking(poll_fd_->fd);
}

void MessagePumpGlib::FdWatchController::NotifyCanWrite() {
  if (!watcher_)
    return;
  DCHECK(poll_fd_);
  watcher_->OnFileCanWriteWithoutBlocking(poll_fd_->fd);
}

bool MessagePumpGlib::WatchFileDescriptor(int fd,
                                          bool persistent,
                                          int mode,
                                          FdWatchController* controller,
                                          FdWatcher* watcher) {
  DCHECK_GE(fd, 0);
  DCHECK(controller);
  DCHECK(watcher);
  DCHECK(mode == WATCH_READ || mode == WATCH_WRITE || mode == WATCH_READ_WRITE);
  // WatchFileDescriptor should be called on the pump thread. It is not
  // threadsafe, so the watcher may never be registered.
  DCHECK_CALLED_ON_VALID_THREAD(watch_fd_caller_checker_);

  if (!controller->InitOrUpdate(fd, mode, watcher)) {
    DPLOG(ERROR) << "FdWatchController init failed (fd=" << fd << ")";
    return false;
  }
  return controller->Attach(this);
}
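
// A sketch of typical usage of the API above; `MyWatcher`, `my_fd` and `pump`
// are hypothetical names, not part of this file:
//
//   class MyWatcher : public MessagePumpGlib::FdWatcher {
//     void OnFileCanReadWithoutBlocking(int fd) override { /* drain fd */ }
//     void OnFileCanWriteWithoutBlocking(int fd) override {}
//   };
//
//   MessagePumpGlib::FdWatchController controller(FROM_HERE);
//   MyWatcher watcher;
//   pump->WatchFileDescriptor(my_fd, /*persistent=*/true,
//                             MessagePumpGlib::WATCH_READ, &controller,
//                             &watcher);
//
// The controller must outlive the watch; destroying it (or calling
// StopWatchingFileDescriptor()) detaches the underlying GSource.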

void MessagePumpGlib::HandleObserverPrepare() {
  // |state_| may be null during tests.
  if (!state_) {
    return;
  }

  if (state_->do_work_depth > 0) {
    // Contingency 1.1.1 detailed above
    SetScopedWorkItem();
    ClearScopedWorkItem();
  } else {
    // Contingency 1.1.2 detailed above
    NestIfRequired();
  }

  return;
}

bool MessagePumpGlib::HandleObserverCheck() {
  // |state_| may be null in tests.
  if (!state_) {
    return FALSE;
  }

  // Make sure we record the fact that we're awake. Chrome won't get Check()ed
  // if a higher priority work item returns TRUE from Check().
  EnsureSetScopedWorkItem();
  if (state_->do_work_depth > 0) {
    // Contingency 2.1.1
    ClearScopedWorkItem();
  }

  // The observer never needs to run anything.
  return FALSE;
}

// Return the timeout we want passed to poll.
int MessagePumpGlib::HandlePrepare() {
  // |state_| may be null during tests.
  if (!state_)
    return 0;

  const int next_wakeup_millis =
      GetTimeIntervalMilliseconds(state_->next_work_info.delayed_run_time);
  if (next_wakeup_millis != 0) {
    // When this is called, it is not possible to know for sure if a
    // ScopedWorkItem is on the stack, because HandleObserverCheck may have set
    // it during an iteration of the pump where a high priority native work
    // item executed.
    EnsureClearedScopedWorkItem();
    state_->delegate->BeforeWait();
  }

  return next_wakeup_millis;
}

bool MessagePumpGlib::HandleCheck() {
  if (!state_)  // state_ may be null during tests.
    return false;

  // Ensure pump is awake.
  EnsureSetScopedWorkItem();

  if (state_->do_work_depth > 0) {
    // Contingency 2.1.1
    ClearScopedWorkItem();
  }

  // We usually have a single message on the wakeup pipe, since we are only
  // signaled when the queue went from empty to non-empty, but there can be
  // two messages if a task posted a task, hence we read at most two bytes.
  // The glib poll will tell us whether there was data, so this read
  // shouldn't block.
  if (wakeup_gpollfd_->revents & G_IO_IN) {
    char msg[2];
    const long num_bytes = HANDLE_EINTR(read(wakeup_pipe_read_, msg, 2));
    if (num_bytes < 1) {
      NOTREACHED() << "Error reading from the wakeup pipe.";
    }
    DCHECK((num_bytes == 1 && msg[0] == '!') ||
           (num_bytes == 2 && msg[0] == '!' && msg[1] == '!'));
    // Since we ate the message, we need to record that we have immediate work,
    // because HandleCheck() may be called without HandleDispatch being called
    // afterwards.
    state_->next_work_info = {TimeTicks()};
    return true;
  }

  // As described in the summary at the top: Check is a second chance to
  // Prepare, so verify whether we have work ready again.
  if (GetTimeIntervalMilliseconds(state_->next_work_info.delayed_run_time) ==
      0) {
    return true;
  }

  return false;
}

void MessagePumpGlib::HandleDispatch() {
  // Contingency 3.2.1
  EnsureClearedScopedWorkItem();

  // Contingency 3.1.1
  ++state_->do_work_depth;
  state_->next_work_info = state_->delegate->DoWork();
  --state_->do_work_depth;

  if (state_ && state_->do_work_depth > 0) {
    // Contingency 3.1.2
    EnsureClearedScopedWorkItem();
  }
}

void MessagePumpGlib::Run(Delegate* delegate) {
  RunState state(delegate);

  RunState* previous_state = state_;
  state_ = &state;

  // We really only do a single task for each iteration of the loop. If we
  // have done something, assume there is likely something more to do. This
  // will mean that we don't block on the message pump until there is nothing
  // more to do. We also set this to true to make sure not to block on the
  // first iteration of the loop, so RunUntilIdle() works correctly.
  bool more_work_is_plausible = true;

  // We run our own loop instead of using g_main_loop_quit in one of the
  // callbacks. This is so we only quit our own loops, and we don't quit
  // nested loops run by others. TODO(deanm): Is this what we want?
  for (;;) {
    // ScopedWorkItem to account for any native work until the runloop starts
    // running Chrome work.
    SetScopedWorkItem();

    // Don't block if we think we have more work to do.
    bool block = !more_work_is_plausible;

    OnEntryToGlib();
    more_work_is_plausible = g_main_context_iteration(context_, block);
    OnExitFromGlib();

    if (state_->should_quit)
      break;

    // Contingency 4.2.1
    EnsureClearedScopedWorkItem();

    // Contingency 4.1.1
    ++state_->do_work_depth;
    state_->next_work_info = state_->delegate->DoWork();
    --state_->do_work_depth;

    more_work_is_plausible |= state_->next_work_info.is_immediate();
    if (state_->should_quit)
      break;

    if (more_work_is_plausible)
      continue;

    more_work_is_plausible = state_->delegate->DoIdleWork();
    if (state_->should_quit)
      break;
  }

  state_ = previous_state;
}

void MessagePumpGlib::Quit() {
  if (state_) {
    state_->should_quit = true;
  } else {
    NOTREACHED() << "Quit called outside Run!";
  }
}

void MessagePumpGlib::ScheduleWork() {
  // This can be called on any thread, so we don't want to touch any state
  // variables as we would then need locks all over. This ensures that if
  // we are sleeping in a poll that we will wake up.
  char msg = '!';
  if (HANDLE_EINTR(write(wakeup_pipe_write_, &msg, 1)) != 1) {
    NOTREACHED() << "Could not write to the UI message loop wakeup pipe!";
  }
}

void MessagePumpGlib::ScheduleDelayedWork(
    const Delegate::NextWorkInfo& next_work_info) {
  // We need to wake up the loop in case the poll timeout needs to be
  // adjusted. This will cause us to try to do work, but that's OK.
  ScheduleWork();
}

bool MessagePumpGlib::HandleFdWatchCheck(FdWatchController* controller) {
  DCHECK(controller);
  gushort flags = controller->poll_fd_->revents;
  return (flags & G_IO_IN) || (flags & G_IO_OUT);
}

void MessagePumpGlib::HandleFdWatchDispatch(FdWatchController* controller) {
  DCHECK(controller);
  DCHECK(controller->poll_fd_);
  gushort flags = controller->poll_fd_->revents;
  if ((flags & G_IO_IN) && (flags & G_IO_OUT)) {
    // Both callbacks will be called. It is necessary to check that
    // |controller| is not destroyed.
    bool controller_was_destroyed = false;
    controller->was_destroyed_ = &controller_was_destroyed;
    controller->NotifyCanWrite();
    if (!controller_was_destroyed)
      controller->NotifyCanRead();
    if (!controller_was_destroyed)
      controller->was_destroyed_ = nullptr;
  } else if (flags & G_IO_IN) {
    controller->NotifyCanRead();
  } else if (flags & G_IO_OUT) {
    controller->NotifyCanWrite();
  }
}

bool MessagePumpGlib::ShouldQuit() const {
  CHECK(state_);
  return state_->should_quit;
}

void MessagePumpGlib::SetScopedWorkItem() {
  // |state_| can be null during tests.
  if (!state_) {
    return;
  }
  // If there exists a ScopedDoWorkItem in the current RunState, it cannot be
  // overwritten.
  CHECK(state_->scoped_do_work_item.IsNull());

  // In the case that we're more than two work items deep, don't bother
  // tracking individual native events anymore. Note that this won't cause
  // out-of-order end work items, because the work item is cleared before
  // entering the second DoWork().
  if (state_->do_work_depth < 2) {
    state_->scoped_do_work_item = state_->delegate->BeginWorkItem();
  }
}

void MessagePumpGlib::ClearScopedWorkItem() {
  // |state_| can be null during tests.
  if (!state_) {
    return;
  }

  CHECK(!state_->scoped_do_work_item.IsNull());
  // See identical check in SetScopedWorkItem.
  if (state_->do_work_depth < 2) {
    state_->scoped_do_work_item = Delegate::ScopedDoWorkItem();
  }
}

void MessagePumpGlib::EnsureSetScopedWorkItem() {
  // |state_| can be null during tests.
  if (!state_) {
    return;
  }
  if (state_->scoped_do_work_item.IsNull()) {
    SetScopedWorkItem();
  }
}

void MessagePumpGlib::EnsureClearedScopedWorkItem() {
  // |state_| can be null during tests.
  if (!state_) {
    return;
  }
  if (!state_->scoped_do_work_item.IsNull()) {
    ClearScopedWorkItem();
  }
}

void MessagePumpGlib::RegisterNested() {
  // |state_| can be null during tests.
  if (!state_) {
    return;
  }
  CHECK(state_->native_loop_do_work_item.IsNull());

  // Transfer `scoped_do_work_item` to `native_loop_do_work_item`, so that the
  // ephemeral `scoped_do_work_item` will be coming in and out of existence on
  // top of `native_loop_do_work_item`, whose state hasn't been deleted.

  if (state_->scoped_do_work_item.IsNull()) {
    state_->native_loop_do_work_item = state_->delegate->BeginWorkItem();
  } else {
    // This clears state_->scoped_do_work_item.
    state_->native_loop_do_work_item = std::move(state_->scoped_do_work_item);
  }
  SetScopedWorkItem();
  ClearScopedWorkItem();
}

void MessagePumpGlib::UnregisterNested() {
  // |state_| can be null during tests.
  if (!state_) {
    return;
  }
  CHECK(!state_->native_loop_do_work_item.IsNull());

  EnsureClearedScopedWorkItem();
  // Nesting exits here.
  state_->native_loop_do_work_item = Delegate::ScopedDoWorkItem();
}

void MessagePumpGlib::NestIfRequired() {
  // |state_| can be null during tests.
  if (!state_) {
    return;
  }
  if (state_->native_loop_do_work_item.IsNull() &&
      state_->g_depth_on_iteration.has_value() &&
      g_main_depth() != state_->g_depth_on_iteration.value()) {
    RegisterNested();
  }
}

void MessagePumpGlib::UnnestIfRequired() {
  // |state_| can be null during tests.
  if (!state_) {
    return;
  }
  if (!state_->native_loop_do_work_item.IsNull()) {
    UnregisterNested();
  }
}

void MessagePumpGlib::OnEntryToGlib() {
  // |state_| can be null during tests.
  if (!state_) {
    return;
  }
  CHECK(!state_->g_depth_on_iteration.has_value());
  state_->g_depth_on_iteration.emplace(g_main_depth());
}

void MessagePumpGlib::OnExitFromGlib() {
  // |state_| can be null during tests.
  if (!state_) {
    return;
  }
  state_->g_depth_on_iteration.reset();
  UnnestIfRequired();
}

}  // namespace base
894