compile successfully
malloxpb committed Jul 20, 2018
1 parent c1eb8a3 commit f3249ce
Showing 7 changed files with 2,067 additions and 0 deletions.
109 changes: 109 additions & 0 deletions at_exit.cc
@@ -0,0 +1,109 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/at_exit.h"

#include <stddef.h>
#include <ostream>
#include <utility>

#include "base/bind.h"
#include "base/callback.h"
// #include "base/logging.h"

namespace base {

// Keep a stack of registered AtExitManagers. We always operate on the most
// recent, and we should never have more than one outside of testing (for a
// statically linked version of this library). Testing may use the shadow
// version of the constructor, and if we are building a dynamic library we may
// end up with multiple AtExitManagers in the same process. We don't protect
// this for thread-safe access, since it will only be modified in testing.
static AtExitManager* g_top_manager = nullptr;

static bool g_disable_managers = false;

AtExitManager::AtExitManager()
: processing_callbacks_(false), next_manager_(g_top_manager) {
// If multiple modules instantiate AtExitManagers they'll end up living in this
// module... they have to coexist.
#if !defined(COMPONENT_BUILD)
// DCHECK(!g_top_manager);
#endif
g_top_manager = this;
}

AtExitManager::~AtExitManager() {
if (!g_top_manager) {
// NOTREACHED() << "Tried to ~AtExitManager without an AtExitManager";
return;
}
// DCHECK_EQ(this, g_top_manager);

if (!g_disable_managers)
ProcessCallbacksNow();
g_top_manager = next_manager_;
}

// static
void AtExitManager::RegisterCallback(AtExitCallbackType func, void* param) {
// DCHECK(func);
RegisterTask(base::Bind(func, param));
}

// static
void AtExitManager::RegisterTask(base::Closure task) {
if (!g_top_manager) {
// NOTREACHED() << "Tried to RegisterCallback without an AtExitManager";
return;
}

AutoLock lock(g_top_manager->lock_);
// DCHECK(!g_top_manager->processing_callbacks_);
g_top_manager->stack_.push(std::move(task));
}

// static
void AtExitManager::ProcessCallbacksNow() {
if (!g_top_manager) {
// NOTREACHED() << "Tried to ProcessCallbacksNow without an AtExitManager";
return;
}

// Callbacks may try to add new callbacks, so run them without holding
// |lock_|. This is an error, caught by the DCHECK in RegisterTask(), but we
// handle it gracefully in release builds so we don't deadlock.
base::stack<base::Closure> tasks;
{
AutoLock lock(g_top_manager->lock_);
tasks.swap(g_top_manager->stack_);
g_top_manager->processing_callbacks_ = true;
}

// Relax the cross-thread access restriction to non-thread-safe RefCount.
// It's safe since all other threads should be terminated at this point.
ScopedAllowCrossThreadRefCountAccess allow_cross_thread_ref_count_access;

while (!tasks.empty()) {
base::Closure task = tasks.top();
task.Run();
tasks.pop();
}

// Expect that all callbacks have been run.
// DCHECK(g_top_manager->stack_.empty());
}

void AtExitManager::DisableAllAtExitManagers() {
AutoLock lock(g_top_manager->lock_);
g_disable_managers = true;
}

AtExitManager::AtExitManager(bool shadow)
: processing_callbacks_(false), next_manager_(g_top_manager) {
// DCHECK(shadow || !g_top_manager);
g_top_manager = this;
}

} // namespace base
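
For orientation, here is a minimal sketch of how this file's API is typically consumed (the cleanup function and its registration are hypothetical examples, not part of this commit): an embedder creates a single AtExitManager near the top of main(), registers tasks through RegisterCallback()/RegisterTask(), and the destructor drains the stack in LIFO order.

// Sketch only; assumes the at_exit.cc above is linked in.
#include "base/at_exit.h"

// Hypothetical cleanup hook matching AtExitCallbackType, i.e. void(*)(void*).
void ReleaseGlobalState(void* /*param*/) {
  // Tear down lazily initialized globals here.
}

int main(int argc, char** argv) {
  // One manager per process (outside of tests); it must outlive
  // everything that registers an at-exit task.
  base::AtExitManager exit_manager;

  base::AtExitManager::RegisterCallback(&ReleaseGlobalState, nullptr);

  // ... application work ...

  return 0;  // ~AtExitManager pops and runs registered tasks, newest first.
}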
234 changes: 234 additions & 0 deletions atomicops.h
@@ -159,3 +159,237 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
// #endif

#endif // BASE_ATOMICOPS_H_


// NOTE: copied from atomicops_internals_portable
// Copyright (c) 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use atomicops.h instead.
//
// This implementation uses C++11 atomics' member functions. The code base is
// currently written assuming atomicity revolves around accesses instead of
// C++11's memory locations. The burden is on the programmer to ensure that all
// memory locations accessed atomically are never accessed non-atomically (tsan
// should help with this).
//
// TODO(jfb) Modify the atomicops.h API and user code to declare atomic
// locations as truly atomic. See the static_assert below.
//
// Of note in this implementation:
// * All NoBarrier variants are implemented as relaxed.
// * All Barrier variants are implemented as sequentially-consistent.
// * Compare exchange's failure ordering is always the same as the success one
// (except for release, which fails as relaxed): using a weaker ordering is
// only valid under certain uses of compare exchange.
// * Acquire store doesn't exist in the C11 memory model; it is instead
// implemented as a relaxed store followed by a sequentially consistent
// fence.
// * Release load doesn't exist in the C11 memory model; it is instead
// implemented as sequentially consistent fence followed by a relaxed load.
// * Atomic increment is expected to return the post-incremented value, whereas
// C11 fetch add returns the previous value. The implementation therefore
// needs to increment twice (which the compiler should be able to detect and
// optimize).

#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

#include "base/atomicops.h"

#include <atomic>

#include "build/build_config.h"

namespace base {
namespace subtle {

// This implementation is transitional and maintains the original API for
// atomicops.h. This requires casting memory locations to the atomic types, and
// assumes that the API and the C++11 implementation are layout-compatible,
// which isn't true for all implementations or hardware platforms. The static
// assertion should detect this issue, were it to fire then this header
// shouldn't be used.
//
// TODO(jfb) If this header manages to stay committed then the API should be
// modified, and all call sites updated.
typedef volatile std::atomic<Atomic32>* AtomicLocation32;
static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
"incompatible 32-bit atomic layout");

inline void MemoryBarrier() {
#if defined(__GLIBCXX__)
// Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
// not defined, leading to the linker complaining about undefined references.
__atomic_thread_fence(std::memory_order_seq_cst);
#else
std::atomic_thread_fence(std::memory_order_seq_cst);
#endif
}

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
((AtomicLocation32)ptr)
->compare_exchange_strong(old_value,
new_value,
std::memory_order_relaxed,
std::memory_order_relaxed);
return old_value;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return ((AtomicLocation32)ptr)
->exchange(new_value, std::memory_order_relaxed);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment +
((AtomicLocation32)ptr)
->fetch_add(increment, std::memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + ((AtomicLocation32)ptr)->fetch_add(increment);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
((AtomicLocation32)ptr)
->compare_exchange_strong(old_value,
new_value,
std::memory_order_acquire,
std::memory_order_acquire);
return old_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
((AtomicLocation32)ptr)
->compare_exchange_strong(old_value,
new_value,
std::memory_order_release,
std::memory_order_relaxed);
return old_value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
((AtomicLocation32)ptr)->store(value, std::memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
return ((AtomicLocation32)ptr)->load(std::memory_order_acquire);
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}

#if defined(ARCH_CPU_64_BITS)

typedef volatile std::atomic<Atomic64>* AtomicLocation64;
static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
"incompatible 64-bit atomic layout");

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
((AtomicLocation64)ptr)
->compare_exchange_strong(old_value,
new_value,
std::memory_order_relaxed,
std::memory_order_relaxed);
return old_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return ((AtomicLocation64)ptr)
->exchange(new_value, std::memory_order_relaxed);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment +
((AtomicLocation64)ptr)
->fetch_add(increment, std::memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + ((AtomicLocation64)ptr)->fetch_add(increment);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
((AtomicLocation64)ptr)
->compare_exchange_strong(old_value,
new_value,
std::memory_order_acquire,
std::memory_order_acquire);
return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
((AtomicLocation64)ptr)
->compare_exchange_strong(old_value,
new_value,
std::memory_order_release,
std::memory_order_relaxed);
return old_value;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
((AtomicLocation64)ptr)->store(value, std::memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
return ((AtomicLocation64)ptr)->load(std::memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}

#endif // defined(ARCH_CPU_64_BITS)
} // namespace subtle
} // namespace base

#endif // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
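
And a usage sketch for the atomics above (the refcount class is a hypothetical illustration, not part of this commit): NoBarrier_AtomicIncrement gives a relaxed increment, while Barrier_AtomicIncrement provides the sequentially consistent ordering a release operation needs before freeing the object.

// Sketch only; a minimal intrusive refcount built on the API declared above.
#include "base/atomicops.h"

class RefCountedSketch {
 public:
  RefCountedSketch() : ref_count_(1) {}

  void AddRef() {
    // Taking an extra reference needs no ordering: relaxed is enough.
    base::subtle::NoBarrier_AtomicIncrement(&ref_count_, 1);
  }

  void Release() {
    // The decrement must be a barrier so writes made while holding the
    // reference are visible before the object can be deleted. Note that
    // *_AtomicIncrement returns the post-increment value, so 0 means
    // this call dropped the last reference.
    if (base::subtle::Barrier_AtomicIncrement(&ref_count_, -1) == 0)
      delete this;
  }

 private:
  ~RefCountedSketch() {}  // Heap-only: destroyed via Release().
  base::subtle::Atomic32 ref_count_;
};

A caller pairs every AddRef() with a Release(); the final Release() observes a post-decrement value of 0 and deletes the object.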