summaryrefslogtreecommitdiffstats
path: root/base
diff options
context:
space:
mode:
authorreveman <reveman@chromium.org>2014-12-18 11:22:28 -0800
committerCommit bot <commit-bot@chromium.org>2014-12-18 19:23:12 +0000
commit1fef3ffc00485992f70a3ccce57f89538718c513 (patch)
tree6a554b74975c549eb4ba245dad1bbbfb492550b8 /base
parent6c1dfeb5606544bb37a3ffe9ba49aa1c6a973fed (diff)
downloadchromium_src-1fef3ffc00485992f70a3ccce57f89538718c513.zip
chromium_src-1fef3ffc00485992f70a3ccce57f89538718c513.tar.gz
chromium_src-1fef3ffc00485992f70a3ccce57f89538718c513.tar.bz2
base: Add ashmem support to base::DiscardableSharedMemory implementation.
This adjusts the base::DiscardableSharedMemory API to support ashmem and locking/unlocking of individual pages. This API change will also be used in the near future to implement a child process side free list on top of the base::DiscardableSharedMemory API. BUG=429415 TEST=base_unittests --gtest_filter=DiscardableSharedMemoryTest.LockAndUnlockRange Review URL: https://codereview.chromium.org/809603004 Cr-Commit-Position: refs/heads/master@{#309054}
Diffstat (limited to 'base')
-rw-r--r--base/memory/discardable_memory_shmem.cc4
-rw-r--r--base/memory/discardable_shared_memory.cc171
-rw-r--r--base/memory/discardable_shared_memory.h48
-rw-r--r--base/memory/discardable_shared_memory_unittest.cc104
-rw-r--r--base/process/process_metrics.h3
-rw-r--r--base/process/process_metrics_ios.cc4
-rw-r--r--base/process/process_metrics_posix.cc4
-rw-r--r--base/process/process_metrics_win.cc4
8 files changed, 275 insertions, 67 deletions
diff --git a/base/memory/discardable_memory_shmem.cc b/base/memory/discardable_memory_shmem.cc
index 77699f0..4282481 100644
--- a/base/memory/discardable_memory_shmem.cc
+++ b/base/memory/discardable_memory_shmem.cc
@@ -93,7 +93,7 @@ void* DiscardableMemoryShmem::Memory() const {
}
bool DiscardableMemoryShmem::AllocateAndAcquireLock() {
- if (shared_memory_ && shared_memory_->Lock())
+ if (shared_memory_ && shared_memory_->Lock(0, 0))
return true;
// TODO(reveman): Allocate fixed size memory segments and use a free list to
@@ -105,7 +105,7 @@ bool DiscardableMemoryShmem::AllocateAndAcquireLock() {
}
void DiscardableMemoryShmem::ReleaseLock() {
- shared_memory_->Unlock();
+ shared_memory_->Unlock(0, 0);
}
void DiscardableMemoryShmem::Purge() {
diff --git a/base/memory/discardable_shared_memory.cc b/base/memory/discardable_shared_memory.cc
index 851f1ad..eb9e552 100644
--- a/base/memory/discardable_shared_memory.cc
+++ b/base/memory/discardable_shared_memory.cc
@@ -13,6 +13,11 @@
#include "base/atomicops.h"
#include "base/logging.h"
#include "base/numerics/safe_math.h"
+#include "base/process/process_metrics.h"
+
+#if defined(OS_ANDROID)
+#include "third_party/ashmem/ashmem.h"
+#endif
namespace base {
namespace {
@@ -59,7 +64,7 @@ struct SharedState {
SharedState(LockState lock_state, Time timestamp) {
int64 wire_timestamp = TimeToWireFormat<sizeof(AtomicType)>(timestamp);
DCHECK_GE(wire_timestamp, 0);
- DCHECK((lock_state & ~1) == 0);
+ DCHECK_EQ(lock_state & ~1, 0);
value.u = (static_cast<UAtomicType>(wire_timestamp) << 1) | lock_state;
}
@@ -84,14 +89,28 @@ SharedState* SharedStateFromSharedMemory(const SharedMemory& shared_memory) {
return static_cast<SharedState*>(shared_memory.memory());
}
+// Round up |size| to a multiple of alignment, which must be a power of two.
+size_t Align(size_t alignment, size_t size) {
+ DCHECK_EQ(alignment & (alignment - 1), 0u);
+ return (size + alignment - 1) & ~(alignment - 1);
+}
+
+// Round up |size| to a multiple of page size.
+size_t AlignToPageSize(size_t size) {
+ return Align(base::GetPageSize(), size);
+}
+
} // namespace
-DiscardableSharedMemory::DiscardableSharedMemory() {
+DiscardableSharedMemory::DiscardableSharedMemory()
+ : mapped_size_(0), locked_page_count_(0) {
}
DiscardableSharedMemory::DiscardableSharedMemory(
SharedMemoryHandle shared_memory_handle)
- : shared_memory_(shared_memory_handle, false) {
+ : shared_memory_(shared_memory_handle, false),
+ mapped_size_(0),
+ locked_page_count_(0) {
}
DiscardableSharedMemory::~DiscardableSharedMemory() {
@@ -99,13 +118,22 @@ DiscardableSharedMemory::~DiscardableSharedMemory() {
bool DiscardableSharedMemory::CreateAndMap(size_t size) {
CheckedNumeric<size_t> checked_size = size;
- checked_size += sizeof(SharedState);
+ checked_size += AlignToPageSize(sizeof(SharedState));
if (!checked_size.IsValid())
return false;
if (!shared_memory_.CreateAndMapAnonymous(checked_size.ValueOrDie()))
return false;
+ mapped_size_ =
+ shared_memory_.mapped_size() - AlignToPageSize(sizeof(SharedState));
+
+ locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
+#if DCHECK_IS_ON
+ for (size_t page = 0; page < locked_page_count_; ++page)
+ locked_pages_.insert(page);
+#endif
+
DCHECK(last_known_usage_.is_null());
SharedState new_state(SharedState::LOCKED, Time());
subtle::Release_Store(&SharedStateFromSharedMemory(shared_memory_)->value.i,
@@ -114,35 +142,130 @@ bool DiscardableSharedMemory::CreateAndMap(size_t size) {
}
bool DiscardableSharedMemory::Map(size_t size) {
- return shared_memory_.Map(sizeof(SharedState) + size);
+ if (!shared_memory_.Map(AlignToPageSize(sizeof(SharedState)) + size))
+ return false;
+
+ mapped_size_ =
+ shared_memory_.mapped_size() - AlignToPageSize(sizeof(SharedState));
+
+ locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
+#if DCHECK_IS_ON
+ for (size_t page = 0; page < locked_page_count_; ++page)
+ locked_pages_.insert(page);
+#endif
+
+ return true;
}
-bool DiscardableSharedMemory::Lock() {
- DCHECK(shared_memory_.memory());
+bool DiscardableSharedMemory::Lock(size_t offset, size_t length) {
+ DCHECK_EQ(AlignToPageSize(offset), offset);
+ DCHECK_EQ(AlignToPageSize(length), length);
+
+  // Calls to this function must be synchronized properly.
+ DFAKE_SCOPED_LOCK(thread_collision_warner_);
// Return false when instance has been purged or not initialized properly by
// checking if |last_known_usage_| is NULL.
if (last_known_usage_.is_null())
return false;
- SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
- SharedState new_state(SharedState::LOCKED, Time());
- SharedState result(subtle::Acquire_CompareAndSwap(
- &SharedStateFromSharedMemory(shared_memory_)->value.i,
- old_state.value.i,
- new_state.value.i));
- if (result.value.u == old_state.value.u)
- return true;
+ DCHECK(shared_memory_.memory());
+
+ // We need to successfully acquire the platform independent lock before
+ // individual pages can be locked.
+ if (!locked_page_count_) {
+ SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
+ SharedState new_state(SharedState::LOCKED, Time());
+ SharedState result(subtle::Acquire_CompareAndSwap(
+ &SharedStateFromSharedMemory(shared_memory_)->value.i,
+ old_state.value.i,
+ new_state.value.i));
+ if (result.value.u != old_state.value.u) {
+ // Update |last_known_usage_| in case the above CAS failed because of
+ // an incorrect timestamp.
+ last_known_usage_ = result.GetTimestamp();
+ return false;
+ }
+ }
+
+ // Zero for length means "everything onward".
+ if (!length)
+ length = AlignToPageSize(mapped_size_) - offset;
+
+ size_t start = offset / base::GetPageSize();
+ size_t end = start + length / base::GetPageSize();
+ DCHECK_LT(start, end);
+ DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());
+
+ // Add pages to |locked_page_count_|.
+ // Note: Locking a page that is already locked is an error.
+ locked_page_count_ += end - start;
+#if DCHECK_IS_ON
+ // Detect incorrect usage by keeping track of exactly what pages are locked.
+ for (auto page = start; page < end; ++page) {
+ auto result = locked_pages_.insert(page);
+ DCHECK(result.second);
+ }
+ DCHECK_EQ(locked_pages_.size(), locked_page_count_);
+#endif
- // Update |last_known_usage_| in case the above CAS failed because of
- // an incorrect timestamp.
- last_known_usage_ = result.GetTimestamp();
- return false;
+#if defined(OS_ANDROID)
+ SharedMemoryHandle handle = shared_memory_.handle();
+ DCHECK(SharedMemory::IsHandleValid(handle));
+ if (ashmem_pin_region(
+ handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) {
+ return false;
+ }
+#endif
+
+ return true;
}
-void DiscardableSharedMemory::Unlock() {
+void DiscardableSharedMemory::Unlock(size_t offset, size_t length) {
+ DCHECK_EQ(AlignToPageSize(offset), offset);
+ DCHECK_EQ(AlignToPageSize(length), length);
+
+  // Calls to this function must be synchronized properly.
+ DFAKE_SCOPED_LOCK(thread_collision_warner_);
+
+ // Zero for length means "everything onward".
+ if (!length)
+ length = AlignToPageSize(mapped_size_) - offset;
+
DCHECK(shared_memory_.memory());
+#if defined(OS_ANDROID)
+ SharedMemoryHandle handle = shared_memory_.handle();
+ DCHECK(SharedMemory::IsHandleValid(handle));
+ if (ashmem_unpin_region(
+ handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) {
+ DPLOG(ERROR) << "ashmem_unpin_region() failed";
+ }
+#endif
+
+ size_t start = offset / base::GetPageSize();
+ size_t end = start + length / base::GetPageSize();
+ DCHECK_LT(start, end);
+ DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());
+
+ // Remove pages from |locked_page_count_|.
+ // Note: Unlocking a page that is not locked is an error.
+ DCHECK_GE(locked_page_count_, end - start);
+ locked_page_count_ -= end - start;
+#if DCHECK_IS_ON
+ // Detect incorrect usage by keeping track of exactly what pages are locked.
+ for (auto page = start; page < end; ++page) {
+ auto erased_count = locked_pages_.erase(page);
+ DCHECK_EQ(1u, erased_count);
+ }
+ DCHECK_EQ(locked_pages_.size(), locked_page_count_);
+#endif
+
+ // Early out and avoid releasing the platform independent lock if some pages
+ // are still locked.
+ if (locked_page_count_)
+ return;
+
Time current_time = Now();
DCHECK(!current_time.is_null());
@@ -151,7 +274,7 @@ void DiscardableSharedMemory::Unlock() {
// Note: timestamp cannot be NULL as that is a unique value used when
// locked or purged.
DCHECK(!new_state.GetTimestamp().is_null());
- // Timestamps precision should at least be accurate to the second.
+ // Timestamp precision should at least be accurate to the second.
DCHECK_EQ((new_state.GetTimestamp() - Time::UnixEpoch()).InSeconds(),
(current_time - Time::UnixEpoch()).InSeconds());
SharedState result(subtle::Release_CompareAndSwap(
@@ -165,10 +288,14 @@ void DiscardableSharedMemory::Unlock() {
}
void* DiscardableSharedMemory::memory() const {
- return SharedStateFromSharedMemory(shared_memory_) + 1;
+ return reinterpret_cast<uint8*>(shared_memory_.memory()) +
+ AlignToPageSize(sizeof(SharedState));
}
bool DiscardableSharedMemory::Purge(Time current_time) {
+  // Calls to this function must be synchronized properly.
+ DFAKE_SCOPED_LOCK(thread_collision_warner_);
+
// Early out if not mapped. This can happen if the segment was previously
// unmapped using a call to Close().
if (!shared_memory_.memory())
diff --git a/base/memory/discardable_shared_memory.h b/base/memory/discardable_shared_memory.h
index ca2accf..c69c970 100644
--- a/base/memory/discardable_shared_memory.h
+++ b/base/memory/discardable_shared_memory.h
@@ -6,18 +6,27 @@
#define BASE_MEMORY_DISCARDABLE_SHARED_MEMORY_H_
#include "base/base_export.h"
+#include "base/logging.h"
#include "base/memory/shared_memory.h"
+#include "base/threading/thread_collision_warner.h"
#include "base/time/time.h"
+#if DCHECK_IS_ON
+#include <set>
+#endif
+
namespace base {
// Platform abstraction for discardable shared memory.
+//
+// This class is not thread-safe. Clients are responsible for synchronizing
+// access to an instance of this class.
class BASE_EXPORT DiscardableSharedMemory {
public:
DiscardableSharedMemory();
// Create a new DiscardableSharedMemory object from an existing, open shared
- // memory file.
+ // memory file. Memory must be locked.
explicit DiscardableSharedMemory(SharedMemoryHandle handle);
// Closes any open files.
@@ -27,26 +36,35 @@ class BASE_EXPORT DiscardableSharedMemory {
// Returns true on success and false on failure.
bool CreateAndMap(size_t size);
- // Maps the discardable memory into the caller's address space.
+ // Maps the locked discardable memory into the caller's address space.
// Returns true on success, false otherwise.
bool Map(size_t size);
// The actual size of the mapped memory (may be larger than requested).
- size_t mapped_size() const { return shared_memory_.mapped_size(); }
+ size_t mapped_size() const { return mapped_size_; }
// Returns a shared memory handle for this DiscardableSharedMemory object.
SharedMemoryHandle handle() const { return shared_memory_.handle(); }
- // Locks the memory so that it will not be purged by the system. Returns
- // true if successful and the memory is still resident. Locking can fail
- // for three reasons; object might have been purged, our last known usage
+ // Locks a range of memory so that it will not be purged by the system.
+ // Returns true if successful and the memory is still resident. Locking can
+ // fail for three reasons; object might have been purged, our last known usage
// timestamp might be out of date or memory might already be locked. Last
// known usage time is updated to the actual last usage timestamp if memory
- // is still resident or 0 if not.
- bool Lock();
-
- // Unlock previously successfully locked memory.
- void Unlock();
+ // is still resident or 0 if not. The range of memory must be unlocked. The
+ // result of trying to lock an already locked range is undefined.
+ // |offset| and |length| must both be a multiple of the page size as returned
+ // by GetPageSize().
+ // Passing 0 for |length| means "everything onward".
+ bool Lock(size_t offset, size_t length);
+
+ // Unlock a previously successfully locked range of memory. The range of
+ // memory must be locked. The result of trying to unlock a not
+ // previously locked range is undefined.
+ // |offset| and |length| must both be a multiple of the page size as returned
+ // by GetPageSize().
+ // Passing 0 for |length| means "everything onward".
+ void Unlock(size_t offset, size_t length);
// Gets a pointer to the opened discardable memory space. Discardable memory
// must have been mapped via Map().
@@ -99,6 +117,14 @@ class BASE_EXPORT DiscardableSharedMemory {
virtual Time Now() const;
SharedMemory shared_memory_;
+ size_t mapped_size_;
+ size_t locked_page_count_;
+#if DCHECK_IS_ON
+ std::set<size_t> locked_pages_;
+#endif
+ // Implementation is not thread-safe but still usable if clients are
+ // synchronized somehow. Use a collision warner to detect incorrect usage.
+ DFAKE_MUTEX(thread_collision_warner_);
Time last_known_usage_;
DISALLOW_COPY_AND_ASSIGN(DiscardableSharedMemory);
diff --git a/base/memory/discardable_shared_memory_unittest.cc b/base/memory/discardable_shared_memory_unittest.cc
index e517429..90441e1 100644
--- a/base/memory/discardable_shared_memory_unittest.cc
+++ b/base/memory/discardable_shared_memory_unittest.cc
@@ -4,6 +4,7 @@
#include "base/basictypes.h"
#include "base/memory/discardable_shared_memory.h"
+#include "base/process/process_metrics.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -20,7 +21,7 @@ class TestDiscardableSharedMemory : public DiscardableSharedMemory {
private:
// Overriden from DiscardableSharedMemory:
- virtual Time Now() const override { return now_; }
+ Time Now() const override { return now_; }
Time now_;
};
@@ -60,13 +61,17 @@ TEST(DiscardableSharedMemoryTest, LockAndUnlock) {
// Memory is initially locked. Unlock it.
memory1.SetNow(Time::FromDoubleT(1));
- memory1.Unlock();
+ memory1.Unlock(0, 0);
// Lock and unlock memory.
- rv = memory1.Lock();
+ rv = memory1.Lock(0, 0);
EXPECT_TRUE(rv);
memory1.SetNow(Time::FromDoubleT(2));
- memory1.Unlock();
+ memory1.Unlock(0, 0);
+
+ // Lock again before duplicating and passing ownership to new instance.
+ rv = memory1.Lock(0, 0);
+ EXPECT_TRUE(rv);
SharedMemoryHandle shared_handle;
ASSERT_TRUE(
@@ -77,35 +82,21 @@ TEST(DiscardableSharedMemoryTest, LockAndUnlock) {
rv = memory2.Map(kDataSize);
ASSERT_TRUE(rv);
- // Lock first instance again.
- rv = memory1.Lock();
- EXPECT_TRUE(rv);
-
// Unlock second instance.
memory2.SetNow(Time::FromDoubleT(3));
- memory2.Unlock();
+ memory2.Unlock(0, 0);
- // Lock and unlock second instance.
- rv = memory2.Lock();
+ // Lock second instance before passing ownership back to first instance.
+ rv = memory2.Lock(0, 0);
EXPECT_TRUE(rv);
- memory2.SetNow(Time::FromDoubleT(4));
- memory2.Unlock();
-
- // Try to lock first instance again. Should fail as first instance has an
- // incorrect last know usage time.
- rv = memory1.Lock();
- EXPECT_FALSE(rv);
// Memory should still be resident.
rv = memory1.IsMemoryResident();
EXPECT_TRUE(rv);
- // Second attempt to lock first instance should succeed as last known usage
- // time is now correct.
- rv = memory1.Lock();
- EXPECT_TRUE(rv);
- memory1.SetNow(Time::FromDoubleT(5));
- memory1.Unlock();
+ // Unlock first instance.
+ memory1.SetNow(Time::FromDoubleT(4));
+ memory1.Unlock(0, 0);
}
TEST(DiscardableSharedMemoryTest, Purge) {
@@ -129,7 +120,7 @@ TEST(DiscardableSharedMemoryTest, Purge) {
EXPECT_FALSE(rv);
memory2.SetNow(Time::FromDoubleT(2));
- memory2.Unlock();
+ memory2.Unlock(0, 0);
ASSERT_TRUE(memory2.IsMemoryResident());
@@ -144,7 +135,7 @@ TEST(DiscardableSharedMemoryTest, Purge) {
EXPECT_TRUE(rv);
// Lock should fail as memory has been purged.
- rv = memory2.Lock();
+ rv = memory2.Lock(0, 0);
EXPECT_FALSE(rv);
ASSERT_FALSE(memory2.IsMemoryResident());
@@ -167,11 +158,11 @@ TEST(DiscardableSharedMemoryTest, LastUsed) {
ASSERT_TRUE(rv);
memory2.SetNow(Time::FromDoubleT(1));
- memory2.Unlock();
+ memory2.Unlock(0, 0);
EXPECT_EQ(memory2.last_known_usage(), Time::FromDoubleT(1));
- rv = memory2.Lock();
+ rv = memory2.Lock(0, 0);
EXPECT_TRUE(rv);
// This should fail as memory is locked.
@@ -182,7 +173,7 @@ TEST(DiscardableSharedMemoryTest, LastUsed) {
EXPECT_EQ(memory1.last_known_usage(), Time::FromDoubleT(2));
memory2.SetNow(Time::FromDoubleT(3));
- memory2.Unlock();
+ memory2.Unlock(0, 0);
// Usage time should be correct for |memory2| instance.
EXPECT_EQ(memory2.last_known_usage(), Time::FromDoubleT(3));
@@ -235,16 +226,65 @@ TEST(DiscardableSharedMemoryTest, LockShouldAlwaysFailAfterSuccessfulPurge) {
ASSERT_TRUE(rv);
memory2.SetNow(Time::FromDoubleT(1));
- memory2.Unlock();
+ memory2.Unlock(0, 0);
rv = memory2.Purge(Time::FromDoubleT(2));
EXPECT_TRUE(rv);
// Lock should fail as memory has been purged.
- rv = memory2.Lock();
+ rv = memory2.Lock(0, 0);
+ EXPECT_FALSE(rv);
+ rv = memory1.Lock(0, 0);
+ EXPECT_FALSE(rv);
+}
+
+TEST(DiscardableSharedMemoryTest, LockAndUnlockRange) {
+ const uint32 kDataSize = 32;
+
+ uint32 data_size_in_bytes = kDataSize * base::GetPageSize();
+
+ TestDiscardableSharedMemory memory1;
+ bool rv = memory1.CreateAndMap(data_size_in_bytes);
+ ASSERT_TRUE(rv);
+
+ SharedMemoryHandle shared_handle;
+ ASSERT_TRUE(
+ memory1.ShareToProcess(GetCurrentProcessHandle(), &shared_handle));
+ ASSERT_TRUE(SharedMemory::IsHandleValid(shared_handle));
+
+ TestDiscardableSharedMemory memory2(shared_handle);
+ rv = memory2.Map(data_size_in_bytes);
+ ASSERT_TRUE(rv);
+
+ // Unlock first page.
+ memory2.SetNow(Time::FromDoubleT(1));
+ memory2.Unlock(0, base::GetPageSize());
+
+ rv = memory1.Purge(Time::FromDoubleT(2));
EXPECT_FALSE(rv);
- rv = memory1.Lock();
+
+ // Unlock second page.
+ memory2.SetNow(Time::FromDoubleT(3));
+ memory2.Unlock(base::GetPageSize(), base::GetPageSize());
+
+ rv = memory1.Purge(Time::FromDoubleT(4));
EXPECT_FALSE(rv);
+
+ // Unlock anything onwards.
+ memory2.SetNow(Time::FromDoubleT(5));
+ memory2.Unlock(2 * base::GetPageSize(), 0);
+
+ // Memory is unlocked, but our usage timestamp is incorrect.
+ rv = memory1.Purge(Time::FromDoubleT(6));
+ EXPECT_FALSE(rv);
+
+ // The failed purge attempt should have updated usage time to the correct
+ // value.
+ EXPECT_EQ(Time::FromDoubleT(5), memory1.last_known_usage());
+
+ // Purge should now succeed.
+ rv = memory1.Purge(Time::FromDoubleT(7));
+ EXPECT_TRUE(rv);
}
} // namespace
diff --git a/base/process/process_metrics.h b/base/process/process_metrics.h
index ca23ac8..d06e018 100644
--- a/base/process/process_metrics.h
+++ b/base/process/process_metrics.h
@@ -231,6 +231,9 @@ class BASE_EXPORT ProcessMetrics {
// Returns 0 if it can't compute the commit charge.
BASE_EXPORT size_t GetSystemCommitCharge();
+// Returns the number of bytes in a memory page.
+BASE_EXPORT size_t GetPageSize();
+
#if defined(OS_POSIX)
// Returns the maximum number of file descriptors that can be open by a process
// at once. If the number is unavailable, a conservative best guess is returned.
diff --git a/base/process/process_metrics_ios.cc b/base/process/process_metrics_ios.cc
index 405c373..9ae838d 100644
--- a/base/process/process_metrics_ios.cc
+++ b/base/process/process_metrics_ios.cc
@@ -65,4 +65,8 @@ void SetFdLimit(unsigned int max_descriptors) {
// Unimplemented.
}
+size_t GetPageSize() {
+ return getpagesize();
+}
+
} // namespace base
diff --git a/base/process/process_metrics_posix.cc b/base/process/process_metrics_posix.cc
index 7afae21..42b3f2d6 100644
--- a/base/process/process_metrics_posix.cc
+++ b/base/process/process_metrics_posix.cc
@@ -68,4 +68,8 @@ void SetFdLimit(unsigned int max_descriptors) {
}
}
+size_t GetPageSize() {
+ return getpagesize();
+}
+
} // namespace base
diff --git a/base/process/process_metrics_win.cc b/base/process/process_metrics_win.cc
index 16db44f..1dd97e6 100644
--- a/base/process/process_metrics_win.cc
+++ b/base/process/process_metrics_win.cc
@@ -284,4 +284,8 @@ size_t GetSystemCommitCharge() {
return (info.CommitTotal * system_info.dwPageSize) / 1024;
}
+size_t GetPageSize() {
+ return PAGESIZE_KB * 1024;
+}
+
} // namespace base