Update On Sun Oct 6 20:31:52 CEST 2024

This commit is contained in:
github-action[bot]
2024-10-06 20:31:53 +02:00
parent f298d82564
commit d52a909dcb
1319 changed files with 27066 additions and 22526 deletions
+7 -8
View File
@@ -41,7 +41,7 @@ jobs:
uses: actions/cache@v4
with:
path: src/third_party/android_toolchain/ndk/
key: android-ndk-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
key: android-ndk-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}-a
- run: ./get-clang.sh
- run: EXTRA_FLAGS='target_os="android"' ./get-clang.sh
- run: |
@@ -49,7 +49,7 @@ jobs:
wget https://snapshot.debian.org/archive/debian/20230611T210420Z/pool/main/q/qemu/qemu-user-static_8.0%2Bdfsg-4_amd64.deb
fi
cache-toolchains-win:
runs-on: windows-2019
runs-on: windows-2022
steps:
- uses: actions/checkout@v4
- name: Cache toolchains
@@ -179,11 +179,10 @@ jobs:
abi: armeabi-v7a
env:
EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}" target_os="android"'
BUNDLE: naiveproxy-plugin-${{ github.event.release.tag_name || 'v1' }}-${{ matrix.abi }}.apk
BUNDLE: naiveproxy-plugin-${{ github.event.release.tag_name || 'v1.1.1.1-1' }}-${{ matrix.abi }}.apk
steps:
- uses: actions/checkout@v4
- name: Setup Java
uses: actions/setup-java@v4
- uses: actions/setup-java@v4
with:
distribution: 'temurin'
java-version: 17
@@ -204,7 +203,7 @@ jobs:
uses: actions/cache@v4
with:
path: src/third_party/android_toolchain/ndk/
key: android-ndk-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
key: android-ndk-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}-a
- name: Cache sysroot
uses: actions/cache@v4
with:
@@ -241,7 +240,7 @@ jobs:
working-directory: apk
env:
APK_ABI: ${{ matrix.abi }}
APK_VERSION_NAME: ${{ github.event.release.tag_name || 'v1' }}
APK_VERSION_NAME: ${{ github.event.release.tag_name || 'v1.1.1.1-1' }}
KEYSTORE_PASS: ${{ secrets.KEYSTORE_PASS }}
run: |
mkdir -p app/libs/$APK_ABI
@@ -261,7 +260,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
win:
needs: cache-toolchains-win
runs-on: windows-2019
runs-on: windows-2022
strategy:
fail-fast: false
matrix:
+1 -1
View File
@@ -1 +1 @@
128.0.6613.40
129.0.6668.81
+1 -1
View File
@@ -21,7 +21,7 @@ The Naïve server here works as a forward proxy and a packet length padding laye
## Download NaïveProxy
Download [here](https://github.com/klzgrad/naiveproxy/releases/latest). Supported platforms include: Windows, Android (with [NekoBox](https://github.com/MatsuriDayo/NekoBoxForAndroid)), Linux, Mac OS, and OpenWrt ([support status](https://github.com/klzgrad/naiveproxy/wiki/OpenWrt-Support)).
Download [here](https://github.com/klzgrad/naiveproxy/releases/latest). Supported platforms include: Windows, Android (with [Exclave](https://github.com/dyhkwong/Exclave), [NekoBox](https://github.com/MatsuriDayo/NekoBoxForAndroid)), Linux, Mac OS, and OpenWrt ([support status](https://github.com/klzgrad/naiveproxy/wiki/OpenWrt-Support)).
Users should always use the latest version to keep signatures identical to Chrome.
+1 -1
View File
@@ -31,7 +31,7 @@ android {
targetSdk = 35
applicationId = "io.nekohasekai.sagernet.plugin.naive"
versionCode = System.getenv("APK_VERSION_NAME").removePrefix("v").split(".")[0].toInt()
versionCode = System.getenv("APK_VERSION_NAME").removePrefix("v").split(".")[0].toInt() * 10 + System.getenv("APK_VERSION_NAME").removePrefix("v").split("-")[1].toInt()
versionName = System.getenv("APK_VERSION_NAME").removePrefix("v")
splits.abi {
isEnable = true
+11
View File
@@ -48,6 +48,7 @@ Ahmed Elwasefi <a.m.elwasefi@gmail.com>
Ahmet Emir Ercin <ahmetemiremir@gmail.com>
Aidarbek Suleimenov <suleimenov.aidarbek@gmail.com>
Aiden Grossman <aidengrossmanpso@gmail.com>
Airing Deng <airingdeng@gmail.com>
Ajay Berwal <a.berwal@samsung.com>
Ajay Berwal <ajay.berwal@samsung.com>
Ajay Sharma <ajay.sh@samsung.com>
@@ -318,6 +319,7 @@ Danny Weiss <danny.weiss.fr@gmail.com>
Danylo Boiko <danielboyko02@gmail.com>
Daoming Qiu <daoming.qiu@intel.com>
Darik Harter <darik.harter@gmail.com>
Darryl Pogue <darryl@dpogue.ca>
Darshan Sen <raisinten@gmail.com>
Darshini KN <kn.darshini@samsung.com>
Dave Vandyke <kzar@kzar.co.uk>
@@ -580,6 +582,7 @@ Jaehyun Lee <j-hyun.lee@samsung.com>
Jaekyeom Kim <btapiz@gmail.com>
Jaemin Seo <jaemin86.seo@samsung.com>
Jaemo Koo <jaemok@amazon.com>
Jaemo Koo <koo2434@gmail.com>
Jaeseok Yoon <yjaeseok@gmail.com>
Jaewon Choi <jaewon.james.choi@gmail.com>
Jaewon Jung <jw.jung@navercorp.com>
@@ -702,6 +705,7 @@ JongKwon Lee <jongkwon.lee@navercorp.com>
Jongmok Kim <jongmok.kim@navercorp.com>
Jongmok Kim <johny.kimc@gmail.com>
Jongsoo Lee <leejongsoo@gmail.com>
Joonas Halinen <joonashalinen@outlook.com>
Joone Hur <joone.hur@intel.com>
Joonghun Park <pjh0718@gmail.com>
Jorge Villatoro <jorge@tomatocannon.com>
@@ -786,6 +790,8 @@ Ketan Goyal <ketan.goyal@samsung.com>
Kevin Gibbons <bakkot@gmail.com>
Kevin Lee Helpingstine <sig11@reprehensible.net>
Kevin M. McCormick <mckev@amazon.com>
Kexy Biscuit <kexybiscuit@aosc.io>
Kexy Biscuit <kexybiscuit@gmail.com>
Keyou <qqkillyou@gmail.com>
Khasim Syed Mohammed <khasim.mohammed@linaro.org>
Khem Raj <raj.khem@gmail.com>
@@ -821,6 +827,7 @@ Kyungtae Kim <ktf.kim@samsung.com>
Kyungyoung Heo <bbvch13531@gmail.com>
Kyutae Lee <gorisanson@gmail.com>
Lalit Chandivade <lalit.chandivade@einfochips.com>
Lalit Rana <lalitrn44@gmail.com>
Lam Lu <lamlu@amazon.com>
Laszlo Gombos <l.gombos@samsung.com>
Laszlo Radanyi <bekkra@gmail.com>
@@ -923,6 +930,7 @@ Mathias Bynens <mathias@qiwi.be>
Mathieu Meisser <mmeisser@logitech.com>
Matt Arpidone <mma.public@gmail.com>
Matt Fysh <mattfysh@gmail.com>
Matt Harding <majaharding@gmail.com>
Matt Strum <mstrum@amazon.com>
Matt Zeunert <matt@mostlystatic.com>
Matthew "strager" Glazar <strager.nds@gmail.com>
@@ -1045,6 +1053,7 @@ Nivedan Sharma <ni.sharma@samsung.com>
Noam Rosenthal <noam.j.rosenthal@gmail.com>
Noj Vek <nojvek@gmail.com>
Nolan Cao <nolan.robin.cao@gmail.com>
Nourhan Hasan <nourhan.m.hasan@gmail.com>
Oleksii Kadurin <ovkadurin@gmail.com>
Oliver Dunk <oliver@oliverdunk.com>
Olivier Tilloy <olivier+chromium@tilloy.net>
@@ -1407,6 +1416,7 @@ Tibor Dusnoki <tibor.dusnoki.91@gmail.com>
Tibor Dusnoki <tdusnoki@inf.u-szeged.hu>
Tien Hock Loh <tienhock.loh@starfivetech.com>
Tim Ansell <mithro@mithis.com>
Tim Barry <oregongraperoot@gmail.com>
Tim Niederhausen <tim@rnc-ag.de>
Tim Steiner <twsteiner@gmail.com>
Timo Gurr <timo.gurr@gmail.com>
@@ -1475,6 +1485,7 @@ Wang Weiwei <wangww@dingdao.com>
Wangyang Dai <jludwy@gmail.com>
Wanming Lin <wanming.lin@intel.com>
Wei Li <wei.c.li@intel.com>
Weicong Yu <yuweicong666@gmail.com>
Wen Fan <fanwen1@huawei.com>
Wenxiang Qian <leonwxqian@gmail.com>
WenSheng He <wensheng.he@samsung.com>
+302 -403
View File
File diff suppressed because it is too large Load Diff
+6 -15
View File
@@ -381,8 +381,6 @@ component("base") {
"memory/memory_pressure_listener.h",
"memory/memory_pressure_monitor.cc",
"memory/memory_pressure_monitor.h",
"memory/nonscannable_memory.cc",
"memory/nonscannable_memory.h",
"memory/page_size.h",
"memory/platform_shared_memory_handle.cc",
"memory/platform_shared_memory_handle.h",
@@ -422,12 +420,14 @@ component("base") {
"memory/shared_memory_mapper.h",
"memory/shared_memory_mapping.cc",
"memory/shared_memory_mapping.h",
"memory/shared_memory_safety_checker.h",
"memory/shared_memory_security_policy.cc",
"memory/shared_memory_security_policy.h",
"memory/shared_memory_tracker.cc",
"memory/shared_memory_tracker.h",
"memory/singleton.h",
"memory/stack_allocated.h",
"memory/structured_shared_memory.h",
"memory/unsafe_shared_memory_pool.cc",
"memory/unsafe_shared_memory_pool.h",
"memory/unsafe_shared_memory_region.cc",
@@ -470,6 +470,7 @@ component("base") {
"metrics/histogram_flattener.h",
"metrics/histogram_functions.cc",
"metrics/histogram_functions.h",
"metrics/histogram_functions_internal_overloads.h",
"metrics/histogram_macros.h",
"metrics/histogram_macros_internal.h",
"metrics/histogram_macros_local.h",
@@ -549,12 +550,14 @@ component("base") {
"process/process_info.h",
"process/set_process_title.cc",
"process/set_process_title.h",
"profiler/call_stack_profile_params.h",
"profiler/frame.cc",
"profiler/frame.h",
"profiler/metadata_recorder.cc",
"profiler/metadata_recorder.h",
"profiler/module_cache.cc",
"profiler/module_cache.h",
"profiler/process_type.h",
"profiler/profile_builder.h",
"profiler/register_context.h",
"profiler/sample_metadata.cc",
@@ -786,7 +789,6 @@ component("base") {
"task/thread_pool/pooled_task_runner_delegate.h",
"task/thread_pool/priority_queue.cc",
"task/thread_pool/priority_queue.h",
"task/thread_pool/semaphore.h",
"task/thread_pool/sequence.cc",
"task/thread_pool/sequence.h",
"task/thread_pool/service_thread.cc",
@@ -803,8 +805,6 @@ component("base") {
"task/thread_pool/thread_group.h",
"task/thread_pool/thread_group_impl.cc",
"task/thread_pool/thread_group_impl.h",
"task/thread_pool/thread_group_semaphore.cc",
"task/thread_pool/thread_group_semaphore.h",
"task/thread_pool/thread_group_worker_delegate.cc",
"task/thread_pool/thread_group_worker_delegate.h",
"task/thread_pool/thread_pool_impl.cc",
@@ -815,8 +815,6 @@ component("base") {
"task/thread_pool/worker_thread.cc",
"task/thread_pool/worker_thread.h",
"task/thread_pool/worker_thread_observer.h",
"task/thread_pool/worker_thread_semaphore.cc",
"task/thread_pool/worker_thread_semaphore.h",
"task/thread_pool/worker_thread_set.cc",
"task/thread_pool/worker_thread_set.h",
"task/thread_pool/worker_thread_waitable_event.cc",
@@ -1106,7 +1104,7 @@ component("base") {
if (build_rust_base_conversions) {
sources += [
"containers/span_rust.h",
"strings/string_piece_rust.h",
"strings/string_view_rust.h",
]
# Base provides conversions between CXX types and base types (e.g.
@@ -1474,7 +1472,6 @@ component("base") {
"rand_util_nacl.cc",
"sync_socket_nacl.cc",
"system/sys_info_nacl.cc",
"task/thread_pool/semaphore/semaphore_default.cc",
"threading/platform_thread_linux_base.cc",
"threading/platform_thread_nacl.cc",
]
@@ -1554,10 +1551,6 @@ component("base") {
]
}
if (is_linux || is_chromeos || is_android || is_fuchsia) {
sources += [ "task/thread_pool/semaphore/semaphore_posix.cc" ]
}
if (is_posix) {
sources += [
"base_paths_posix.h",
@@ -1736,7 +1729,6 @@ component("base") {
"synchronization/waitable_event_watcher_win.cc",
"synchronization/waitable_event_win.cc",
"system/sys_info_win.cc",
"task/thread_pool/semaphore/semaphore_win.cc",
"threading/platform_thread_win.cc",
"threading/platform_thread_win.h",
"threading/thread_local_storage_win.cc",
@@ -1951,7 +1943,6 @@ component("base") {
"strings/sys_string_conversions_apple.mm",
"synchronization/waitable_event_apple.cc",
"system/sys_info_apple.mm",
"task/thread_pool/semaphore/semaphore_apple.cc",
"threading/platform_thread_apple.mm",
"time/time_apple.mm",
]
-5
View File
@@ -1,9 +1,4 @@
include_rules = [
# `#include "partition_alloc/..."` is prefered to
# `#include "base/allocator/partition_allocator/src/partition_alloc/..."`.
"+partition_alloc",
"-base/allocator/partition_allocator",
"+third_party/ashmem",
"+third_party/apple_apsl",
"+third_party/boringssl/src/include",
@@ -125,193 +125,165 @@ struct DispatcherImpl {
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC)
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
static void* AllocFn(const AllocatorDispatch* self,
size_t size,
void* context) {
void* const address = self->next->alloc_function(self->next, size, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void* AllocUncheckedFn(const AllocatorDispatch* self,
size_t size,
void* context) {
static void* AllocFn(size_t size, void* context) {
void* const address =
self->next->alloc_unchecked_function(self->next, size, context);
allocator_dispatch_.next->alloc_function(size, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void* AllocZeroInitializedFn(const AllocatorDispatch* self,
size_t n,
size_t size,
void* context) {
void* const address = self->next->alloc_zero_initialized_function(
self->next, n, size, context);
static void* AllocUncheckedFn(size_t size, void* context) {
void* const address =
allocator_dispatch_.next->alloc_unchecked_function(size, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void* AllocZeroInitializedFn(size_t n, size_t size, void* context) {
void* const address =
allocator_dispatch_.next->alloc_zero_initialized_function(n, size,
context);
DoNotifyAllocationForShim(address, n * size);
return address;
}
static void* AllocAlignedFn(const AllocatorDispatch* self,
size_t alignment,
size_t size,
void* context) {
void* const address = self->next->alloc_aligned_function(
self->next, alignment, size, context);
static void* AllocAlignedFn(size_t alignment, size_t size, void* context) {
void* const address = allocator_dispatch_.next->alloc_aligned_function(
alignment, size, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void* ReallocFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
static void* ReallocFn(void* address, size_t size, void* context) {
// Note: size == 0 actually performs free.
DoNotifyFreeForShim(address);
void* const reallocated_address =
self->next->realloc_function(self->next, address, size, context);
allocator_dispatch_.next->realloc_function(address, size, context);
DoNotifyAllocationForShim(reallocated_address, size);
return reallocated_address;
}
static void* ReallocUncheckedFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
static void* ReallocUncheckedFn(void* address, size_t size, void* context) {
// Note: size == 0 actually performs free.
DoNotifyFreeForShim(address);
void* const reallocated_address = self->next->realloc_unchecked_function(
self->next, address, size, context);
void* const reallocated_address =
allocator_dispatch_.next->realloc_unchecked_function(address, size,
context);
DoNotifyAllocationForShim(reallocated_address, size);
return reallocated_address;
}
static void FreeFn(const AllocatorDispatch* self,
void* address,
void* context) {
static void FreeFn(void* address, void* context) {
// Note: DoNotifyFree should be called before free_function (here and in
// other places). That is because observers need to handle the allocation
// being freed before calling free_function, as once the latter is executed
// the address becomes available and can be allocated by another thread.
// That would be racy otherwise.
DoNotifyFreeForShim(address);
MUSTTAIL return self->next->free_function(self->next, address, context);
MUSTTAIL return allocator_dispatch_.next->free_function(address, context);
}
static unsigned BatchMallocFn(const AllocatorDispatch* self,
size_t size,
static unsigned BatchMallocFn(size_t size,
void** results,
unsigned num_requested,
void* context) {
unsigned const num_allocated = self->next->batch_malloc_function(
self->next, size, results, num_requested, context);
unsigned const num_allocated =
allocator_dispatch_.next->batch_malloc_function(size, results,
num_requested, context);
for (unsigned i = 0; i < num_allocated; ++i) {
DoNotifyAllocationForShim(results[i], size);
}
return num_allocated;
}
static void BatchFreeFn(const AllocatorDispatch* self,
void** to_be_freed,
static void BatchFreeFn(void** to_be_freed,
unsigned num_to_be_freed,
void* context) {
for (unsigned i = 0; i < num_to_be_freed; ++i) {
DoNotifyFreeForShim(to_be_freed[i]);
}
MUSTTAIL return self->next->batch_free_function(self->next, to_be_freed,
num_to_be_freed, context);
MUSTTAIL return allocator_dispatch_.next->batch_free_function(
to_be_freed, num_to_be_freed, context);
}
static void FreeDefiniteSizeFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
static void FreeDefiniteSizeFn(void* address, size_t size, void* context) {
DoNotifyFreeForShim(address);
MUSTTAIL return self->next->free_definite_size_function(self->next, address,
size, context);
MUSTTAIL return allocator_dispatch_.next->free_definite_size_function(
address, size, context);
}
static void TryFreeDefaultFn(const AllocatorDispatch* self,
void* address,
void* context) {
static void TryFreeDefaultFn(void* address, void* context) {
DoNotifyFreeForShim(address);
MUSTTAIL return self->next->try_free_default_function(self->next, address,
context);
MUSTTAIL return allocator_dispatch_.next->try_free_default_function(
address, context);
}
static void* AlignedMallocFn(const AllocatorDispatch* self,
size_t size,
size_t alignment,
void* context) {
void* const address = self->next->aligned_malloc_function(
self->next, size, alignment, context);
static void* AlignedMallocFn(size_t size, size_t alignment, void* context) {
void* const address = allocator_dispatch_.next->aligned_malloc_function(
size, alignment, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void* AlignedMallocUncheckedFn(const AllocatorDispatch* self,
size_t size,
static void* AlignedMallocUncheckedFn(size_t size,
size_t alignment,
void* context) {
void* const address = self->next->aligned_malloc_unchecked_function(
self->next, size, alignment, context);
void* const address =
allocator_dispatch_.next->aligned_malloc_unchecked_function(
size, alignment, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void* AlignedReallocFn(const AllocatorDispatch* self,
void* address,
static void* AlignedReallocFn(void* address,
size_t size,
size_t alignment,
void* context) {
// Note: size == 0 actually performs free.
DoNotifyFreeForShim(address);
address = self->next->aligned_realloc_function(self->next, address, size,
alignment, context);
address = allocator_dispatch_.next->aligned_realloc_function(
address, size, alignment, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void* AlignedReallocUncheckedFn(const AllocatorDispatch* self,
void* address,
static void* AlignedReallocUncheckedFn(void* address,
size_t size,
size_t alignment,
void* context) {
// Note: size == 0 actually performs free.
DoNotifyFreeForShim(address);
address = self->next->aligned_realloc_unchecked_function(
self->next, address, size, alignment, context);
address = allocator_dispatch_.next->aligned_realloc_unchecked_function(
address, size, alignment, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void AlignedFreeFn(const AllocatorDispatch* self,
void* address,
void* context) {
static void AlignedFreeFn(void* address, void* context) {
DoNotifyFreeForShim(address);
MUSTTAIL return self->next->aligned_free_function(self->next, address,
context);
MUSTTAIL return allocator_dispatch_.next->aligned_free_function(address,
context);
}
ALWAYS_INLINE static void DoNotifyAllocationForShim(void* address,
@@ -33,8 +33,9 @@ struct BASE_EXPORT ReentryGuard {
}
ALWAYS_INLINE ~ReentryGuard() {
if (LIKELY(allowed_))
if (allowed_) [[likely]] {
pthread_setspecific(entered_key_, nullptr);
}
}
explicit operator bool() const noexcept { return allowed_; }
@@ -244,7 +244,7 @@ struct ThreadLocalStorage {
auto* slot = static_cast<SingleSlot*>(tls_system.GetThreadSpecificData());
if (UNLIKELY(slot == nullptr)) {
if (slot == nullptr) [[unlikely]] {
slot = FindAndAllocateFreeSlot(root_.load(std::memory_order_relaxed));
// We might be called in the course of handling a memory allocation. We do
@@ -133,7 +133,11 @@ constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option
const base::FeatureParam<BackupRefPtrEnabledProcesses>
kBackupRefPtrEnabledProcessesParam{
&kPartitionAllocBackupRefPtr, "enabled-processes",
#if PA_BUILDFLAG(IS_MAC) && PA_BUILDFLAG(PA_ARCH_CPU_ARM64)
BackupRefPtrEnabledProcesses::kNonRenderer,
#else
BackupRefPtrEnabledProcesses::kAllProcesses,
#endif
&kBackupRefPtrEnabledProcessesOptions};
constexpr FeatureParam<BackupRefPtrMode>::Option kBackupRefPtrModeOptions[] = {
@@ -167,6 +171,15 @@ const base::FeatureParam<MemtagMode> kMemtagModeParam{
#endif
&kMemtagModeOptions};
constexpr FeatureParam<RetagMode>::Option kRetagModeOptions[] = {
{RetagMode::kIncrement, "increment"},
{RetagMode::kRandom, "random"},
};
const base::FeatureParam<RetagMode> kRetagModeParam{
&kPartitionAllocMemoryTagging, "retag-mode", RetagMode::kIncrement,
&kRetagModeOptions};
constexpr FeatureParam<MemoryTaggingEnabledProcesses>::Option
kMemoryTaggingEnabledProcessesOptions[] = {
{MemoryTaggingEnabledProcesses::kBrowserOnly, "browser-only"},
@@ -100,6 +100,14 @@ enum class MemtagMode {
kAsync,
};
enum class RetagMode {
// Allocations are retagged by incrementing the current tag.
kIncrement,
// Allocations are retagged with a random tag.
kRandom,
};
enum class MemoryTaggingEnabledProcesses {
// Memory tagging enabled only in the browser process.
kBrowserOnly,
@@ -139,6 +147,7 @@ extern const BASE_EXPORT base::FeatureParam<BackupRefPtrMode>
kBackupRefPtrModeParam;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMemoryTagging);
extern const BASE_EXPORT base::FeatureParam<MemtagMode> kMemtagModeParam;
extern const BASE_EXPORT base::FeatureParam<RetagMode> kRetagModeParam;
extern const BASE_EXPORT base::FeatureParam<MemoryTaggingEnabledProcesses>
kMemoryTaggingEnabledProcessesParam;
// Kill switch for memory tagging. Skips any code related to memory tagging when
@@ -15,6 +15,7 @@
#include "base/allocator/partition_alloc_features.h"
#include "base/at_exit.h"
#include "base/check.h"
#include "base/containers/span.h"
#include "base/cpu.h"
#include "base/debug/dump_without_crashing.h"
#include "base/debug/stack_trace.h"
@@ -602,14 +603,22 @@ void CheckDanglingRawPtrBufferEmpty() {
<< entry->task_trace << "\n"
<< entry->stack_trace << "\n";
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_INSTANCE_TRACER)
auto is_frame_ptr_not_null = [](const void* frame_ptr) {
return frame_ptr != nullptr;
};
std::vector<std::array<const void*, 32>> stack_traces =
internal::InstanceTracer::GetStackTracesForDanglingRefs(entry->id);
for (const auto& raw_stack_trace : stack_traces) {
CHECK(ranges::is_partitioned(raw_stack_trace, is_frame_ptr_not_null))
<< "`raw_stack_trace` is expected to be partitioned: non-null values "
"at the begining followed by `nullptr`s.";
LOG(ERROR) << "Dangling reference from:\n";
LOG(ERROR) << debug::StackTrace(raw_stack_trace.data(),
raw_stack_trace.size() -
static_cast<size_t>(ranges::count(
raw_stack_trace, nullptr)))
LOG(ERROR) << debug::StackTrace(
// This call truncates the `nullptr` tail of the stack
// trace (see the `is_partitioned` CHECK above).
make_span(raw_stack_trace.begin(),
ranges::partition_point(
raw_stack_trace, is_frame_ptr_not_null)))
<< "\n";
}
#else
@@ -83,6 +83,19 @@ declare_args() {
use_partition_alloc_as_malloc_default
}
declare_args() {
# Whether PartitionAlloc dispatch can be replaced with another dispatch with
# some more safety checks at runtime or not. When true, the allocator shim
# provides an extended API to swap PartitionAlloc.
# TODO(https://crbug.com/351974425): Enable this when `use_partition_alloc_as_malloc` is true.
enable_allocator_shim_partition_alloc_dispatch_with_advanced_checks_support =
false
}
assert(
!enable_allocator_shim_partition_alloc_dispatch_with_advanced_checks_support || use_partition_alloc_as_malloc,
"PartitionAlloc with advanced checks requires PartitionAlloc itself.")
assert(!use_allocator_shim || (is_android || is_apple || is_chromeos ||
is_fuchsia || is_linux || is_win),
"The allocator shim does not (yet) support the platform.")
@@ -92,8 +105,23 @@ if (use_allocator_shim && is_win) {
# build, and it's very easy to override it partially and to be inconsistent
# among allocations and deallocations. Then, we'll crash when PA deallocates
# a memory region allocated by the CRT's malloc or vice versa.
assert(!is_component_build,
"The allocator shim doesn't work for the component build on Windows.")
# Since PartitionAlloc depends on libc++, it is difficult to link libc++.dll
# with PartitionAlloc to replace its allocator with PartitionAlloc.
# If using libcxx_is_shared=true,
# a. since inline methods or inline functions defined in some libc++ headers,
# e.g. vector, use new, malloc(), and so on, the memory allocation will
# be done inside a client code.
# b. on the other hand, libc++.dll deallocates the memory allocated by the
# inline methods or inline functions. It will not be run inside the client
# code.
# So a.'s allocation is done by PartitionAlloc, but b.'s deallocation is
# done by system allocator. This will cause heap check failure (WinHeap
# doesn't know PartitionAlloc) and crash.
# If libcxx_is_shared=false, libc++ is a static library. All libc++ code
# will be run inside the client. The above issue will disappear.
assert(
!is_component_build || (!libcxx_is_shared && !is_debug),
"The allocator shim for the Windows component build needs !libcxx_is_shared && !is_debug.")
}
declare_args() {
@@ -40,6 +40,12 @@ enable_pointer_compression =
# as a buildflag.
dchecks_are_on = is_debug || dcheck_always_on
# Building PartitionAlloc for Windows component build.
# Currently use build_with_chromium not to affect any third_party code,
# but if any third_party code wants to use, remove build_with_chromium.
use_partition_alloc_as_malloc_on_win_component_build =
build_with_chromium && is_win && is_component_build
# TODO(crbug.com/40276913): Split PartitionAlloc into a public and
# private parts. The public config would include add the "./include" dir and
# the private config would add the "./src" dir.
@@ -162,6 +168,7 @@ pa_buildflag_header("buildflags") {
"USE_RAW_PTR_ASAN_UNOWNED_IMPL=$use_raw_ptr_asan_unowned_impl",
"USE_RAW_PTR_BACKUP_REF_IMPL=$use_raw_ptr_backup_ref_impl",
"USE_RAW_PTR_HOOKABLE_IMPL=$use_raw_ptr_hookable_impl",
"ENABLE_ALLOCATOR_SHIM_PARTITION_ALLOC_DISPATCH_WITH_ADVANCED_CHECKS_SUPPORT=$enable_allocator_shim_partition_alloc_dispatch_with_advanced_checks_support",
"DCHECKS_ARE_ON=$dchecks_are_on",
"EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
"DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
@@ -225,11 +232,13 @@ component("raw_ptr") {
sources += [ "pointers/raw_ptr_noop_impl.h" ]
sources += [ "pointers/empty.cc" ]
}
public_deps = [ ":build_config" ]
public_deps = [
":build_config",
":buildflags",
]
if (use_partition_alloc) {
public_deps += [ ":partition_alloc" ]
}
deps = [ ":buildflags" ]
# See also: `partition_alloc_base/component_export.h`
defines = [ "IS_RAW_PTR_IMPL" ]
@@ -330,12 +339,40 @@ if (is_clang_or_gcc) {
":allocator_base",
":allocator_core",
":allocator_shim",
":buildflags",
]
}
if (is_win && is_component_build) {
group("win_component_build_adapter") {
# Currently guard this target by using build_with_chromium to avoid
# any issues on third_party build. But if any third_party code wants to
# use allocator_shim for its own component build, we will remove this
# guard.
if (build_with_chromium) {
if (use_allocator_shim) {
public_deps = [
":allocator_base",
":allocator_shim",
]
}
}
# If not with chromium, currently do nothing.
}
}
component("allocator_core") {
visibility = [ ":*" ]
if (use_partition_alloc_as_malloc_on_win_component_build) {
# Since common_deps defined in //build/config/BUILD.gn depends on
# PartitionAlloc for PartitionAlloc-Everywhere, we need no_default_deps
# here, because default deps includes common_deps dependency.
# Without no_defaults_deps=true, we will see cyclic deps:
# common_deps=>PartitionAlloc=>common_deps
no_default_deps = true
}
sources = [
"aarch64_support.h",
"address_pool_manager.cc",
@@ -484,6 +521,12 @@ if (is_clang_or_gcc) {
":wexit_time_destructors",
]
deps = [ ":allocator_base" ]
if (use_partition_alloc_as_malloc_on_win_component_build) {
# We need to add explicit libc++ dependency here because of
# no_default_deps=true.
deps += [ "//buildtools/third_party/libc++:libc++" ]
}
public_configs = []
if (is_fuchsia) {
deps += [
@@ -527,6 +570,14 @@ if (is_clang_or_gcc) {
component("allocator_base") {
visibility = [ ":*" ]
if (use_partition_alloc_as_malloc_on_win_component_build) {
# Since common_deps defined in //build/config/BUILD.gn depends on
# PartitionAlloc for PartitionAlloc-Everywhere, we need no_default_deps
# here, because default deps includes common_deps dependency.
# Without no_defaults_deps=true, we will see cyclic deps:
# common_deps=>PartitionAlloc=>common_deps
no_default_deps = true
}
sources = [
"partition_alloc_base/atomic_ref_count.h",
@@ -706,6 +757,11 @@ if (is_clang_or_gcc) {
]
deps = []
if (use_partition_alloc_as_malloc_on_win_component_build) {
# We need to add explicit libc++ dependency here because of
# no_default_deps=true.
deps += [ "//buildtools/third_party/libc++:libc++" ]
}
if (is_fuchsia) {
public_deps += [ "//third_party/fuchsia-sdk/sdk/pkg/fit" ]
}
@@ -761,6 +817,10 @@ if (is_clang_or_gcc) {
shim_headers +=
[ "shim/allocator_shim_default_dispatch_to_partition_alloc.h" ]
}
if (enable_allocator_shim_partition_alloc_dispatch_with_advanced_checks_support) {
shim_sources += [ "shim/allocator_shim_default_dispatch_to_partition_alloc_with_advanced_checks.cc" ]
shim_headers += [ "shim/allocator_shim_default_dispatch_to_partition_alloc_with_advanced_checks.h" ]
}
if (is_android) {
shim_headers += [
"shim/allocator_shim_override_cpp_symbols.h",
@@ -556,7 +556,7 @@ void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) {
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
// This function just exists to static_assert the layout of the private fields
// in Pool.
// in Pool. It is never called.
void AddressPoolManager::AssertThreadIsolatedLayout() {
constexpr size_t last_pool_offset =
offsetof(AddressPoolManager, pools_) + sizeof(Pool) * (kNumPools - 1);
@@ -113,6 +113,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool GetStats(AddressSpaceStats* stats);
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
// This function just exists to static_assert the layout of the private fields
// in Pool. It is never called.
static void AssertThreadIsolatedLayout();
#endif // PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
@@ -180,9 +180,9 @@ class EncodedNextFreelistEntry {
// Regular freelists always point to an entry within the same super page.
//
// This is most likely a PartitionAlloc bug if this triggers.
if (PA_UNLIKELY(entry &&
(SlotStartPtr2Addr(this) & kSuperPageBaseMask) !=
(SlotStartPtr2Addr(entry) & kSuperPageBaseMask))) {
if (entry && (SlotStartPtr2Addr(this) & kSuperPageBaseMask) !=
(SlotStartPtr2Addr(entry) & kSuperPageBaseMask))
[[unlikely]] {
FreelistCorruptionDetected(0);
}
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
@@ -220,7 +220,7 @@ class EncodedNextFreelistEntry {
}
auto* ret = encoded_next_.Decode();
if (PA_UNLIKELY(!IsWellFormed<for_thread_cache>(this, ret))) {
if (!IsWellFormed<for_thread_cache>(this, ret)) [[unlikely]] {
if constexpr (crash_on_corruption) {
// Put the corrupted data on the stack, it may give us more information
// about what kind of corruption that was.
@@ -46,12 +46,6 @@ constexpr inline IfEnum<EnumType, bool> ContainsFlags(EnumType superset,
return (superset & subset) == subset;
}
// Removes flags `target` from `from`.
template <typename EnumType>
constexpr inline IfEnum<EnumType> RemoveFlags(EnumType from, EnumType target) {
return from & ~target;
}
// A macro to define binary arithmetic over `EnumType`.
// Use inside `namespace partition_alloc::internal`.
#define PA_DEFINE_OPERATORS_FOR_FLAGS(EnumType) \
@@ -42,8 +42,7 @@ void* GwpAsanSupport::MapRegion(size_t slot_count,
const size_t kSuperPagePayloadStartOffset =
internal::SuperPagePayloadStartOffset(
/* is_managed_by_normal_buckets = */ true,
/* with_quarantine = */ false);
/* is_managed_by_normal_buckets = */ true);
PA_CHECK(kSuperPagePayloadStartOffset % kSlotSize == 0);
const size_t kSuperPageGwpAsanSlotAreaBeginOffset =
kSuperPagePayloadStartOffset;
@@ -102,7 +101,7 @@ void* GwpAsanSupport::MapRegion(size_t slot_count,
auto slot_start = slot_span_start + slot_idx * kSlotSize;
PartitionRoot::InSlotMetadataPointerFromSlotStartAndSize(slot_start,
kSlotSize)
->InitalizeForGwpAsan();
->InitializeForGwpAsan();
size_t global_slot_idx = (slot_start - super_page_span_start -
kSuperPageGwpAsanSlotAreaBeginOffset) /
kSlotSize;
@@ -215,8 +215,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
#if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
// If a dangling raw_ptr<> was detected, report it.
if (PA_UNLIKELY((old_count & kDanglingRawPtrDetectedBit) ==
kDanglingRawPtrDetectedBit)) {
if ((old_count & kDanglingRawPtrDetectedBit) == kDanglingRawPtrDetectedBit)
[[unlikely]] {
partition_alloc::internal::DanglingRawPtrReleased(
reinterpret_cast<uintptr_t>(this));
}
@@ -264,13 +264,13 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
// detection mechanism isn't perfect, because in-slot-metadata can be
// overwritten by the freelist pointer (or its shadow) for very small slots,
// thus masking the error away.
if (PA_UNLIKELY(!(old_count & kMemoryHeldByAllocatorBit))) {
if (!(old_count & kMemoryHeldByAllocatorBit)) [[unlikely]] {
DoubleFreeOrCorruptionDetected(old_count);
}
// Release memory when no raw_ptr<> exists anymore:
static constexpr CountType mask = kPtrCountMask | kUnprotectedPtrCountMask;
if (PA_LIKELY((old_count & mask) == 0)) {
if ((old_count & mask) == 0) [[likely]] {
std::atomic_thread_fence(std::memory_order_acquire);
// The allocation is about to get freed, so clear the cookie.
ClearCookieIfSupported();
@@ -317,6 +317,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
// Request to quarantine this allocation. The request might be ignored if
// the allocation is already freed.
// TODO(crbug.com/329027914) This is an unused function. Start using it in
// tests and/or in production code.
PA_ALWAYS_INLINE void SetQuarantineRequest() {
CountType old_count =
count_.fetch_or(kRequestQuarantineBit, std::memory_order_relaxed);
@@ -325,6 +327,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
}
// Get and clear out quarantine request.
// TODO(crbug.com/329027914) This is an unused function. Start using it in
// tests and/or in production code.
PA_ALWAYS_INLINE bool PopQuarantineRequest() {
CountType old_count =
count_.fetch_and(~kRequestQuarantineBit, std::memory_order_acq_rel);
@@ -337,7 +341,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
// make sure the `raw_ptr<T>` release operation will never attempt to call the
// PA `free` on such a slot. GWP-ASan takes the extra reference into account
// when determining whether the slot can be reused.
PA_ALWAYS_INLINE void InitalizeForGwpAsan() {
PA_ALWAYS_INLINE void InitializeForGwpAsan() {
#if PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE)
brp_cookie_ = CalculateCookie();
#endif
@@ -369,7 +373,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
#if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
// The `kPtrCountMask` counts the number of raw_ptr<T>. It is expected to be
// zero when there are no unexpected dangling pointers.
if (PA_LIKELY((count & kPtrCountMask) == 0)) {
if ((count & kPtrCountMask) == 0) [[likely]] {
return;
}
@@ -404,9 +408,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
// - A raw_ptr<T, DisableDanglingPtrDetection>
//
// Assuming this raw_ptr is not dangling, the memory must still be held at
// least by the allocator, so this is PA_LIKELY true.
if (PA_LIKELY((count & (kMemoryHeldByAllocatorBit | kPtrCountMask |
kUnprotectedPtrCountMask)))) {
// least by the allocator, so this is `[[likely]]`.
if ((count & (kMemoryHeldByAllocatorBit | kPtrCountMask |
kUnprotectedPtrCountMask))) [[likely]] {
return false; // Do not release the memory.
}
@@ -542,7 +546,7 @@ PA_ALWAYS_INLINE InSlotMetadata* InSlotMetadataPointer(uintptr_t slot_start,
// the InSlotMetadata object out-of-line in this case, specifically in a
// special table after the super page metadata (see InSlotMetadataTable in
// partition_alloc_constants.h).
if (PA_LIKELY(slot_start & SystemPageOffsetMask())) {
if (slot_start & SystemPageOffsetMask()) [[likely]] {
uintptr_t refcount_address =
slot_start + slot_size - sizeof(InSlotMetadata);
#if PA_BUILDFLAG(DCHECKS_ARE_ON) || \
@@ -48,6 +48,8 @@ T* ConstructAtInternalPartition(Args&&... args) {
}
// Destroy an object on heap in the internal partition.
// TODO(crbug.com/40274826) This is an unused function. Start using it in tests
// and/or in production code.
template <typename T>
void DestroyAtInternalPartition(T* ptr) {
// Destroying an array is not supported.
@@ -67,6 +67,8 @@ template <typename T, typename... Args>
T* ConstructAtInternalPartition(Args&&... args);
// Destroy an object on heap in the internal partition.
// TODO(crbug.com/40274826) This is an unused function. Start using it in tests
// and/or in production code.
template <typename T>
void DestroyAtInternalPartition(T* ptr);
@@ -91,7 +91,7 @@ PageAllocationGranularityShift() {
// arm64 supports 4kb (shift = 12), 16kb (shift = 14), and 64kb (shift = 16)
// page sizes. Retrieve from or initialize cache.
size_t shift = page_characteristics.shift.load(std::memory_order_relaxed);
if (PA_UNLIKELY(shift == 0)) {
if (shift == 0) [[unlikely]] {
shift = static_cast<size_t>(
__builtin_ctz((unsigned int)PageAllocationGranularity()));
page_characteristics.shift.store(shift, std::memory_order_relaxed);
@@ -132,7 +132,7 @@ PageAllocationGranularity() {
// arm64 supports 4kb, 16kb, and 64kb page sizes. Retrieve from or
// initialize cache.
size_t size = page_characteristics.size.load(std::memory_order_relaxed);
if (PA_UNLIKELY(size == 0)) {
if (size == 0) [[unlikely]] {
size = static_cast<size_t>(getpagesize());
page_characteristics.size.store(size, std::memory_order_relaxed);
}
@@ -533,7 +533,7 @@ void PartitionAddressSpace::MapMetadata(uintptr_t super_page,
PA_CHECK(ptr != MAP_FAILED);
PA_CHECK(ptr == reinterpret_cast<void*>(writable_metadata));
if (PA_UNLIKELY(copy_metadata)) {
if (copy_metadata) [[unlikely]] {
// Copy the metadata from the private and copy-on-write page to
// the shared page. (=update the memory file)
memcpy(reinterpret_cast<void*>(writable_metadata),
@@ -321,6 +321,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
// mean the given |ptr| is valid. Because we don't use the entire address
// space for the shadow. We only use 2 SystemPageSize() / kSuperPageSize(%)
// of the space. See PoolShadowOffset().
//
// TODO(crbug.com/40238514) This is an unused function. Start using it in
// tests and/or in production code.
PA_ALWAYS_INLINE static bool IsInPoolShadow(const void* ptr) {
uintptr_t ptr_as_uintptr = reinterpret_cast<uintptr_t>(ptr);
return (pool_shadow_address_ <= ptr_as_uintptr &&
@@ -513,14 +516,20 @@ PA_ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) {
#if !PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
PA_DCHECK(!internal::PartitionAddressSpace::IsInBRPPool(address));
#endif
return internal::PartitionAddressSpace::IsInRegularPool(address)
return
#if PA_BUILDFLAG(GLUE_CORE_POOLS)
internal::PartitionAddressSpace::IsInCorePools(address)
#else
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|| internal::PartitionAddressSpace::IsInBRPPool(address)
internal::PartitionAddressSpace::IsInBRPPool(address) ||
#endif
internal::PartitionAddressSpace::IsInRegularPool(address)
#endif // PA_BUILDFLAG(GLUE_CORE_POOLS)
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
|| internal::PartitionAddressSpace::IsInThreadIsolatedPool(address)
|| internal::PartitionAddressSpace::IsInThreadIsolatedPool(address)
#endif
|| internal::PartitionAddressSpace::IsInConfigurablePool(address);
|| internal::PartitionAddressSpace::IsInConfigurablePool(address);
}
// Returns false for nullptr.
@@ -79,7 +79,7 @@ PA_ALWAYS_INLINE void DebugMemset(void* ptr, int value, size_t size) {
#if !PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_ALWAYS_INLINE bool RandomPeriod() {
static thread_local uint8_t counter = 0;
if (PA_UNLIKELY(counter == 0)) {
if (counter == 0) [[unlikely]] {
// It's OK to truncate this value.
counter = static_cast<uint8_t>(RandomValue());
}
@@ -87,24 +87,22 @@ PA_ALWAYS_INLINE constexpr
typename std::enable_if<std::is_unsigned_v<T> && sizeof(T) <= 8, int>::type
CountlZero(T value) {
static_assert(bits > 0, "invalid instantiation");
if (value) [[likely]] {
#if PA_BUILDFLAG(PA_COMPILER_MSVC) && !defined(__clang__)
// We would prefer to use the _BitScanReverse(64) intrinsics, but they
// aren't constexpr and thus unusable here.
if (PA_LIKELY(value)) {
// We would prefer to use the _BitScanReverse(64) intrinsics, but they
// aren't constexpr and thus unusable here.
int leading_zeros = 0;
constexpr T kMostSignificantBitMask = 1ull << (bits - 1);
for (; !(value & kMostSignificantBitMask); value <<= 1, ++leading_zeros) {
}
return leading_zeros;
#else
return bits == 64
? __builtin_clzll(static_cast<uint64_t>(value))
: __builtin_clz(static_cast<uint32_t>(value)) - (32 - bits);
#endif
}
return bits;
#else
return PA_LIKELY(value)
? bits == 64
? __builtin_clzll(static_cast<uint64_t>(value))
: __builtin_clz(static_cast<uint32_t>(value)) - (32 - bits)
: bits;
#endif // PA_BUILDFLAG(PA_COMPILER_MSVC) && !defined(__clang__)
}
// Backport of C++20 std::countr_zero in <bit>.
@@ -115,24 +113,21 @@ template <typename T, int bits = sizeof(T) * 8>
PA_ALWAYS_INLINE constexpr
typename std::enable_if<std::is_unsigned_v<T> && sizeof(T) <= 8, int>::type
CountrZero(T value) {
if (value) [[likely]] {
#if PA_BUILDFLAG(PA_COMPILER_MSVC) && !defined(__clang__)
// We would prefer to use the _BitScanForward(64) intrinsics, but they
// aren't constexpr and thus unusable here.
if (PA_LIKELY(value)) {
// We would prefer to use the _BitScanForward(64) intrinsics, but they
// aren't constexpr and thus unusable here.
int trailing_zeros = 0;
constexpr T kLeastSignificantBitMask = 1ull;
for (; !(value & kLeastSignificantBitMask); value >>= 1, ++trailing_zeros) {
}
return trailing_zeros;
#else
return bits == 64 ? __builtin_ctzll(static_cast<uint64_t>(value))
: __builtin_ctz(static_cast<uint32_t>(value));
#endif
}
return bits;
#else
return PA_LIKELY(value) ? bits == 64
? __builtin_ctzll(static_cast<uint64_t>(value))
: __builtin_ctz(static_cast<uint32_t>(value))
: bits;
#endif // PA_BUILDFLAG(PA_COMPILER_MSVC) && !defined(__clang__)
}
// Backport of C++20 std::bit_width in <bit>.
@@ -147,23 +147,31 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) NotImplemented
#if defined(OFFICIAL_BUILD) && !PA_BUILDFLAG(DCHECKS_ARE_ON)
// TODO(crbug.com/357081797): Use `[[unlikely]]` instead when there's a way to
// switch the expression below to a statement without breaking
// -Wthread-safety-analysis.
#if PA_HAS_BUILTIN(__builtin_expect)
#define PA_BASE_INTERNAL_EXPECT_FALSE(cond) __builtin_expect(!(cond), 0)
#else
#define PA_BASE_INTERNAL_EXPECT_FALSE(cond) !(cond)
#endif
// Discard log strings to reduce code bloat.
//
// This is not calling BreakDebugger since this is called frequently, and
// calling an out-of-line function instead of a noreturn inline macro prevents
// compiler optimizations.
#define PA_BASE_CHECK(condition) \
PA_UNLIKELY(!(condition)) ? PA_IMMEDIATE_CRASH() \
: PA_EAT_CHECK_STREAM_PARAMS()
#define PA_BASE_CHECK(cond) \
PA_BASE_INTERNAL_EXPECT_FALSE(cond) ? PA_IMMEDIATE_CRASH() \
: PA_EAT_CHECK_STREAM_PARAMS()
#define PA_BASE_CHECK_WILL_STREAM() false
#define PA_BASE_PCHECK(condition) \
#define PA_BASE_PCHECK(cond) \
PA_LAZY_CHECK_STREAM( \
::partition_alloc::internal::logging::check_error::PCheck(__FILE__, \
__LINE__) \
.stream(), \
PA_UNLIKELY(!(condition)))
PA_BASE_INTERNAL_EXPECT_FALSE(cond))
#else
@@ -66,6 +66,16 @@
#define PA_NOT_TAIL_CALLED
#endif
// Annotate a function indicating it must be tail called.
// Can be used only on return statements, even for functions returning void.
// Caller and callee must have the same number of arguments and its types must
// be "similar".
#if defined(__clang__) && PA_HAS_ATTRIBUTE(musttail)
#define PA_MUSTTAIL [[clang::musttail]]
#else
#define PA_MUSTTAIL
#endif
// Specify memory alignment for structs, classes, etc.
// Use like:
// class PA_ALIGNAS(16) MyClass { ... }
@@ -67,9 +67,11 @@ class CheckedNumeric {
#endif
constexpr bool
AssignIfValid(Dst* result) const {
return PA_BASE_NUMERICS_LIKELY(IsValid<Dst>())
? ((*result = static_cast<Dst>(state_.value())), true)
: false;
if (IsValid<Dst>()) [[likely]] {
*result = static_cast<Dst>(state_.value());
return true;
}
return false;
}
// ValueOrDie() - The primary accessor for the underlying value. If the
@@ -82,9 +84,10 @@ class CheckedNumeric {
// the underlying value, and it is not available through other means.
template <typename Dst = T, class CheckHandler = CheckOnFailure>
constexpr StrictNumeric<Dst> ValueOrDie() const {
return PA_BASE_NUMERICS_LIKELY(IsValid<Dst>())
? static_cast<Dst>(state_.value())
: CheckHandler::template HandleFailure<Dst>();
if (IsValid<Dst>()) [[likely]] {
return static_cast<Dst>(state_.value());
}
return CheckHandler::template HandleFailure<Dst>();
}
// ValueOrDefault(T default_value) - A convenience method that returns the
@@ -95,9 +98,10 @@ class CheckedNumeric {
// if the supplied default_value is not within range of the destination type.
template <typename Dst = T, typename Src>
constexpr StrictNumeric<Dst> ValueOrDefault(const Src default_value) const {
return PA_BASE_NUMERICS_LIKELY(IsValid<Dst>())
? static_cast<Dst>(state_.value())
: checked_cast<Dst>(default_value);
if (IsValid<Dst>()) [[likely]] {
return static_cast<Dst>(state_.value());
}
return checked_cast<Dst>(default_value);
}
// Returns a checked numeric of the specified type, cast from the current
@@ -63,9 +63,8 @@ struct CheckedAddOp<T,
FastPromotion>::type;
// Fail if either operand is out of range for the promoted type.
// TODO(jschuh): This could be made to work for a broader range of values.
if (PA_BASE_NUMERICS_UNLIKELY(
!IsValueInRangeForNumericType<Promotion>(x) ||
!IsValueInRangeForNumericType<Promotion>(y))) {
if (!IsValueInRangeForNumericType<Promotion>(x) ||
!IsValueInRangeForNumericType<Promotion>(y)) [[unlikely]] {
return false;
}
@@ -130,9 +129,8 @@ struct CheckedSubOp<T,
FastPromotion>::type;
// Fail if either operand is out of range for the promoted type.
// TODO(jschuh): This could be made to work for a broader range of values.
if (PA_BASE_NUMERICS_UNLIKELY(
!IsValueInRangeForNumericType<Promotion>(x) ||
!IsValueInRangeForNumericType<Promotion>(y))) {
if (!IsValueInRangeForNumericType<Promotion>(x) ||
!IsValueInRangeForNumericType<Promotion>(y)) [[unlikely]] {
return false;
}
@@ -192,10 +190,9 @@ struct CheckedMulOp<T,
using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
// Verify the destination type can hold the result (always true for 0).
if (PA_BASE_NUMERICS_UNLIKELY(
(!IsValueInRangeForNumericType<Promotion>(x) ||
!IsValueInRangeForNumericType<Promotion>(y)) &&
x && y)) {
if ((!IsValueInRangeForNumericType<Promotion>(x) ||
!IsValueInRangeForNumericType<Promotion>(y)) &&
x && y) [[unlikely]] {
return false;
}
@@ -232,27 +229,24 @@ struct CheckedDivOp<T,
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
if (PA_BASE_NUMERICS_UNLIKELY(!y)) {
if (!y) [[unlikely]] {
return false;
}
// The overflow check can be compiled away if we don't have the exact
// combination of types needed to trigger this case.
using Promotion = typename BigEnoughPromotion<T, U>::type;
if (PA_BASE_NUMERICS_UNLIKELY(
(std::is_signed_v<T> && std::is_signed_v<U> &&
IsTypeInRangeForNumericType<T, Promotion>::value &&
static_cast<Promotion>(x) ==
std::numeric_limits<Promotion>::lowest() &&
y == static_cast<U>(-1)))) {
if (std::is_signed_v<T> && std::is_signed_v<U> &&
IsTypeInRangeForNumericType<T, Promotion>::value &&
static_cast<Promotion>(x) == std::numeric_limits<Promotion>::lowest() &&
y == static_cast<U>(-1)) [[unlikely]] {
return false;
}
// This branch always compiles away if the above branch wasn't removed.
if (PA_BASE_NUMERICS_UNLIKELY(
(!IsValueInRangeForNumericType<Promotion>(x) ||
!IsValueInRangeForNumericType<Promotion>(y)) &&
x)) {
if ((!IsValueInRangeForNumericType<Promotion>(x) ||
!IsValueInRangeForNumericType<Promotion>(y)) &&
x) [[unlikely]] {
return false;
}
@@ -276,17 +270,15 @@ struct CheckedModOp<T,
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
if (PA_BASE_NUMERICS_UNLIKELY(!y)) {
if (!y) [[unlikely]] {
return false;
}
using Promotion = typename BigEnoughPromotion<T, U>::type;
if (PA_BASE_NUMERICS_UNLIKELY(
(std::is_signed_v<T> && std::is_signed_v<U> &&
IsTypeInRangeForNumericType<T, Promotion>::value &&
static_cast<Promotion>(x) ==
std::numeric_limits<Promotion>::lowest() &&
y == static_cast<U>(-1)))) {
if (std::is_signed_v<T> && std::is_signed_v<U> &&
IsTypeInRangeForNumericType<T, Promotion>::value &&
static_cast<Promotion>(x) == std::numeric_limits<Promotion>::lowest() &&
y == static_cast<U>(-1)) [[unlikely]] {
*result = 0;
return true;
}
@@ -316,9 +308,9 @@ struct CheckedLshOp<T,
template <typename V>
static constexpr bool Do(T x, U shift, V* result) {
// Disallow negative numbers and verify the shift is in bounds.
if (PA_BASE_NUMERICS_LIKELY(
!IsValueNegative(x) &&
as_unsigned(shift) < as_unsigned(std::numeric_limits<T>::digits))) {
if (!IsValueNegative(x) &&
as_unsigned(shift) < as_unsigned(std::numeric_limits<T>::digits))
[[likely]] {
// Shift as unsigned to avoid undefined behavior.
*result = static_cast<V>(as_unsigned(x) << shift);
// If the shift can be reversed, we know it was valid.
@@ -350,8 +342,7 @@ struct CheckedRshOp<T,
template <typename V>
static constexpr bool Do(T x, U shift, V* result) {
// Use sign conversion to push negative values out of range.
if (PA_BASE_NUMERICS_UNLIKELY(as_unsigned(shift) >=
IntegerBitsPlusSign<T>::value)) {
if (as_unsigned(shift) >= IntegerBitsPlusSign<T>::value) [[unlikely]] {
return false;
}
@@ -86,9 +86,10 @@ struct ClampedAddOp<T,
"provided types.");
const V saturated = CommonMaxOrMin<V>(IsValueNegative(y));
V result = {};
return PA_BASE_NUMERICS_LIKELY((CheckedAddOp<T, U>::Do(x, y, &result)))
? result
: saturated;
if (CheckedAddOp<T, U>::Do(x, y, &result)) [[likely]] {
return result;
}
return saturated;
}
};
@@ -113,9 +114,10 @@ struct ClampedSubOp<T,
"provided types.");
const V saturated = CommonMaxOrMin<V>(!IsValueNegative(y));
V result = {};
return PA_BASE_NUMERICS_LIKELY((CheckedSubOp<T, U>::Do(x, y, &result)))
? result
: saturated;
if (CheckedSubOp<T, U>::Do(x, y, &result)) [[likely]] {
return result;
}
return saturated;
}
};
@@ -137,9 +139,10 @@ struct ClampedMulOp<T,
V result = {};
const V saturated =
CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
return PA_BASE_NUMERICS_LIKELY((CheckedMulOp<T, U>::Do(x, y, &result)))
? result
: saturated;
if (CheckedMulOp<T, U>::Do(x, y, &result)) [[likely]] {
return result;
}
return saturated;
}
};
@@ -155,7 +158,7 @@ struct ClampedDivOp<T,
template <typename V = result_type>
static constexpr V Do(T x, U y) {
V result = {};
if (PA_BASE_NUMERICS_LIKELY((CheckedDivOp<T, U>::Do(x, y, &result)))) {
if ((CheckedDivOp<T, U>::Do(x, y, &result))) [[likely]] {
return result;
}
// Saturation goes to max, min, or NaN (if x is zero).
@@ -176,9 +179,10 @@ struct ClampedModOp<T,
template <typename V = result_type>
static constexpr V Do(T x, U y) {
V result = {};
return PA_BASE_NUMERICS_LIKELY((CheckedModOp<T, U>::Do(x, y, &result)))
? result
: x;
if (CheckedModOp<T, U>::Do(x, y, &result)) [[likely]] {
return result;
}
return x;
}
};
@@ -196,11 +200,11 @@ struct ClampedLshOp<T,
template <typename V = result_type>
static constexpr V Do(T x, U shift) {
static_assert(!std::is_signed_v<U>, "Shift value must be unsigned.");
if (PA_BASE_NUMERICS_LIKELY(shift < std::numeric_limits<T>::digits)) {
if (shift < std::numeric_limits<T>::digits) [[likely]] {
// Shift as unsigned to avoid undefined behavior.
V result = static_cast<V>(as_unsigned(x) << shift);
// If the shift can be reversed, we know it was valid.
if (PA_BASE_NUMERICS_LIKELY(result >> shift == x)) {
if (result >> shift == x) [[likely]] {
return result;
}
}
@@ -223,9 +227,10 @@ struct ClampedRshOp<T,
static_assert(!std::is_signed_v<U>, "Shift value must be unsigned.");
// Signed right shift is odd, because it saturates to -1 or 0.
const V saturated = as_unsigned(V(0)) - IsValueNegative(x);
return PA_BASE_NUMERICS_LIKELY(shift < IntegerBitsPlusSign<T>::value)
? saturated_cast<V>(x >> shift)
: saturated;
if (shift < IntegerBitsPlusSign<T>::value) [[likely]] {
return saturated_cast<V>(x >> shift);
}
return saturated;
}
};
@@ -108,9 +108,10 @@ constexpr Dst checked_cast(Src value) {
// This throws a compile-time error on evaluating the constexpr if it can be
// determined at compile-time as failing, otherwise it will CHECK at runtime.
using SrcType = typename internal::UnderlyingType<Src>::type;
return PA_BASE_NUMERICS_LIKELY((IsValueInRangeForNumericType<Dst>(value)))
? static_cast<Dst>(static_cast<SrcType>(value))
: CheckHandler::template HandleFailure<Dst>();
if (IsValueInRangeForNumericType<Dst>(value)) [[likely]] {
return static_cast<Dst>(static_cast<SrcType>(value));
}
return CheckHandler::template HandleFailure<Dst>();
}
// Default boundaries for integral/float: max/infinity, lowest/-infinity, 0/NaN.
@@ -188,9 +189,10 @@ struct SaturateFastOp<Dst,
const Dst saturated = CommonMaxOrMin<Dst, Src>(
IsMaxInRangeForNumericType<Dst, Src>() ||
(!IsMinInRangeForNumericType<Dst, Src>() && IsValueNegative(value)));
return PA_BASE_NUMERICS_LIKELY(IsValueInRangeForNumericType<Dst>(value))
? static_cast<Dst>(value)
: saturated;
if (IsValueInRangeForNumericType<Dst>(value)) [[likely]] {
return static_cast<Dst>(value);
}
return saturated;
}
};
@@ -9,14 +9,6 @@
#include <limits>
#include <type_traits>
#if defined(__GNUC__) || defined(__clang__)
#define PA_BASE_NUMERICS_LIKELY(x) __builtin_expect(!!(x), 1)
#define PA_BASE_NUMERICS_UNLIKELY(x) __builtin_expect(!!(x), 0)
#else
#define PA_BASE_NUMERICS_LIKELY(x) (x)
#define PA_BASE_NUMERICS_UNLIKELY(x) (x)
#endif
namespace partition_alloc::internal::base::internal {
// The std library doesn't provide a binary max_exponent for integers, however
@@ -873,6 +873,14 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) TimeTicks
// realtime clock to establish a reference point. This function will return
// the same value for the duration of the application, but will be different
// in future application runs.
// DEPRECATED:
// Because TimeTicks increments can get suspended on some platforms (e.g. Mac)
// and because this function returns a static value, this value will not get
// suspension time into account on those platforms.
// As TimeTicks is intended to be used to track a process duration and not an
// absolute time, if you plan to use this function, please consider using a
// Time instead.
// TODO(crbug.com/355423207): Remove function.
static TimeTicks UnixEpoch();
// Returns |this| snapped to the next tick, given a |tick_phase| and
@@ -28,9 +28,7 @@
// For official build discard log strings to reduce binary bloat.
// See base/check.h for implementation details.
#define PA_CHECK(condition) \
PA_UNLIKELY(!(condition)) ? PA_IMMEDIATE_CRASH() \
: PA_EAT_CHECK_STREAM_PARAMS()
#define PA_CHECK(condition) PA_BASE_CHECK(condition)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
#define PA_DCHECK(condition) PA_CHECK(condition)
@@ -351,18 +351,6 @@ static_assert(kThreadIsolatedPoolHandle == kNumPools,
// of large areas which are less likely to benefit from MTE protection.
constexpr size_t kMaxMemoryTaggingSize = 1024;
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
// Returns whether the tag of |object| overflowed, meaning the containing slot
// needs to be moved to quarantine.
PA_ALWAYS_INLINE bool HasOverflowTag(void* object) {
// The tag with which the slot is put to quarantine.
constexpr uintptr_t kOverflowTag = 0x0f00000000000000uLL;
static_assert((kOverflowTag & kPtrTagMask) != 0,
"Overflow tag must be in tag bits");
return (reinterpret_cast<uintptr_t>(object) & kPtrTagMask) == kOverflowTag;
}
#endif // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
NumPartitionPagesPerSuperPage() {
return kSuperPageSize >> PartitionPageShift();
@@ -46,6 +46,16 @@ template <typename Z>
static constexpr bool is_offset_type =
std::is_integral_v<Z> && sizeof(Z) <= sizeof(ptrdiff_t);
enum class MetadataKind { kWritable, kReadOnly };
template <const MetadataKind kind, typename T>
struct MaybeConst {
using Type = std::conditional_t<kind == MetadataKind::kReadOnly, T const, T>;
};
template <const MetadataKind kind, typename T>
using MaybeConstT = MaybeConst<kind, T>::Type;
} // namespace internal
class PartitionStatsDumper;
@@ -187,7 +187,7 @@ SlotSpanMetadata* PartitionDirectMap(PartitionRoot* root,
PartitionRootLock(root).AssertAcquired();
const bool return_null = ContainsFlags(flags, AllocFlags::kReturnNull);
if (PA_UNLIKELY(raw_size > MaxDirectMapped())) {
if (raw_size > MaxDirectMapped()) [[unlikely]] {
if (return_null) {
return nullptr;
}
@@ -216,7 +216,8 @@ SlotSpanMetadata* PartitionDirectMap(PartitionRoot* root,
PartitionExcessiveAllocationSize(raw_size);
}
PartitionDirectMapExtent* map_extent = nullptr;
ReadOnlyPartitionDirectMapExtent* map_extent = nullptr;
WritablePartitionDirectMapExtent* writable_map_extent = nullptr;
PartitionPageMetadata* page_metadata = nullptr;
{
@@ -266,7 +267,7 @@ SlotSpanMetadata* PartitionDirectMap(PartitionRoot* root,
#endif
reservation_start = ReserveMemoryFromPool(pool, 0, reservation_size);
}
if (PA_UNLIKELY(!reservation_start)) {
if (!reservation_start) [[unlikely]] {
if (return_null) {
return nullptr;
}
@@ -331,7 +332,8 @@ SlotSpanMetadata* PartitionDirectMap(PartitionRoot* root,
}
auto* super_page_extent = PartitionSuperPageToExtent(reservation_start);
super_page_extent->root = root;
auto* writable_super_page_extent = super_page_extent->ToWritable(root);
writable_super_page_extent->root = root;
// The new structures are all located inside a fresh system page so they
// will all be zeroed out. These DCHECKs are for documentation and to assert
// our expectations of the kernel.
@@ -339,7 +341,8 @@ SlotSpanMetadata* PartitionDirectMap(PartitionRoot* root,
PA_DCHECK(!super_page_extent->next);
PartitionPageMetadata* first_page_metadata =
reinterpret_cast<PartitionPageMetadata*>(super_page_extent) + 1;
reinterpret_cast<PartitionPageMetadata*>(writable_super_page_extent) +
1;
page_metadata = PartitionPageMetadata::FromAddr(slot_start);
// |first_page_metadata| and |page_metadata| may be equal, if there is no
// alignment padding.
@@ -353,7 +356,12 @@ SlotSpanMetadata* PartitionDirectMap(PartitionRoot* root,
page_metadata - first_page_metadata;
}
auto* direct_map_metadata =
reinterpret_cast<PartitionDirectMapMetadata*>(page_metadata);
reinterpret_cast<ReadOnlyPartitionDirectMapMetadata*>(page_metadata);
// TODO(crbug.com/40238514): |page_metadata| will be
// |writable_page_metadata|, because |page_metadata| points to a readonly
// metadata inside the giga cage.
auto* writable_direct_map_metadata =
reinterpret_cast<WritablePartitionDirectMapMetadata*>(page_metadata);
// Since direct map metadata is larger than PartitionPageMetadata, make sure
// the first and the last bytes are on the same system page, i.e. within the
// super page metadata region.
@@ -361,7 +369,8 @@ SlotSpanMetadata* PartitionDirectMap(PartitionRoot* root,
base::bits::AlignDown(reinterpret_cast<uintptr_t>(direct_map_metadata),
SystemPageSize()) ==
base::bits::AlignDown(reinterpret_cast<uintptr_t>(direct_map_metadata) +
sizeof(PartitionDirectMapMetadata) - 1,
sizeof(ReadOnlyPartitionDirectMapMetadata) -
1,
SystemPageSize()));
PA_DCHECK(page_metadata == &direct_map_metadata->page_metadata);
page_metadata->is_valid = true;
@@ -376,7 +385,8 @@ SlotSpanMetadata* PartitionDirectMap(PartitionRoot* root,
PA_DCHECK(!direct_map_metadata->second_page_metadata
.subsequent_page_metadata.raw_size);
// Raw size is set later, by the caller.
direct_map_metadata->second_page_metadata.slot_span_metadata_offset = 1;
writable_direct_map_metadata->second_page_metadata
.slot_span_metadata_offset = 1;
PA_DCHECK(!direct_map_metadata->bucket.active_slot_spans_head);
PA_DCHECK(!direct_map_metadata->bucket.empty_slot_spans_head);
@@ -384,11 +394,12 @@ SlotSpanMetadata* PartitionDirectMap(PartitionRoot* root,
PA_DCHECK(!direct_map_metadata->bucket.num_system_pages_per_slot_span);
PA_DCHECK(!direct_map_metadata->bucket.num_full_slot_spans);
direct_map_metadata->bucket.slot_size = slot_size;
direct_map_metadata->bucket.can_store_raw_size = true;
writable_direct_map_metadata->bucket.slot_size = slot_size;
writable_direct_map_metadata->bucket.can_store_raw_size = true;
new (&page_metadata->slot_span_metadata)
SlotSpanMetadata(&direct_map_metadata->bucket);
// SlotSpanMetadata must point to the bucket inside the giga cage.
new (&page_metadata->slot_span_metadata) SlotSpanMetadata(
const_cast<PartitionBucket*>(&direct_map_metadata->bucket));
// It is typically possible to map a large range of inaccessible pages, and
// this is leveraged in multiple places, including the pools. However,
@@ -432,20 +443,22 @@ SlotSpanMetadata* PartitionDirectMap(PartitionRoot* root,
page_metadata->slot_span_metadata.SetFreelistHead(next_entry);
writable_map_extent = &writable_direct_map_metadata->direct_map_extent;
writable_map_extent->reservation_size = reservation_size;
writable_map_extent->padding_for_alignment = padding_for_alignment;
// Point to read-only bucket.
writable_map_extent->bucket = &direct_map_metadata->bucket;
map_extent = &direct_map_metadata->direct_map_extent;
map_extent->reservation_size = reservation_size;
map_extent->padding_for_alignment = padding_for_alignment;
map_extent->bucket = &direct_map_metadata->bucket;
}
PartitionRootLock(root).AssertAcquired();
// Maintain the doubly-linked list of all direct mappings.
map_extent->next_extent = root->direct_map_list;
writable_map_extent->next_extent = root->direct_map_list;
if (map_extent->next_extent) {
map_extent->next_extent->prev_extent = map_extent;
map_extent->next_extent->ToWritable(root)->prev_extent = map_extent;
}
map_extent->prev_extent = nullptr;
writable_map_extent->prev_extent = nullptr;
root->direct_map_list = map_extent;
return &page_metadata->slot_span_metadata;
@@ -621,8 +634,8 @@ PA_ALWAYS_INLINE SlotSpanMetadata* PartitionBucket::AllocNewSlotSpan(
uintptr_t adjusted_next_partition_page =
base::bits::AlignUp(root->next_partition_page, slot_span_alignment);
if (PA_UNLIKELY(adjusted_next_partition_page + slot_span_reservation_size >
root->next_partition_page_end)) {
if (adjusted_next_partition_page + slot_span_reservation_size >
root->next_partition_page_end) [[unlikely]] {
// AllocNewSuperPage() may crash (e.g. address space exhaustion), put data
// on stack.
PA_DEBUG_DATA_ON_STACK("slotsize", slot_size);
@@ -694,7 +707,7 @@ void PartitionBucket::InitCanStoreRawSize(bool use_small_single_slot_spans) {
// subsequent PartitionPage to store the raw size. It isn't only metadata
// space though, slot spans that have more than one slot can't have raw size
// stored, because we wouldn't know which slot it applies to.
if (PA_LIKELY(slot_size <= MaxRegularSlotSpanSize())) {
if (slot_size <= MaxRegularSlotSpanSize()) [[likely]] {
// Even when the slot size is below the standard floor for single
// slot spans, there exist spans that happen to have exactly one
// slot per. If `use_small_single_slot_spans` is true, we use more
@@ -740,7 +753,7 @@ uintptr_t PartitionBucket::AllocNewSuperPageSpan(PartitionRoot* root,
pool_handle pool = root->ChoosePool();
uintptr_t super_page_span_start = ReserveMemoryFromPool(
pool, requested_address, super_page_count * kSuperPageSize);
if (PA_UNLIKELY(!super_page_span_start)) {
if (!super_page_span_start) [[unlikely]] {
if (ContainsFlags(flags, AllocFlags::kReturnNull)) {
return 0;
}
@@ -763,13 +776,13 @@ uintptr_t PartitionBucket::AllocNewSuperPageSpan(PartitionRoot* root,
PA_ALWAYS_INLINE uintptr_t
PartitionBucket::AllocNewSuperPage(PartitionRoot* root, AllocFlags flags) {
auto super_page = AllocNewSuperPageSpan(root, 1, flags);
if (PA_UNLIKELY(!super_page)) {
if (!super_page) [[unlikely]] {
// If the `kReturnNull` flag isn't set and the allocation attempt fails,
// `AllocNewSuperPageSpan` should've failed with an OOM crash.
PA_DCHECK(ContainsFlags(flags, AllocFlags::kReturnNull));
return 0;
}
return SuperPagePayloadBegin(super_page, root->IsQuarantineAllowed());
return SuperPagePayloadBegin(super_page);
}
PA_ALWAYS_INLINE uintptr_t
@@ -789,8 +802,7 @@ PartitionBucket::InitializeSuperPage(PartitionRoot* root,
root->next_partition_page = payload;
root->next_partition_page_end = root->next_super_page - PartitionPageSize();
PA_DCHECK(payload ==
SuperPagePayloadBegin(super_page, root->IsQuarantineAllowed()));
PA_DCHECK(payload == SuperPagePayloadBegin(super_page));
PA_DCHECK(root->next_partition_page_end == SuperPagePayloadEnd(super_page));
// Keep the first partition page in the super page inaccessible to serve as a
@@ -837,33 +849,41 @@ PartitionBucket::InitializeSuperPage(PartitionRoot* root,
// We allocated a new super page so update super page metadata.
// First check if this is a new extent or not.
auto* latest_extent = PartitionSuperPageToExtent(super_page);
auto* writable_latest_extent = latest_extent->ToWritable(root);
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_DCHECK(writable_latest_extent->ToReadOnly(root) == latest_extent);
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
// By storing the root in every extent metadata object, we have a fast way
// to go from a pointer within the partition to the root object.
latest_extent->root = root;
writable_latest_extent->root = root;
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_DCHECK(writable_latest_extent->root == root);
PA_DCHECK(latest_extent->root == root);
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
// Most new extents will be part of a larger extent, and these two fields
// are unused, but we initialize them to 0 so that we get a clear signal
// in case they are accidentally used.
latest_extent->number_of_consecutive_super_pages = 0;
latest_extent->next = nullptr;
latest_extent->number_of_nonempty_slot_spans = 0;
writable_latest_extent->number_of_consecutive_super_pages = 0;
writable_latest_extent->next = nullptr;
writable_latest_extent->number_of_nonempty_slot_spans = 0;
PartitionSuperPageExtentEntry* current_extent = root->current_extent;
ReadOnlyPartitionSuperPageExtentEntry* current_extent = root->current_extent;
const bool is_new_extent = super_page != requested_address;
if (PA_UNLIKELY(is_new_extent)) {
if (PA_UNLIKELY(!current_extent)) {
if (is_new_extent) [[unlikely]] {
if (!current_extent) [[unlikely]] {
PA_DCHECK(!root->first_extent);
root->first_extent = latest_extent;
} else {
PA_DCHECK(current_extent->number_of_consecutive_super_pages);
current_extent->next = latest_extent;
current_extent->ToWritable(root)->next = latest_extent;
}
root->current_extent = latest_extent;
latest_extent->number_of_consecutive_super_pages = 1;
writable_latest_extent->number_of_consecutive_super_pages = 1;
} else {
// We allocated next to an existing extent so just nudge the size up a
// little.
PA_DCHECK(current_extent->number_of_consecutive_super_pages);
++current_extent->number_of_consecutive_super_pages;
++current_extent->ToWritable(root)->number_of_consecutive_super_pages;
PA_DCHECK(payload > SuperPagesBeginFromExtent(current_extent) &&
payload < SuperPagesEndFromExtent(current_extent));
}
@@ -962,7 +982,7 @@ PartitionBucket::ProvisionMoreSlotsAndAllocOne(PartitionRoot* root,
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
const bool use_tagging =
root->IsMemoryTaggingEnabled() && slot_size <= kMaxMemoryTaggingSize;
if (PA_LIKELY(use_tagging)) {
if (use_tagging) [[likely]] {
// Ensure the MTE-tag of the memory pointed by |return_slot| is unguessable.
TagMemoryRangeRandomly(return_slot, slot_size);
}
@@ -977,7 +997,7 @@ PartitionBucket::ProvisionMoreSlotsAndAllocOne(PartitionRoot* root,
while (next_slot_end <= commit_end) {
void* next_slot_ptr;
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
if (PA_LIKELY(use_tagging)) {
if (use_tagging) [[likely]] {
// Ensure the MTE-tag of the memory pointed by other provisioned slot is
// unguessable. They will be returned to the app as is, and the MTE-tag
// will only change upon calling Free().
@@ -1100,7 +1120,7 @@ bool PartitionBucket::SetNewActiveSlotSpan() {
} else if (slot_span->is_empty()) {
slot_span->next_slot_span = empty_slot_spans_head;
empty_slot_spans_head = slot_span;
} else if (PA_LIKELY(slot_span->is_decommitted())) {
} else if (slot_span->is_decommitted()) [[likely]] {
slot_span->next_slot_span = decommitted_slot_spans_head;
decommitted_slot_spans_head = slot_span;
} else {
@@ -1329,7 +1349,7 @@ uintptr_t PartitionBucket::SlowPathAlloc(PartitionRoot* root,
// SetNewActiveSlotSpan() has a side-effect even when returning
// false where it sweeps the active list and may move things into the empty or
// decommitted lists which affects the subsequent conditional.
if (PA_UNLIKELY(is_direct_mapped())) {
if (is_direct_mapped()) [[unlikely]] {
PA_DCHECK(raw_size > kMaxBucketed);
PA_DCHECK(this == &root->sentinel_bucket);
PA_DCHECK(active_slot_spans_head ==
@@ -1347,17 +1367,17 @@ uintptr_t PartitionBucket::SlowPathAlloc(PartitionRoot* root,
}
// Memory from PageAllocator is always zeroed.
*is_already_zeroed = true;
} else if (PA_LIKELY(!allocate_aligned_slot_span && SetNewActiveSlotSpan())) {
} else if (!allocate_aligned_slot_span && SetNewActiveSlotSpan()) [[likely]] {
// First, did we find an active slot span in the active list?
new_slot_span = active_slot_spans_head;
PA_DCHECK(new_slot_span->is_active());
} else if (PA_LIKELY(!allocate_aligned_slot_span &&
(empty_slot_spans_head != nullptr ||
decommitted_slot_spans_head != nullptr))) {
} else if (!allocate_aligned_slot_span &&
(empty_slot_spans_head != nullptr ||
decommitted_slot_spans_head != nullptr)) [[likely]] {
// Second, look in our lists of empty and decommitted slot spans.
// Check empty slot spans first, which are preferred, but beware that an
// empty slot span might have been decommitted.
while (PA_LIKELY((new_slot_span = empty_slot_spans_head) != nullptr)) {
while ((new_slot_span = empty_slot_spans_head) != nullptr) [[likely]] {
PA_DCHECK(new_slot_span->bucket == this);
PA_DCHECK(new_slot_span->is_empty() || new_slot_span->is_decommitted());
empty_slot_spans_head = new_slot_span->next_slot_span;
@@ -1365,6 +1385,7 @@ uintptr_t PartitionBucket::SlowPathAlloc(PartitionRoot* root,
if (new_slot_span->get_freelist_head()) {
new_slot_span->next_slot_span = nullptr;
new_slot_span->ToSuperPageExtent()
->ToWritable(root)
->IncrementNumberOfNonemptySlotSpans();
// Re-activating an empty slot span, update accounting.
@@ -1379,45 +1400,46 @@ uintptr_t PartitionBucket::SlowPathAlloc(PartitionRoot* root,
new_slot_span->next_slot_span = decommitted_slot_spans_head;
decommitted_slot_spans_head = new_slot_span;
}
if (PA_UNLIKELY(!new_slot_span) &&
PA_LIKELY(decommitted_slot_spans_head != nullptr)) {
// Commit can be expensive, don't do it.
if (ContainsFlags(flags, AllocFlags::kFastPathOrReturnNull)) {
return 0;
}
new_slot_span = decommitted_slot_spans_head;
PA_DCHECK(new_slot_span->bucket == this);
PA_DCHECK(new_slot_span->is_decommitted());
// If lazy commit is enabled, pages will be recommitted when provisioning
// slots, in ProvisionMoreSlotsAndAllocOne(), not here.
if (!kUseLazyCommit) {
uintptr_t slot_span_start =
SlotSpanMetadata::ToSlotSpanStart(new_slot_span);
// Since lazy commit isn't used, we have a guarantee that all slot span
// pages have been previously committed, and then decommitted using
// PageAccessibilityDisposition::kAllowKeepForPerf, so use the
// same option as an optimization.
const bool ok = root->TryRecommitSystemPagesForDataLocked(
slot_span_start, new_slot_span->bucket->get_bytes_per_span(),
PageAccessibilityDisposition::kAllowKeepForPerf,
slot_size <= kMaxMemoryTaggingSize);
if (!ok) {
if (!ContainsFlags(flags, AllocFlags::kReturnNull)) {
ScopedUnlockGuard unlock{PartitionRootLock(root)};
PartitionOutOfMemoryCommitFailure(
root, new_slot_span->bucket->get_bytes_per_span());
}
if (!new_slot_span) [[unlikely]] {
if (decommitted_slot_spans_head != nullptr) [[likely]] {
// Commit can be expensive, don't do it.
if (ContainsFlags(flags, AllocFlags::kFastPathOrReturnNull)) {
return 0;
}
}
decommitted_slot_spans_head = new_slot_span->next_slot_span;
new_slot_span->Reset();
*is_already_zeroed = DecommittedMemoryIsAlwaysZeroed();
new_slot_span = decommitted_slot_spans_head;
PA_DCHECK(new_slot_span->bucket == this);
PA_DCHECK(new_slot_span->is_decommitted());
// If lazy commit is enabled, pages will be recommitted when
// provisioning slots, in ProvisionMoreSlotsAndAllocOne(), not here.
if (!kUseLazyCommit) {
uintptr_t slot_span_start =
SlotSpanMetadata::ToSlotSpanStart(new_slot_span);
// Since lazy commit isn't used, we have a guarantee that all slot
// span pages have been previously committed, and then decommitted
// using PageAccessibilityDisposition::kAllowKeepForPerf, so use the
// same option as an optimization.
const bool ok = root->TryRecommitSystemPagesForDataLocked(
slot_span_start, new_slot_span->bucket->get_bytes_per_span(),
PageAccessibilityDisposition::kAllowKeepForPerf,
slot_size <= kMaxMemoryTaggingSize);
if (!ok) {
if (!ContainsFlags(flags, AllocFlags::kReturnNull)) {
ScopedUnlockGuard unlock{PartitionRootLock(root)};
PartitionOutOfMemoryCommitFailure(
root, new_slot_span->bucket->get_bytes_per_span());
}
return 0;
}
}
decommitted_slot_spans_head = new_slot_span->next_slot_span;
new_slot_span->Reset();
*is_already_zeroed = DecommittedMemoryIsAlwaysZeroed();
}
PA_DCHECK(new_slot_span);
}
PA_DCHECK(new_slot_span);
} else {
// Getting a new slot span is expensive, don't do it.
if (ContainsFlags(flags, AllocFlags::kFastPathOrReturnNull)) {
@@ -1433,7 +1455,7 @@ uintptr_t PartitionBucket::SlowPathAlloc(PartitionRoot* root,
}
// Bail if we had a memory allocation failure.
if (PA_UNLIKELY(!new_slot_span)) {
if (!new_slot_span) [[unlikely]] {
PA_DCHECK(active_slot_spans_head ==
SlotSpanMetadata::get_sentinel_slot_span());
if (ContainsFlags(flags, AllocFlags::kReturnNull)) {
@@ -1454,7 +1476,7 @@ uintptr_t PartitionBucket::SlowPathAlloc(PartitionRoot* root,
// If we found an active slot span with free slots, or an empty slot span, we
// have a usable freelist head.
if (PA_LIKELY(new_slot_span->get_freelist_head() != nullptr)) {
if (new_slot_span->get_freelist_head() != nullptr) [[likely]] {
const PartitionFreelistDispatcher* freelist_dispatcher =
root->get_freelist_dispatcher();
PartitionFreelistEntry* entry =
@@ -9,6 +9,7 @@
#include "partition_alloc/partition_bucket.h"
#include "partition_alloc/partition_page.h"
#include "partition_alloc/partition_root.h"
#include "partition_alloc/partition_superpage_extent_entry.h"
namespace partition_alloc::internal {
@@ -26,14 +27,6 @@ void DCheckIsValidShiftFromSlotStart(internal::SlotSpanMetadata* slot_span,
PA_DCHECK(shift_from_slot_start <= root->GetSlotUsableSize(slot_span));
}
void DCheckIsWithInSuperPagePayload(uintptr_t address) {
uintptr_t super_page = address & kSuperPageBaseMask;
auto* extent = PartitionSuperPageToExtent(super_page);
PA_DCHECK(IsWithinSuperPagePayload(address,
IsManagedByNormalBuckets(address) &&
extent->root->IsQuarantineAllowed()));
}
void DCheckIsValidObjectAddress(internal::SlotSpanMetadata* slot_span,
uintptr_t object_addr) {
uintptr_t slot_span_start = SlotSpanMetadata::ToSlotSpanStart(slot_span);
@@ -42,14 +35,16 @@ void DCheckIsValidObjectAddress(internal::SlotSpanMetadata* slot_span,
}
void DCheckNumberOfPartitionPagesInSuperPagePayload(
const PartitionSuperPageExtentEntry* entry,
WritablePartitionSuperPageExtentEntry* entry,
const PartitionRoot* root,
size_t number_of_nonempty_slot_spans) {
uintptr_t super_page = base::bits::AlignDown(
reinterpret_cast<uintptr_t>(entry), kSuperPageAlignment);
ReadOnlyPartitionSuperPageExtentEntry* readonly_entry =
entry->ToReadOnly(root);
uintptr_t entry_address = reinterpret_cast<uintptr_t>(readonly_entry);
uintptr_t super_page =
base::bits::AlignDown(entry_address, kSuperPageAlignment);
size_t number_of_partition_pages_in_superpage_payload =
SuperPagePayloadSize(super_page, root->IsQuarantineAllowed()) /
PartitionPageSize();
SuperPagePayloadSize(super_page) / PartitionPageSize();
PA_DCHECK(number_of_partition_pages_in_superpage_payload >
number_of_nonempty_slot_spans);
}
@@ -58,10 +53,6 @@ void DCheckRootLockIsAcquired(PartitionRoot* root) {
PartitionRootLock(root).AssertAcquired();
}
void DCheckRootLockOfSlotSpanIsAcquired(internal::SlotSpanMetadata* slot_span) {
DCheckRootLockIsAcquired(PartitionRoot::FromSlotSpanMetadata(slot_span));
}
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
} // namespace partition_alloc::internal
@@ -12,7 +12,7 @@
namespace partition_alloc::internal {
struct PartitionSuperPageExtentEntry;
struct WritablePartitionSuperPageExtentEntry;
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
@@ -37,13 +37,9 @@ PA_EXPORT_IF_DCHECK_IS_ON()
void DCheckIsValidSlotSpan(internal::SlotSpanMetadata* slot_span)
PA_EMPTY_BODY_IF_DCHECK_IS_OFF();
PA_EXPORT_IF_DCHECK_IS_ON()
void DCheckIsWithInSuperPagePayload(uintptr_t address)
PA_EMPTY_BODY_IF_DCHECK_IS_OFF();
PA_EXPORT_IF_DCHECK_IS_ON()
void DCheckNumberOfPartitionPagesInSuperPagePayload(
const PartitionSuperPageExtentEntry* entry,
WritablePartitionSuperPageExtentEntry* entry,
const PartitionRoot* root,
size_t number_of_nonempty_slot_spans) PA_EMPTY_BODY_IF_DCHECK_IS_OFF();
@@ -12,57 +12,227 @@
namespace partition_alloc::internal {
struct ReadOnlyPartitionDirectMapExtent;
struct WritablePartitionDirectMapExtent;
struct WritablePartitionDirectMapMetadata;
template <const MetadataKind kind>
struct PartitionDirectMapExtent {
PartitionDirectMapExtent* next_extent;
PartitionDirectMapExtent* prev_extent;
PartitionBucket* bucket;
using ReadOnlyType = ReadOnlyPartitionDirectMapExtent;
using WritableType = WritablePartitionDirectMapExtent;
MaybeConstT<kind, ReadOnlyPartitionDirectMapExtent*> next_extent;
MaybeConstT<kind, ReadOnlyPartitionDirectMapExtent*> prev_extent;
MaybeConstT<kind, const PartitionBucket*> bucket;
// Size of the entire reservation, including guard pages, meta-data,
// padding for alignment before allocation, and padding for granularity at the
// end of the allocation.
size_t reservation_size;
MaybeConstT<kind, size_t> reservation_size;
// Padding between the first partition page (guard pages + meta-data) and
// the allocation.
size_t padding_for_alignment;
PA_ALWAYS_INLINE static PartitionDirectMapExtent* FromSlotSpanMetadata(
SlotSpanMetadata* slot_span);
MaybeConstT<kind, size_t> padding_for_alignment;
};
struct ReadOnlyPartitionDirectMapExtent
: public PartitionDirectMapExtent<MetadataKind::kReadOnly> {
PA_ALWAYS_INLINE static ReadOnlyPartitionDirectMapExtent*
FromSlotSpanMetadata(SlotSpanMetadata* slot_span);
PA_ALWAYS_INLINE WritablePartitionDirectMapExtent* ToWritable(
const PartitionRoot* root);
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_ALWAYS_INLINE ReadOnlyPartitionDirectMapExtent* ToReadOnly();
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
private:
// In order to resolve circular dependencies, i.e. ToWritable() needs
// PartitionRoot, define template method: ToWritableInternal() and
// ToWritable() uses it.
template <typename T>
WritablePartitionDirectMapExtent* ToWritableInternal(
[[maybe_unused]] T* root);
};
struct WritablePartitionDirectMapExtent
: public PartitionDirectMapExtent<MetadataKind::kWritable> {
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_ALWAYS_INLINE ReadOnlyPartitionDirectMapExtent* ToReadOnly(
const PartitionRoot* root);
private:
// In order to resolve circular dependencies, i.e. ToReadOnly() needs
// PartitionRoot, define template method: ToReadOnlyInternal() and
// ToReadOnly() uses it.
template <typename T>
ReadOnlyPartitionDirectMapExtent* ToReadOnlyInternal(
[[maybe_unused]] T* root);
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
};
struct ReadOnlyPartitionDirectMapMetadata;
struct WritablePartitionDirectMapMetadata;
// Metadata page for direct-mapped allocations.
template <const MetadataKind kind>
struct PartitionDirectMapMetadata {
// |page_metadata| and |second_page_metadata| are needed to match the
// layout of normal buckets (specifically, of single-slot slot spans), with
// the caveat that only the first subsequent page is needed (for
// SubsequentPageMetadata) and others aren't used for direct map.
PartitionPageMetadata page_metadata;
PartitionPageMetadata second_page_metadata;
// TODO(crbug.com/40238514): Will be ReadOnlyPartitionPageMetadata.
MaybeConstT<kind, PartitionPageMetadata> page_metadata;
MaybeConstT<kind, PartitionPageMetadata> second_page_metadata;
// The following fields are metadata specific to direct map allocations. All
// these fields will easily fit into the precalculated metadata region,
// because a direct map allocation starts no further than half way through the
// super page.
PartitionBucket bucket;
PartitionDirectMapExtent direct_map_extent;
MaybeConstT<kind, PartitionBucket> bucket;
PA_ALWAYS_INLINE static PartitionDirectMapMetadata* FromSlotSpanMetadata(
SlotSpanMetadata* slot_span);
std::conditional_t<kind == MetadataKind::kReadOnly,
ReadOnlyPartitionDirectMapExtent,
WritablePartitionDirectMapExtent>
direct_map_extent;
};
PA_ALWAYS_INLINE PartitionDirectMapMetadata*
PartitionDirectMapMetadata::FromSlotSpanMetadata(SlotSpanMetadata* slot_span) {
struct ReadOnlyPartitionDirectMapMetadata
: public PartitionDirectMapMetadata<MetadataKind::kReadOnly> {
PA_ALWAYS_INLINE static ReadOnlyPartitionDirectMapMetadata*
FromSlotSpanMetadata(SlotSpanMetadata* slot_span);
PA_ALWAYS_INLINE WritablePartitionDirectMapMetadata* ToWritable(
const PartitionRoot* root);
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_ALWAYS_INLINE ReadOnlyPartitionDirectMapMetadata* ToReadOnly();
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
private:
// In order to resolve circular dependencies, i.e. ToWritable() needs
// PartitionRoot, define template method: ToWritableInternal() and
// ToWritable() uses it.
template <typename T>
WritablePartitionDirectMapMetadata* ToWritableInternal(
[[maybe_unused]] T* root);
};
struct WritablePartitionDirectMapMetadata
: public PartitionDirectMapMetadata<MetadataKind::kWritable> {
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_ALWAYS_INLINE ReadOnlyPartitionDirectMapMetadata* ToReadOnly(
const PartitionRoot* root);
private:
// In order to resolve circular dependencies, i.e. ToReadOnly() needs
// PartitionRoot, define template method: ToReadOnlyInternal() and
// ToReadOnly() uses it.
template <typename T>
ReadOnlyPartitionDirectMapMetadata* ToReadOnlyInternal(
[[maybe_unused]] T* root);
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
};
PA_ALWAYS_INLINE ReadOnlyPartitionDirectMapMetadata*
ReadOnlyPartitionDirectMapMetadata::FromSlotSpanMetadata(
SlotSpanMetadata* slot_span) {
PA_DCHECK(slot_span->bucket->is_direct_mapped());
// |*slot_span| is the first field of |PartitionDirectMapMetadata|, just cast.
auto* metadata = reinterpret_cast<PartitionDirectMapMetadata*>(slot_span);
auto* metadata =
reinterpret_cast<ReadOnlyPartitionDirectMapMetadata*>(slot_span);
PA_DCHECK(&metadata->page_metadata.slot_span_metadata == slot_span);
return metadata;
}
PA_ALWAYS_INLINE PartitionDirectMapExtent*
PartitionDirectMapExtent::FromSlotSpanMetadata(SlotSpanMetadata* slot_span) {
PA_ALWAYS_INLINE ReadOnlyPartitionDirectMapExtent*
ReadOnlyPartitionDirectMapExtent::FromSlotSpanMetadata(
SlotSpanMetadata* slot_span) {
PA_DCHECK(slot_span->bucket->is_direct_mapped());
return &PartitionDirectMapMetadata::FromSlotSpanMetadata(slot_span)
return &ReadOnlyPartitionDirectMapMetadata::FromSlotSpanMetadata(slot_span)
->direct_map_extent;
}
PA_ALWAYS_INLINE WritablePartitionDirectMapMetadata*
ReadOnlyPartitionDirectMapMetadata::ToWritable(const PartitionRoot* root) {
return ToWritableInternal(root);
}
template <typename T>
WritablePartitionDirectMapMetadata*
ReadOnlyPartitionDirectMapMetadata::ToWritableInternal(
[[maybe_unused]] T* root) {
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
return reinterpret_cast<WritablePartitionDirectMapMetadata*>(
reinterpret_cast<intptr_t>(this) + root->ShadowPoolOffset());
#else
return reinterpret_cast<WritablePartitionDirectMapMetadata*>(this);
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
}
PA_ALWAYS_INLINE WritablePartitionDirectMapExtent*
ReadOnlyPartitionDirectMapExtent::ToWritable(const PartitionRoot* root) {
return ToWritableInternal(root);
}
template <typename T>
WritablePartitionDirectMapExtent*
ReadOnlyPartitionDirectMapExtent::ToWritableInternal([[maybe_unused]] T* root) {
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
return reinterpret_cast<WritablePartitionDirectMapExtent*>(
reinterpret_cast<intptr_t>(this) + root->ShadowPoolOffset());
#else
return reinterpret_cast<WritablePartitionDirectMapExtent*>(this);
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
}
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_ALWAYS_INLINE ReadOnlyPartitionDirectMapMetadata*
ReadOnlyPartitionDirectMapMetadata::ToReadOnly() {
return this;
}
PA_ALWAYS_INLINE ReadOnlyPartitionDirectMapMetadata*
WritablePartitionDirectMapMetadata::ToReadOnly(const PartitionRoot* root) {
return ToReadOnlyInternal(root);
}
template <typename T>
ReadOnlyPartitionDirectMapMetadata*
WritablePartitionDirectMapMetadata::ToReadOnlyInternal(
[[maybe_unused]] T* root) {
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
return reinterpret_cast<ReadOnlyPartitionDirectMapMetadata*>(
reinterpret_cast<intptr_t>(this) - root->ShadowPoolOffset());
#else
// must be no-op.
return reinterpret_cast<ReadOnlyPartitionDirectMapMetadata*>(this);
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
}
PA_ALWAYS_INLINE ReadOnlyPartitionDirectMapExtent*
ReadOnlyPartitionDirectMapExtent::ToReadOnly() {
return this;
}
PA_ALWAYS_INLINE ReadOnlyPartitionDirectMapExtent*
WritablePartitionDirectMapExtent::ToReadOnly(const PartitionRoot* root) {
return ToReadOnlyInternal(root);
}
template <typename T>
ReadOnlyPartitionDirectMapExtent*
WritablePartitionDirectMapExtent::ToReadOnlyInternal([[maybe_unused]] T* root) {
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
return reinterpret_cast<ReadOnlyPartitionDirectMapExtent*>(
reinterpret_cast<intptr_t>(this) - root->ShadowPoolOffset());
#else
return reinterpret_cast<ReadOnlyPartitionDirectMapExtent*>(this);
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
}
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
} // namespace partition_alloc::internal
#endif // PARTITION_ALLOC_PARTITION_DIRECT_MAP_EXTENT_H_
@@ -50,8 +50,8 @@ class PA_LOCKABLE Lock {
// Note that we don't rely on a DCHECK() in base::Lock(), as it would
// itself allocate. Meaning that without this code, a reentrancy issue
// hangs on Linux.
if (PA_UNLIKELY(owning_thread_ref_.load(std::memory_order_acquire) ==
current_thread)) {
if (owning_thread_ref_.load(std::memory_order_acquire) == current_thread)
[[unlikely]] {
// Trying to acquire lock while it's held by this thread: reentrancy
// issue.
PA_IMMEDIATE_CRASH();
@@ -35,18 +35,19 @@ void UnmapNow(uintptr_t reservation_start,
PA_ALWAYS_INLINE void PartitionDirectUnmap(SlotSpanMetadata* slot_span) {
auto* root = PartitionRoot::FromSlotSpanMetadata(slot_span);
PartitionRootLock(root).AssertAcquired();
auto* extent = PartitionDirectMapExtent::FromSlotSpanMetadata(slot_span);
auto* extent =
ReadOnlyPartitionDirectMapExtent::FromSlotSpanMetadata(slot_span);
// Maintain the doubly-linked list of all direct mappings.
if (extent->prev_extent) {
PA_DCHECK(extent->prev_extent->next_extent == extent);
extent->prev_extent->next_extent = extent->next_extent;
extent->prev_extent->ToWritable(root)->next_extent = extent->next_extent;
} else {
root->direct_map_list = extent->next_extent;
}
if (extent->next_extent) {
PA_DCHECK(extent->next_extent->prev_extent == extent);
extent->next_extent->prev_extent = extent->prev_extent;
extent->next_extent->ToWritable(root)->prev_extent = extent->prev_extent;
}
// The actual decommit is deferred below after releasing the lock.
@@ -87,7 +88,9 @@ PA_ALWAYS_INLINE void SlotSpanMetadata::RegisterEmpty() {
root->empty_slot_spans_dirty_bytes +=
base::bits::AlignUp(GetProvisionedSize(), SystemPageSize());
ToSuperPageExtent()->DecrementNumberOfNonemptySlotSpans();
// TODO(crbug.com/40238514): SlotSpanMetadata::RegisterEmpty() will be
// WritableSlotSpanMetadata::RegisterEmpty(). So ToWritable() will be removed.
ToSuperPageExtent()->ToWritable(root)->DecrementNumberOfNonemptySlotSpans();
// If the slot span is already registered as empty, don't do anything. This
// prevents continually reusing a slot span from decommitting a bunch of other
@@ -177,7 +180,7 @@ void SlotSpanMetadata::FreeSlowPath(size_t number_of_freed) {
// chances of it being filled up again. The old current slot span will be
// the next slot span.
PA_DCHECK(!next_slot_span);
if (PA_LIKELY(bucket->active_slot_spans_head != get_sentinel_slot_span())) {
if (bucket->active_slot_spans_head != get_sentinel_slot_span()) [[likely]] {
next_slot_span = bucket->active_slot_spans_head;
}
bucket->active_slot_spans_head = this;
@@ -185,9 +188,9 @@ void SlotSpanMetadata::FreeSlowPath(size_t number_of_freed) {
--bucket->num_full_slot_spans;
}
if (PA_LIKELY(num_allocated_slots == 0)) {
if (num_allocated_slots == 0) [[likely]] {
// Slot span became fully unused.
if (PA_UNLIKELY(bucket->is_direct_mapped())) {
if (bucket->is_direct_mapped()) [[unlikely]] {
PartitionDirectUnmap(this);
return;
}
@@ -200,7 +203,7 @@ void SlotSpanMetadata::FreeSlowPath(size_t number_of_freed) {
// If it's the current active slot span, change it. We bounce the slot span
// to the empty list as a force towards defragmentation.
if (PA_LIKELY(this == bucket->active_slot_spans_head)) {
if (this == bucket->active_slot_spans_head) [[likely]] {
bucket->SetNewActiveSlotSpan();
}
PA_DCHECK(bucket->active_slot_spans_head != this);
@@ -310,6 +313,18 @@ void SlotSpanMetadata::SortFreelist() {
freelist_is_sorted_ = true;
}
void SlotSpanMetadata::IncrementNumberOfNonemptySlotSpans() {
// TODO(crbug.com/40238514):
// SlotSpanMetadata::IncrementNumberOfNonemptySlotSpans() will be
// WritableSlotSpanMetadata::IncrementNumberOfNonemptySlotSpans(). So
// we will remove |root| and |ToWritable()| after introducing
// WritableSlotSpanMetadata.
auto* root = PartitionRoot::FromSlotSpanMetadata(this);
WritablePartitionSuperPageExtentEntry* extent =
ToSuperPageExtent()->ToWritable(root);
extent->IncrementNumberOfNonemptySlotSpans();
}
namespace {
void UnmapNow(uintptr_t reservation_start,
@@ -150,7 +150,8 @@ struct SlotSpanMetadata {
uintptr_t address);
PA_ALWAYS_INLINE static SlotSpanMetadata* FromObjectInnerPtr(void* ptr);
PA_ALWAYS_INLINE PartitionSuperPageExtentEntry* ToSuperPageExtent() const;
PA_ALWAYS_INLINE ReadOnlyPartitionSuperPageExtentEntry* ToSuperPageExtent()
const;
// Checks if it is feasible to store raw_size.
PA_ALWAYS_INLINE bool CanStoreRawSize() const { return can_store_raw_size_; }
@@ -172,7 +173,7 @@ struct SlotSpanMetadata {
// - Exact size needed to satisfy allocation (incl. extras), for large
// buckets and direct-mapped allocations (see also the comment in
// CanStoreRawSize() for more info).
if (PA_LIKELY(!CanStoreRawSize())) {
if (!CanStoreRawSize()) [[likely]] {
return bucket->slot_size;
}
return GetRawSize();
@@ -227,6 +228,8 @@ struct SlotSpanMetadata {
PA_ALWAYS_INLINE void set_freelist_sorted() { freelist_is_sorted_ = true; }
private:
void IncrementNumberOfNonemptySlotSpans();
// sentinel_slot_span_ is used as a sentinel to indicate that there is no slot
// span in the active list. We could use nullptr, but in that case we need to
// add a null-check branch to the hot allocation path. We want to avoid that.
@@ -353,10 +356,10 @@ PA_ALWAYS_INLINE SubsequentPageMetadata* GetSubsequentPageMetadata(
return &(page_metadata + 1)->subsequent_page_metadata;
}
PA_ALWAYS_INLINE PartitionSuperPageExtentEntry* PartitionSuperPageToExtent(
uintptr_t super_page) {
PA_ALWAYS_INLINE ReadOnlyPartitionSuperPageExtentEntry*
PartitionSuperPageToExtent(uintptr_t super_page) {
// The very first entry of the metadata is the super page extent entry.
return reinterpret_cast<PartitionSuperPageExtentEntry*>(
return reinterpret_cast<ReadOnlyPartitionSuperPageExtentEntry*>(
PartitionSuperPageToMetadataArea(super_page));
}
@@ -366,19 +369,15 @@ ReservedStateBitmapSize() {
}
PA_ALWAYS_INLINE uintptr_t
SuperPagePayloadStartOffset(bool is_managed_by_normal_buckets,
bool with_quarantine) {
SuperPagePayloadStartOffset(bool is_managed_by_normal_buckets) {
return PartitionPageSize() +
(is_managed_by_normal_buckets ? ReservedFreeSlotBitmapSize() : 0) +
(with_quarantine ? ReservedStateBitmapSize() : 0);
(is_managed_by_normal_buckets ? ReservedFreeSlotBitmapSize() : 0);
}
PA_ALWAYS_INLINE uintptr_t SuperPagePayloadBegin(uintptr_t super_page,
bool with_quarantine) {
PA_ALWAYS_INLINE uintptr_t SuperPagePayloadBegin(uintptr_t super_page) {
PA_DCHECK(!(super_page % kSuperPageAlignment));
return super_page +
SuperPagePayloadStartOffset(IsManagedByNormalBuckets(super_page),
with_quarantine);
SuperPagePayloadStartOffset(IsManagedByNormalBuckets(super_page));
}
PA_ALWAYS_INLINE uintptr_t SuperPagePayloadEndOffset() {
@@ -390,13 +389,11 @@ PA_ALWAYS_INLINE uintptr_t SuperPagePayloadEnd(uintptr_t super_page) {
return super_page + SuperPagePayloadEndOffset();
}
PA_ALWAYS_INLINE size_t SuperPagePayloadSize(uintptr_t super_page,
bool with_quarantine) {
return SuperPagePayloadEnd(super_page) -
SuperPagePayloadBegin(super_page, with_quarantine);
PA_ALWAYS_INLINE size_t SuperPagePayloadSize(uintptr_t super_page) {
return SuperPagePayloadEnd(super_page) - SuperPagePayloadBegin(super_page);
}
PA_ALWAYS_INLINE PartitionSuperPageExtentEntry*
PA_ALWAYS_INLINE ReadOnlyPartitionSuperPageExtentEntry*
SlotSpanMetadata::ToSuperPageExtent() const {
uintptr_t super_page = reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask;
return PartitionSuperPageToExtent(super_page);
@@ -406,12 +403,9 @@ SlotSpanMetadata::ToSuperPageExtent() const {
// area devoted to slot spans). It doesn't check whether it's within a valid
// slot span. It merely ensures it doesn't fall in a meta-data region that would
// surely never contain user data.
PA_ALWAYS_INLINE bool IsWithinSuperPagePayload(uintptr_t address,
bool with_quarantine) {
// Quarantine can only be enabled for normal buckets in the current code.
PA_DCHECK(!with_quarantine || IsManagedByNormalBuckets(address));
PA_ALWAYS_INLINE bool IsWithinSuperPagePayload(uintptr_t address) {
uintptr_t super_page = address & kSuperPageBaseMask;
uintptr_t payload_start = SuperPagePayloadBegin(super_page, with_quarantine);
uintptr_t payload_start = SuperPagePayloadBegin(super_page);
uintptr_t payload_end = SuperPagePayloadEnd(super_page);
return address >= payload_start && address < payload_end;
}
@@ -431,7 +425,7 @@ PA_ALWAYS_INLINE PartitionPageMetadata* PartitionPageMetadata::FromAddr(
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_DCHECK(IsReservationStart(super_page));
DCheckIsWithInSuperPagePayload(address);
PA_DCHECK(IsWithinSuperPagePayload(address));
#endif
uintptr_t partition_page_index =
@@ -614,7 +608,7 @@ PA_ALWAYS_INLINE void SlotSpanMetadata::Free(
--num_allocated_slots;
// If the span is marked full, or became empty, take the slow path to update
// internal state.
if (PA_UNLIKELY(marked_full || num_allocated_slots == 0)) {
if (marked_full || num_allocated_slots == 0) [[unlikely]] {
FreeSlowPath(1);
} else {
// All single-slot allocations must go through the slow path to
@@ -659,7 +653,7 @@ PA_ALWAYS_INLINE void SlotSpanMetadata::AppendFreeList(
num_allocated_slots -= number_of_freed;
// If the span is marked full, or became empty, take the slow path to update
// internal state.
if (PA_UNLIKELY(marked_full || num_allocated_slots == 0)) {
if (marked_full || num_allocated_slots == 0) [[unlikely]] {
FreeSlowPath(number_of_freed);
} else {
// All single-slot allocations must go through the slow path to
@@ -718,7 +712,7 @@ PA_ALWAYS_INLINE void SlotSpanMetadata::Reset() {
num_unprovisioned_slots = static_cast<uint16_t>(num_slots_per_span);
PA_DCHECK(num_unprovisioned_slots);
ToSuperPageExtent()->IncrementNumberOfNonemptySlotSpans();
IncrementNumberOfNonemptySlotSpans();
next_slot_span = nullptr;
}
@@ -727,7 +721,6 @@ PA_ALWAYS_INLINE void SlotSpanMetadata::Reset() {
// early return is needed.
template <typename Callback>
void IterateSlotSpans(uintptr_t super_page,
bool with_quarantine,
Callback callback) {
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_DCHECK(!(super_page % kSuperPageAlignment));
@@ -735,8 +728,8 @@ void IterateSlotSpans(uintptr_t super_page,
DCheckRootLockIsAcquired(extent_entry->root);
#endif
auto* const first_page_metadata = PartitionPageMetadata::FromAddr(
SuperPagePayloadBegin(super_page, with_quarantine));
auto* const first_page_metadata =
PartitionPageMetadata::FromAddr(SuperPagePayloadBegin(super_page));
auto* const last_page_metadata = PartitionPageMetadata::FromAddr(
SuperPagePayloadEnd(super_page) - PartitionPageSize());
PartitionPageMetadata* page_metadata = nullptr;
@@ -362,7 +362,8 @@ void MakeSuperPageExtentEntriesShared(PartitionRoot* root,
}
// For normal-bucketed.
for (internal::PartitionSuperPageExtentEntry* extent = root->first_extent;
for (const internal::ReadOnlyPartitionSuperPageExtentEntry* extent =
root->first_extent;
extent != nullptr; extent = extent->next) {
// The page which contains the extent is in-used and shared mapping.
uintptr_t super_page = SuperPagesBeginFromExtent(extent);
@@ -375,7 +376,8 @@ void MakeSuperPageExtentEntriesShared(PartitionRoot* root,
}
// For direct-mapped.
for (const internal::PartitionDirectMapExtent* extent = root->direct_map_list;
for (const internal::ReadOnlyPartitionDirectMapExtent* extent =
root->direct_map_list;
extent != nullptr; extent = extent->next_extent) {
internal::PartitionAddressSpace::MapMetadata(
reinterpret_cast<uintptr_t>(extent) & internal::kSuperPageBaseMask,
@@ -1148,6 +1150,9 @@ void PartitionRoot::Init(PartitionOptions opts) {
PA_CHECK(!settings.memory_tagging_enabled_ ||
!settings.use_configurable_pool);
settings.use_random_memory_tagging_ =
opts.memory_tagging.random_memory_tagging == PartitionOptions::kEnabled;
settings.memory_tagging_reporting_mode_ =
opts.memory_tagging.reporting_mode;
#endif // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
@@ -1187,8 +1192,6 @@ void PartitionRoot::Init(PartitionOptions opts) {
#endif // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#endif // PA_CONFIG(EXTRAS_REQUIRED)
settings.quarantine_mode = QuarantineMode::kAlwaysDisabled;
// We mark the sentinel slot span as free to make sure it is skipped by our
// logic to find a new active slot span.
memset(&sentinel_bucket, 0, sizeof(sentinel_bucket));
@@ -1329,7 +1332,7 @@ bool PartitionRoot::TryReallocInPlaceForDirectMap(
internal::IsManagedByDirectMap(reinterpret_cast<uintptr_t>(slot_span)));
size_t raw_size = AdjustSizeForExtrasAdd(requested_size);
auto* extent = DirectMapExtent::FromSlotSpanMetadata(slot_span);
auto* extent = ReadOnlyDirectMapExtent::FromSlotSpanMetadata(slot_span);
size_t current_reservation_size = extent->reservation_size;
// Calculate the new reservation size the way PartitionDirectMap() would, but
// skip the alignment, because this call isn't requesting it.
@@ -1462,7 +1465,7 @@ bool PartitionRoot::TryReallocInPlaceForNormalBuckets(
if (slot_span->CanStoreRawSize()) {
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && PA_BUILDFLAG(DCHECKS_ARE_ON)
internal::InSlotMetadata* old_ref_count = nullptr;
if (PA_LIKELY(brp_enabled())) {
if (brp_enabled()) [[likely]] {
old_ref_count = InSlotMetadataPointerFromSlotStartAndSize(
slot_start, slot_span->bucket->slot_size);
}
@@ -1471,7 +1474,7 @@ bool PartitionRoot::TryReallocInPlaceForNormalBuckets(
size_t new_raw_size = AdjustSizeForExtrasAdd(new_size);
slot_span->SetRawSize(new_raw_size);
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && PA_BUILDFLAG(DCHECKS_ARE_ON)
if (PA_LIKELY(brp_enabled())) {
if (brp_enabled()) [[likely]] {
internal::InSlotMetadata* new_ref_count =
InSlotMetadataPointerFromSlotStartAndSize(
slot_start, slot_span->bucket->slot_size);
@@ -1491,7 +1494,7 @@ bool PartitionRoot::TryReallocInPlaceForNormalBuckets(
// place. When we cannot do it in place (`return false` above), the allocator
// falls back to free()+malloc(), so this is consistent.
ThreadCache* thread_cache = GetOrCreateThreadCache();
if (PA_LIKELY(ThreadCache::IsValid(thread_cache))) {
if (ThreadCache::IsValid(thread_cache)) [[likely]] {
thread_cache->RecordDeallocation(current_usable_size);
thread_cache->RecordAllocation(GetSlotUsableSize(slot_span));
}
@@ -1685,7 +1688,7 @@ void PartitionRoot::DumpStats(const char* partition_name,
}
}
for (DirectMapExtent* extent = direct_map_list;
for (const ReadOnlyDirectMapExtent* extent = direct_map_list;
extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
extent = extent->next_extent, ++num_direct_mapped_allocations) {
PA_DCHECK(!extent->next_extent ||
@@ -1903,7 +1906,7 @@ PA_NOINLINE void PartitionRoot::QuarantineForBrp(
void* object) {
auto usable_size = GetSlotUsableSize(slot_span);
auto hook = PartitionAllocHooks::GetQuarantineOverrideHook();
if (PA_UNLIKELY(hook)) {
if (hook) [[unlikely]] {
hook(object, usable_size);
} else {
internal::SecureMemset(object, internal::kQuarantinedByte, usable_size);
@@ -175,6 +175,7 @@ struct PartitionOptions {
struct {
EnableToggle enabled = kDisabled;
EnableToggle random_memory_tagging = kDisabled;
TagViolationReportingMode reporting_mode =
TagViolationReportingMode::kUndefined;
} memory_tagging;
@@ -205,19 +206,12 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
using SlotSpanMetadata = internal::SlotSpanMetadata;
using Bucket = internal::PartitionBucket;
using FreeListEntry = internal::PartitionFreelistEntry;
using SuperPageExtentEntry = internal::PartitionSuperPageExtentEntry;
using DirectMapExtent = internal::PartitionDirectMapExtent;
enum class QuarantineMode : uint8_t {
kAlwaysDisabled,
kDisabledByDefault,
kEnabled,
};
enum class ScanMode : uint8_t {
kDisabled,
kEnabled,
};
using WritableSuperPageExtentEntry =
internal::WritablePartitionSuperPageExtentEntry;
using ReadOnlySuperPageExtentEntry =
internal::ReadOnlyPartitionSuperPageExtentEntry;
using WritableDirectMapExtent = internal::WritablePartitionDirectMapExtent;
using ReadOnlyDirectMapExtent = internal::ReadOnlyPartitionDirectMapExtent;
enum class BucketDistribution : uint8_t { kNeutral, kDenser };
@@ -230,12 +224,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
// constructor.
Settings();
// Defines whether objects should be quarantined for this root.
QuarantineMode quarantine_mode = QuarantineMode::kAlwaysDisabled;
// Defines whether the root should be scanned.
ScanMode scan_mode = ScanMode::kDisabled;
// It's important to default to the 'neutral' distribution, otherwise a
// switch from 'dense' -> 'neutral' would leave some buckets with dirty
// memory forever, since no memory would be allocated from these, their
@@ -262,6 +250,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
bool scheduler_loop_quarantine = false;
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
bool memory_tagging_enabled_ = false;
bool use_random_memory_tagging_ = false;
TagViolationReportingMode memory_tagging_reporting_mode_ =
TagViolationReportingMode::kUndefined;
#endif // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
@@ -349,9 +338,9 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
uintptr_t next_super_page = 0;
uintptr_t next_partition_page = 0;
uintptr_t next_partition_page_end = 0;
SuperPageExtentEntry* current_extent = nullptr;
SuperPageExtentEntry* first_extent = nullptr;
DirectMapExtent* direct_map_list
ReadOnlySuperPageExtentEntry* current_extent = nullptr;
ReadOnlySuperPageExtentEntry* first_extent = nullptr;
ReadOnlyDirectMapExtent* direct_map_list
PA_GUARDED_BY(internal::PartitionRootLock(this)) = nullptr;
SlotSpanMetadata*
global_empty_slot_span_ring[internal::kMaxFreeableSpans] PA_GUARDED_BY(
@@ -371,8 +360,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
uintptr_t inverted_self = 0;
std::atomic<int> thread_caches_being_constructed_{0};
bool quarantine_always_for_testing = false;
size_t scheduler_loop_quarantine_branch_capacity_in_bytes = 0;
internal::LightweightQuarantineRoot scheduler_loop_quarantine_root;
// NoDestructor because we don't need to dequarantine objects as the root
@@ -593,6 +580,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
#endif // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
PA_ALWAYS_INLINE bool IsMemoryTaggingEnabled() const;
PA_ALWAYS_INLINE bool UseRandomMemoryTagging() const;
PA_ALWAYS_INLINE TagViolationReportingMode
memory_tagging_reporting_mode() const;
@@ -646,8 +634,17 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
PA_LOCKS_EXCLUDED(internal::PartitionRootLock(this));
PA_ALWAYS_INLINE void RawFreeWithThreadCache(uintptr_t slot_start,
void* object,
SlotSpanMetadata* slot_span);
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
// Sets a new MTE tag on the slot. This must not be called when an object
// enters BRP quarantine because it might cause a race with |raw_ptr|'s
// ref-count decrement. (crbug.com/357526108)
PA_ALWAYS_INLINE void RetagSlotIfNeeded(void* slot_start_ptr,
size_t slot_size);
#endif
// This is safe to do because we are switching to a bucket distribution with
// more buckets, meaning any allocations we have done before the switch are
// guaranteed to have a bucket under the new distribution when they are
@@ -698,7 +695,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
}
#endif
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (PA_LIKELY(brp_enabled())) {
if (brp_enabled()) [[likely]] {
return internal::kBRPPoolHandle;
} else {
return internal::kRegularPoolHandle;
@@ -708,46 +705,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
#endif // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
}
PA_ALWAYS_INLINE bool IsQuarantineAllowed() const {
return settings.quarantine_mode != QuarantineMode::kAlwaysDisabled;
}
PA_ALWAYS_INLINE bool IsQuarantineEnabled() const {
return settings.quarantine_mode == QuarantineMode::kEnabled;
}
PA_ALWAYS_INLINE bool ShouldQuarantine(void* object) const {
if (PA_UNLIKELY(settings.quarantine_mode != QuarantineMode::kEnabled)) {
return false;
}
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
if (PA_UNLIKELY(quarantine_always_for_testing)) {
return true;
}
// If quarantine is enabled and the tag overflows, move the containing slot
// to quarantine, to prevent the attacker from exploiting a pointer that has
// an old tag.
if (PA_LIKELY(IsMemoryTaggingEnabled())) {
return internal::HasOverflowTag(object);
}
// Default behaviour if MTE is not enabled for this PartitionRoot.
return true;
#else
return true;
#endif // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
}
PA_ALWAYS_INLINE void SetQuarantineAlwaysForTesting(bool value) {
quarantine_always_for_testing = value;
}
PA_ALWAYS_INLINE bool IsScanEnabled() const {
// Enabled scan implies enabled quarantine.
PA_DCHECK(settings.scan_mode != ScanMode::kEnabled ||
IsQuarantineEnabled());
return settings.scan_mode == ScanMode::kEnabled;
}
PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
GetDirectMapMetadataAndGuardPagesSize() {
// Because we need to fake a direct-map region to look like a super page, we
@@ -800,7 +757,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
// be rather complex. Then there is also the fear of the unknown. The
// existing cases were discovered through obscure, painful-to-debug crashes.
// Better save ourselves trouble with not-yet-discovered cases.
if (PA_UNLIKELY(size == 0)) {
if (size == 0) [[unlikely]] {
return 1;
}
return size;
@@ -910,6 +867,8 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
}
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
// TODO(crbug.com/40238514) This is an unused function. Start using it in
// tests and/or in production code.
static void EnableShadowMetadata(internal::PoolHandleMask mask);
PA_ALWAYS_INLINE std::ptrdiff_t ShadowPoolOffset() const {
@@ -1140,7 +1099,7 @@ PartitionAllocGetDirectMapSlotStartAndSizeInBRPPool(uintptr_t address) {
uintptr_t slot_start = SlotSpanMetadata::ToSlotSpanStart(slot_span);
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
auto* direct_map_metadata =
PartitionDirectMapMetadata::FromSlotSpanMetadata(slot_span);
ReadOnlyPartitionDirectMapMetadata::FromSlotSpanMetadata(slot_span);
size_t padding_for_alignment =
direct_map_metadata->direct_map_extent.padding_for_alignment;
PA_DCHECK(padding_for_alignment ==
@@ -1167,7 +1126,7 @@ PartitionAllocGetSlotStartAndSizeInBRPPool(uintptr_t address) {
auto directmap_slot_info =
PartitionAllocGetDirectMapSlotStartAndSizeInBRPPool(address);
if (PA_UNLIKELY(directmap_slot_info.slot_start)) {
if (directmap_slot_info.slot_start) [[unlikely]] {
return directmap_slot_info;
}
@@ -1235,7 +1194,7 @@ PA_ALWAYS_INLINE void PartitionAllocFreeForRefCounting(uintptr_t slot_start) {
auto hook = PartitionAllocHooks::GetQuarantineOverrideHook();
// If we have a hook the object segment is not necessarily filled
// with |kQuarantinedByte|.
if (PA_LIKELY(!hook)) {
if (!hook) [[likely]] {
unsigned char* object =
static_cast<unsigned char*>(root->SlotStartToObject(slot_start));
for (size_t i = 0; i < root->GetSlotUsableSize(slot_span); ++i) {
@@ -1251,7 +1210,8 @@ PA_ALWAYS_INLINE void PartitionAllocFreeForRefCounting(uintptr_t slot_start) {
root->total_count_of_brp_quarantined_slots.fetch_sub(
1, std::memory_order_relaxed);
root->RawFreeWithThreadCache(slot_start, slot_span);
root->RawFreeWithThreadCache(slot_start, SlotStartAddr2Ptr(slot_start),
slot_span);
}
#endif // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
@@ -1280,8 +1240,8 @@ PartitionRoot::AllocFromBucket(Bucket* bucket,
// first active slot span. However, fall back to the slow path if a
// higher-order alignment is requested, because an inner slot of an existing
// slot span is unlikely to satisfy it.
if (PA_LIKELY(slot_span_alignment <= internal::PartitionPageSize() &&
slot_start)) {
if (slot_span_alignment <= internal::PartitionPageSize() && slot_start)
[[likely]] {
*is_already_zeroed = false;
// This is a fast path, avoid calling GetSlotUsableSize() in Release builds
// as it is costlier. Copy its small bucket path instead.
@@ -1308,7 +1268,7 @@ PartitionRoot::AllocFromBucket(Bucket* bucket,
slot_start =
bucket->SlowPathAlloc(this, flags, raw_size, slot_span_alignment,
&slot_span, is_already_zeroed);
if (PA_UNLIKELY(!slot_start)) {
if (!slot_start) [[unlikely]] {
return 0;
}
PA_DCHECK(slot_span == SlotSpanMetadata::FromSlotStart(slot_start));
@@ -1384,7 +1344,7 @@ PA_ALWAYS_INLINE bool PartitionRoot::FreeProlog(void* object,
return true;
}
#endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
if (PA_UNLIKELY(!object)) {
if (!object) [[unlikely]] {
return true;
}
@@ -1413,6 +1373,14 @@ PA_ALWAYS_INLINE bool PartitionRoot::IsMemoryTaggingEnabled() const {
#endif // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
}
PA_ALWAYS_INLINE bool PartitionRoot::UseRandomMemoryTagging() const {
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
return settings.use_random_memory_tagging_;
#else
return false;
#endif // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
}
PA_ALWAYS_INLINE TagViolationReportingMode
PartitionRoot::memory_tagging_reporting_mode() const {
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
@@ -1430,7 +1398,7 @@ PA_ALWAYS_INLINE void PartitionRoot::FreeInlineInUnknownRoot(void* object) {
return;
}
if (PA_UNLIKELY(!object)) {
if (!object) [[unlikely]] {
return;
}
@@ -1457,7 +1425,7 @@ PA_ALWAYS_INLINE void PartitionRoot::FreeInline(void* object) {
return;
}
if (PA_UNLIKELY(!object)) {
if (!object) [[unlikely]] {
return;
}
@@ -1487,17 +1455,6 @@ PA_ALWAYS_INLINE void PartitionRoot::FreeInline(void* object) {
PA_DCHECK(slot_span ==
SlotSpanMetadata::FromSlotStart(slot_start.untagged_slot_start));
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
if (PA_LIKELY(IsMemoryTaggingEnabled())) {
const size_t slot_size = slot_span->bucket->slot_size;
if (PA_LIKELY(slot_size <= internal::kMaxMemoryTaggingSize)) {
// Retag the `object` to provide MTE UaF mitigation. Doing so
// invalidates the tag in the address of `object`, so it must
// be refreshed.
object = internal::TagMemoryRangeIncrement(object, slot_size);
}
}
#else // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
// We are going to read from |*slot_span| in all branches, but haven't done it
// yet.
//
@@ -1506,11 +1463,7 @@ PA_ALWAYS_INLINE void PartitionRoot::FreeInline(void* object) {
// parts (i.e. bucket pointer) from the rest. Indeed, every thread cache miss
// (or batch fill) will *write* to |slot_span->freelist_head|, leading to
// cacheline ping-pong.
//
// Don't do it when memory tagging is enabled, as |*slot_span| has already
// been touched above.
PA_PREFETCH(slot_span);
#endif // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
if constexpr (ContainsFlags(flags, FreeFlags::kZap)) {
if (settings.zapping_by_free_flags) {
@@ -1525,8 +1478,8 @@ PA_ALWAYS_INLINE void PartitionRoot::FreeInline(void* object) {
if constexpr (ContainsFlags(flags, FreeFlags::kSchedulerLoopQuarantine)) {
if (settings.scheduler_loop_quarantine) {
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
// TODO(keishi): Add PA_LIKELY when brp is fully enabled as |brp_enabled|
// will be false only for the aligned partition.
// TODO(keishi): Add `[[likely]]` when brp is fully enabled as
// `brp_enabled` will be false only for the aligned partition.
if (brp_enabled()) {
auto* ref_count = InSlotMetadataPointerFromSlotStartAndSize(
slot_start.untagged_slot_start, slot_span->bucket->slot_size);
@@ -1583,7 +1536,7 @@ PA_ALWAYS_INLINE void PartitionRoot::FreeNoHooksImmediate(
}
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (PA_LIKELY(brp_enabled())) {
if (brp_enabled()) [[likely]] {
auto* ref_count = InSlotMetadataPointerFromSlotStartAndSize(
slot_start, slot_span->bucket->slot_size);
// If there are no more references to the allocation, it can be freed
@@ -1592,11 +1545,11 @@ PA_ALWAYS_INLINE void PartitionRoot::FreeNoHooksImmediate(
// complete before we clear kMemoryHeldByAllocatorBit in
// ReleaseFromAllocator(), otherwise another thread may allocate and start
// using the slot in the middle of zapping.
if (PA_UNLIKELY(!ref_count->IsAliveWithNoKnownRefs())) {
if (!ref_count->IsAliveWithNoKnownRefs()) [[unlikely]] {
QuarantineForBrp(slot_span, object);
}
if (PA_UNLIKELY(!(ref_count->ReleaseFromAllocator()))) {
if (!(ref_count->ReleaseFromAllocator())) [[unlikely]] {
total_size_of_brp_quarantined_bytes.fetch_add(
slot_span->GetSlotSizeForBookkeeping(), std::memory_order_relaxed);
total_count_of_brp_quarantined_slots.fetch_add(1,
@@ -1617,14 +1570,15 @@ PA_ALWAYS_INLINE void PartitionRoot::FreeNoHooksImmediate(
#elif PA_CONFIG(ZERO_RANDOMLY_ON_FREE)
// `memset` only once in a while: we're trading off safety for time
// efficiency.
if (PA_UNLIKELY(internal::RandomPeriod()) &&
if (internal::RandomPeriod() [[unlikely]] &&
!IsDirectMappedBucket(slot_span->bucket)) {
internal::SecureMemset(internal::SlotStartAddr2Ptr(slot_start), 0,
slot_span->GetUtilizedSlotSize());
}
#endif // PA_CONFIG(ZERO_RANDOMLY_ON_FREE)
RawFreeWithThreadCache(slot_start, slot_span);
// TODO(keishi): Create function to convert |object| to |slot_start_ptr|.
void* slot_start_ptr = object;
RawFreeWithThreadCache(slot_start, slot_start_ptr, slot_span);
}
PA_ALWAYS_INLINE void PartitionRoot::FreeInSlotSpan(
@@ -1724,19 +1678,48 @@ PA_ALWAYS_INLINE void PartitionRoot::RawFreeBatch(FreeListEntry* head,
this->get_freelist_dispatcher());
}
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
PA_ALWAYS_INLINE void PartitionRoot::RetagSlotIfNeeded(void* slot_start_ptr,
size_t slot_size) {
// This branch is |likely| because HAS_MEMORY_TAGGING build flag is true for
// arm64 Android devices and only a small portion of them will have memory
// tagging enabled.
if (!IsMemoryTaggingEnabled()) [[likely]] {
return;
}
if (slot_size <= internal::kMaxMemoryTaggingSize) [[likely]] {
if (UseRandomMemoryTagging()) {
// Exclude the previous tag so that immediate use after free is detected
// 100% of the time.
uint8_t previous_tag = internal::ExtractTagFromPtr(slot_start_ptr);
internal::TagMemoryRangeRandomly(slot_start_ptr, slot_size,
1 << previous_tag);
} else {
internal::TagMemoryRangeIncrement(slot_start_ptr, slot_size);
}
}
}
#endif // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
PA_ALWAYS_INLINE void PartitionRoot::RawFreeWithThreadCache(
uintptr_t slot_start,
void* slot_start_ptr,
SlotSpanMetadata* slot_span) {
// PA_LIKELY: performance-sensitive partitions have a thread cache,
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
RetagSlotIfNeeded(slot_start_ptr, slot_span->bucket->slot_size);
#endif
// `[[likely]]`: performance-sensitive partitions have a thread cache,
// direct-mapped allocations are uncommon.
ThreadCache* thread_cache = GetThreadCache();
if (PA_LIKELY(ThreadCache::IsValid(thread_cache) &&
!IsDirectMappedBucket(slot_span->bucket))) {
if (ThreadCache::IsValid(thread_cache) &&
!IsDirectMappedBucket(slot_span->bucket)) [[likely]] {
size_t bucket_index =
static_cast<size_t>(slot_span->bucket - this->buckets);
std::optional<size_t> slot_size =
thread_cache->MaybePutInCache(slot_start, bucket_index);
if (PA_LIKELY(slot_size.has_value())) {
if (slot_size.has_value()) [[likely]] {
// This is a fast path, avoid calling GetSlotUsableSize() in Release
// builds as it is costlier. Copy its small bucket path instead.
PA_DCHECK(!slot_span->CanStoreRawSize());
@@ -1747,7 +1730,7 @@ PA_ALWAYS_INLINE void PartitionRoot::RawFreeWithThreadCache(
}
}
if (PA_LIKELY(ThreadCache::IsValid(thread_cache))) {
if (ThreadCache::IsValid(thread_cache)) [[likely]] {
// Accounting must be done outside `RawFree()`, as it's also called from
// the thread cache. We would double-count otherwise.
//
@@ -1770,7 +1753,7 @@ PA_ALWAYS_INLINE void PartitionRoot::RawFreeLocked(uintptr_t slot_start) {
PA_ALWAYS_INLINE PartitionRoot* PartitionRoot::FromSlotSpanMetadata(
SlotSpanMetadata* slot_span) {
auto* extent_entry = reinterpret_cast<SuperPageExtentEntry*>(
auto* extent_entry = reinterpret_cast<ReadOnlySuperPageExtentEntry*>(
reinterpret_cast<uintptr_t>(slot_span) & internal::SystemPageBaseMask());
return extent_entry->root;
}
@@ -1855,7 +1838,7 @@ PA_ALWAYS_INLINE void PartitionRoot::RecommitSystemPagesForData(
auto page_accessibility = GetPageAccessibility(request_tagging);
bool ok = TryRecommitSystemPages(address, length, page_accessibility,
accessibility_disposition);
if (PA_UNLIKELY(!ok)) {
if (!ok) [[unlikely]] {
// Decommit some memory and retry. The alternative is crashing.
DecommitEmptySlotSpans();
RecommitSystemPages(address, length, page_accessibility,
@@ -1876,7 +1859,7 @@ PA_ALWAYS_INLINE bool PartitionRoot::TryRecommitSystemPagesForDataInternal(
auto page_accessibility = GetPageAccessibility(request_tagging);
bool ok = TryRecommitSystemPages(address, length, page_accessibility,
accessibility_disposition);
if (PA_UNLIKELY(!ok)) {
if (!ok) [[unlikely]] {
{
// Decommit some memory and retry. The alternative is crashing.
if constexpr (!already_locked) {
@@ -1952,9 +1935,9 @@ PartitionRoot::GetUsableSizeWithMac11MallocSizeHack(void* ptr) {
#if PA_CONFIG(MAYBE_ENABLE_MAC11_MALLOC_SIZE_HACK)
// Check |mac11_malloc_size_hack_enabled_| flag first as this doesn't
// concern OS versions other than macOS 11.
if (PA_UNLIKELY(root->settings.mac11_malloc_size_hack_enabled_ &&
usable_size ==
root->settings.mac11_malloc_size_hack_usable_size_)) {
if (root->settings.mac11_malloc_size_hack_enabled_ &&
usable_size == root->settings.mac11_malloc_size_hack_usable_size_)
[[unlikely]] {
auto [slot_start, slot_size] =
internal::PartitionAllocGetSlotStartAndSizeInBRPPool(UntagPtr(ptr));
auto* ref_count =
@@ -2093,7 +2076,7 @@ PA_ALWAYS_INLINE void* PartitionRoot::AllocInternal(size_t requested_size,
AllocInternalNoHooks<flags>(requested_size, slot_span_alignment);
if constexpr (!no_hooks) {
if (PA_UNLIKELY(hooks_enabled)) {
if (hooks_enabled) [[unlikely]] {
PartitionAllocHooks::AllocationObserverHookIfEnabled(
CreateAllocationNotificationData(object, requested_size, type_name));
}
@@ -2138,16 +2121,16 @@ PA_ALWAYS_INLINE void* PartitionRoot::AllocInternalNoHooks(
// Don't use thread cache if higher order alignment is requested, because the
// thread cache will not be able to satisfy it.
//
// PA_LIKELY: performance-sensitive partitions use the thread cache.
if (PA_LIKELY(ThreadCache::IsValid(thread_cache) &&
slot_span_alignment <= internal::PartitionPageSize())) {
// `[[likely]]`: performance-sensitive partitions use the thread cache.
if (ThreadCache::IsValid(thread_cache) &&
slot_span_alignment <= internal::PartitionPageSize()) [[likely]] {
// Note: getting slot_size from the thread cache rather than by
// `buckets[bucket_index].slot_size` to avoid touching `buckets` on the fast
// path.
slot_start = thread_cache->GetFromCache(bucket_index, &slot_size);
// PA_LIKELY: median hit rate in the thread cache is 95%, from metrics.
if (PA_LIKELY(slot_start)) {
// `[[likely]]`: median hit rate in the thread cache is 95%, from metrics.
if (slot_start) [[likely]] {
// This follows the logic of SlotSpanMetadata::GetUsableSize for small
// buckets, which is too expensive to call here.
// Keep it in sync!
@@ -2177,11 +2160,11 @@ PA_ALWAYS_INLINE void* PartitionRoot::AllocInternalNoHooks(
&usable_size, &slot_size, &is_already_zeroed);
}
if (PA_UNLIKELY(!slot_start)) {
if (!slot_start) [[unlikely]] {
return nullptr;
}
if (PA_LIKELY(ThreadCache::IsValid(thread_cache))) {
if (ThreadCache::IsValid(thread_cache)) [[likely]] {
thread_cache->RecordAllocation(usable_size);
}
@@ -2235,7 +2218,7 @@ PA_ALWAYS_INLINE void* PartitionRoot::AllocInternalNoHooks(
// Fill the region kUninitializedByte (on debug builds, if not requested to 0)
// or 0 (if requested and not 0 already).
constexpr bool zero_fill = ContainsFlags(flags, AllocFlags::kZeroFill);
// PA_LIKELY: operator new() calls malloc(), not calloc().
// `[[likely]]`: operator new() calls malloc(), not calloc().
if constexpr (!zero_fill) {
// memset() can be really expensive.
#if PA_BUILDFLAG(EXPENSIVE_DCHECKS_ARE_ON)
@@ -2246,14 +2229,14 @@ PA_ALWAYS_INLINE void* PartitionRoot::AllocInternalNoHooks(
}
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (PA_LIKELY(brp_enabled())) {
if (brp_enabled()) [[likely]] {
bool needs_mac11_malloc_size_hack = false;
#if PA_CONFIG(MAYBE_ENABLE_MAC11_MALLOC_SIZE_HACK)
// Only apply hack to size 32 allocations on macOS 11. There is a buggy
// assertion that malloc_size() equals sizeof(class_rw_t) which is 32.
if (PA_UNLIKELY(settings.mac11_malloc_size_hack_enabled_ &&
requested_size ==
internal::kMac11MallocSizeHackRequestedSize)) {
if (settings.mac11_malloc_size_hack_enabled_ &&
requested_size == internal::kMac11MallocSizeHackRequestedSize)
[[unlikely]] {
needs_mac11_malloc_size_hack = true;
}
#endif // PA_CONFIG(MAYBE_ENABLE_MAC11_MALLOC_SIZE_HACK)
@@ -2324,7 +2307,7 @@ PA_ALWAYS_INLINE void* PartitionRoot::AlignedAllocInline(
// Handle cases such as size = 16, alignment = 64.
// Wastes memory when a large alignment is requested with a small size, but
// this is hard to avoid, and should not be too common.
if (PA_UNLIKELY(raw_size < alignment)) {
if (raw_size < alignment) [[unlikely]] {
raw_size = alignment;
} else {
// PartitionAlloc only guarantees alignment for power-of-two sized
@@ -2340,7 +2323,7 @@ PA_ALWAYS_INLINE void* PartitionRoot::AlignedAllocInline(
adjusted_size = AdjustSizeForExtrasSubtract(raw_size);
// Overflow check. adjusted_size must be larger or equal to requested_size.
if (PA_UNLIKELY(adjusted_size < requested_size)) {
if (adjusted_size < requested_size) [[unlikely]] {
if constexpr (ContainsFlags(flags, AllocFlags::kReturnNull)) {
return nullptr;
}
@@ -2384,12 +2367,12 @@ void* PartitionRoot::ReallocInline(void* ptr,
}
return result;
#else
if (PA_UNLIKELY(!ptr)) {
if (!ptr) [[unlikely]] {
return AllocInternal<alloc_flags>(new_size, internal::PartitionPageSize(),
type_name);
}
if (PA_UNLIKELY(!new_size)) {
if (!new_size) [[unlikely]] {
FreeInUnknownRoot<free_flags>(ptr);
return nullptr;
}
@@ -2405,11 +2388,11 @@ void* PartitionRoot::ReallocInline(void* ptr,
const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
bool overridden = false;
size_t old_usable_size;
if (PA_UNLIKELY(!no_hooks && hooks_enabled)) {
if (!no_hooks && hooks_enabled) [[unlikely]] {
overridden = PartitionAllocHooks::ReallocOverrideHookIfEnabled(
&old_usable_size, ptr);
}
if (PA_LIKELY(!overridden)) {
if (!overridden) [[likely]] {
// |ptr| may have been allocated in another root.
SlotSpanMetadata* slot_span = SlotSpanMetadata::FromObject(ptr);
auto* old_root = PartitionRoot::FromSlotSpanMetadata(slot_span);
@@ -2422,7 +2405,7 @@ void* PartitionRoot::ReallocInline(void* ptr,
DCheckIsValidSlotSpan(slot_span);
old_usable_size = old_root->GetSlotUsableSize(slot_span);
if (PA_UNLIKELY(slot_span->bucket->is_direct_mapped())) {
if (slot_span->bucket->is_direct_mapped()) [[unlikely]] {
tried_in_place_for_direct_map = true;
// We may be able to perform the realloc in place by changing the
// accessibility of memory pages and, if reducing the size, decommitting
@@ -2431,7 +2414,7 @@ void* PartitionRoot::ReallocInline(void* ptr,
}
}
if (success) {
if (PA_UNLIKELY(!no_hooks && hooks_enabled)) {
if (!no_hooks && hooks_enabled) [[unlikely]] {
PartitionAllocHooks::ReallocObserverHookIfEnabled(
CreateFreeNotificationData(ptr),
CreateAllocationNotificationData(ptr, new_size, type_name));
@@ -2439,7 +2422,7 @@ void* PartitionRoot::ReallocInline(void* ptr,
return ptr;
}
if (PA_LIKELY(!tried_in_place_for_direct_map)) {
if (!tried_in_place_for_direct_map) [[likely]] {
if (old_root->TryReallocInPlaceForNormalBuckets(ptr, slot_span,
new_size)) {
return ptr;
@@ -2480,7 +2463,7 @@ PartitionRoot::AllocationCapacityFromRequestedSize(size_t size) const {
PA_DCHECK(!bucket.slot_size || bucket.slot_size >= size);
PA_DCHECK(!(bucket.slot_size % internal::kSmallestBucket));
if (PA_LIKELY(!bucket.is_direct_mapped())) {
if (!bucket.is_direct_mapped()) [[likely]] {
size = bucket.slot_size;
} else if (size > internal::MaxDirectMapped()) {
// Too large to allocate => return the size unchanged.
@@ -2494,9 +2477,9 @@ PartitionRoot::AllocationCapacityFromRequestedSize(size_t size) const {
ThreadCache* PartitionRoot::GetOrCreateThreadCache() {
ThreadCache* thread_cache = nullptr;
if (PA_LIKELY(settings.with_thread_cache)) {
if (settings.with_thread_cache) [[likely]] {
thread_cache = ThreadCache::Get();
if (PA_UNLIKELY(!ThreadCache::IsValid(thread_cache))) {
if (!ThreadCache::IsValid(thread_cache)) [[unlikely]] {
thread_cache = MaybeInitThreadCache();
}
}
@@ -2504,14 +2487,17 @@ ThreadCache* PartitionRoot::GetOrCreateThreadCache() {
}
ThreadCache* PartitionRoot::GetThreadCache() {
return PA_LIKELY(settings.with_thread_cache) ? ThreadCache::Get() : nullptr;
if (settings.with_thread_cache) [[likely]] {
return ThreadCache::Get();
}
return nullptr;
}
// private.
internal::LightweightQuarantineBranch&
PartitionRoot::GetSchedulerLoopQuarantineBranch() {
ThreadCache* thread_cache = GetThreadCache();
if (PA_LIKELY(ThreadCache::IsValid(thread_cache))) {
if (ThreadCache::IsValid(thread_cache)) [[likely]] {
return thread_cache->GetSchedulerLoopQuarantineBranch();
} else {
return *scheduler_loop_quarantine->get();
@@ -15,19 +15,56 @@
#include "partition_alloc/reservation_offset_table.h"
// Should not include partition_root.h, partition_bucket.h, partition_page.h.
// For IsQuarantineAllowed(), use partition_dcheck_helper.h instead of
// partition_root.h.
namespace partition_alloc::internal {
struct WritablePartitionSuperPageExtentEntry;
struct ReadOnlyPartitionSuperPageExtentEntry;
// An "extent" is a span of consecutive superpages. We link the partition's next
// extent (if there is one) to the very start of a superpage's metadata area.
template <const MetadataKind kind>
struct PartitionSuperPageExtentEntry {
PartitionRoot* root;
PartitionSuperPageExtentEntry* next;
uint16_t number_of_consecutive_super_pages;
uint16_t number_of_nonempty_slot_spans;
// The data member of PartitionSuperPageExtentEntry. To make ReadOnly- and
// WritablePartitionSuperPageExtentEntry have the same data member and the
// same memory layout, all the data member are put into this struct. ReadOnly-
// and WritablePartitionSuperPageExtentEntry extend this class without adding
// any data members.
MaybeConstT<kind, PartitionRoot*> root;
MaybeConstT<kind, ReadOnlyPartitionSuperPageExtentEntry*> next;
MaybeConstT<kind, uint16_t> number_of_consecutive_super_pages;
MaybeConstT<kind, uint16_t> number_of_nonempty_slot_spans;
};
struct ReadOnlyPartitionSuperPageExtentEntry
: public PartitionSuperPageExtentEntry<MetadataKind::kReadOnly> {
WritablePartitionSuperPageExtentEntry* ToWritable(
const PartitionRoot* partition_root) {
return ToWritableInternal(root);
}
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
ReadOnlyPartitionSuperPageExtentEntry* ToReadOnly() { return this; }
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
private:
// In order to resolve circular dependencies, i.e. ToWritable() needs
// PartitionRoot, define template method: ToWritableInternal() here and
// ToWritable() uses it.
template <typename T>
WritablePartitionSuperPageExtentEntry* ToWritableInternal(
[[maybe_unused]] T* partition_root) {
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
return reinterpret_cast<WritablePartitionSuperPageExtentEntry*>(
reinterpret_cast<intptr_t>(this) + partition_root->ShadowPoolOffset());
#else
return reinterpret_cast<WritablePartitionSuperPageExtentEntry*>(this);
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
}
};
struct WritablePartitionSuperPageExtentEntry
: public PartitionSuperPageExtentEntry<MetadataKind::kWritable> {
PA_ALWAYS_INLINE void IncrementNumberOfNonemptySlotSpans() {
DCheckNumberOfPartitionPagesInSuperPagePayload(
this, root, number_of_nonempty_slot_spans);
@@ -38,14 +75,44 @@ struct PartitionSuperPageExtentEntry {
PA_DCHECK(number_of_nonempty_slot_spans);
--number_of_nonempty_slot_spans;
}
#if !PA_CONFIG(ENABLE_SHADOW_METADATA)
WritablePartitionSuperPageExtentEntry* ToWritable() { return this; }
#endif // !PA_CONFIG(ENABLE_SHADOW_METADATA)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
ReadOnlyPartitionSuperPageExtentEntry* ToReadOnly(
const PartitionRoot* partition_root) {
return ToReadOnlyInternal(partition_root);
}
private:
// In order to resolve circular dependencies, i.e. ToReadOnly() needs
// PartitionRoot, define template method: ToReadOnlyInternal() and
// ToReadOnly() uses it.
template <typename T>
ReadOnlyPartitionSuperPageExtentEntry* ToReadOnlyInternal(
[[maybe_unused]] T* partition_root) {
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
return reinterpret_cast<ReadOnlyPartitionSuperPageExtentEntry*>(
reinterpret_cast<intptr_t>(this) - partition_root->ShadowPoolOffset());
#else
return reinterpret_cast<ReadOnlyPartitionSuperPageExtentEntry*>(this);
#endif // PA_CONFIG(ENABLE_SHADOW_METADATA)
}
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
};
static_assert(sizeof(ReadOnlyPartitionSuperPageExtentEntry) ==
sizeof(WritablePartitionSuperPageExtentEntry),
"ReadOnlyPartitionSuperPageExtentEntry and "
"WritablePartitionSuperPageExtentEntry must have the same size");
static_assert(
sizeof(PartitionSuperPageExtentEntry) <= kPageMetadataSize,
sizeof(ReadOnlyPartitionSuperPageExtentEntry) <= kPageMetadataSize,
"PartitionSuperPageExtentEntry must be able to fit in a metadata slot");
static_assert(kMaxSuperPagesInPool / kSuperPageSize <=
std::numeric_limits<
decltype(PartitionSuperPageExtentEntry ::
decltype(ReadOnlyPartitionSuperPageExtentEntry ::
number_of_consecutive_super_pages)>::max(),
"number_of_consecutive_super_pages must be big enough");
@@ -55,7 +122,7 @@ static_assert(kMaxSuperPagesInPool / kSuperPageSize <=
// CAUTION! |extent| must point to the extent of the first super page in the
// range of consecutive super pages.
PA_ALWAYS_INLINE uintptr_t
SuperPagesBeginFromExtent(const PartitionSuperPageExtentEntry* extent) {
SuperPagesBeginFromExtent(const ReadOnlyPartitionSuperPageExtentEntry* extent) {
PA_DCHECK(0 < extent->number_of_consecutive_super_pages);
uintptr_t extent_as_uintptr = reinterpret_cast<uintptr_t>(extent);
PA_DCHECK(IsManagedByNormalBuckets(extent_as_uintptr));
@@ -68,7 +135,7 @@ SuperPagesBeginFromExtent(const PartitionSuperPageExtentEntry* extent) {
// CAUTION! |extent| must point to the extent of the first super page in the
// range of consecutive super pages.
PA_ALWAYS_INLINE uintptr_t
SuperPagesEndFromExtent(const PartitionSuperPageExtentEntry* extent) {
SuperPagesEndFromExtent(const ReadOnlyPartitionSuperPageExtentEntry* extent) {
return SuperPagesBeginFromExtent(extent) +
(extent->number_of_consecutive_super_pages * kSuperPageSize);
}
@@ -108,7 +108,7 @@ PA_ALWAYS_INLINE void* PartitionTlsGet(PartitionTlsKey key) {
DWORD saved_error = GetLastError();
void* ret = TlsGetValue(key);
// Only non-zero errors need to be restored.
if (PA_UNLIKELY(saved_error)) {
if (saved_error) [[unlikely]] {
SetLastError(saved_error);
}
return ret;
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// IWYU pragma: private, include "base/memory/raw_ptr.h"
#ifndef PARTITION_ALLOC_POINTERS_RAW_PTR_H_
#define PARTITION_ALLOC_POINTERS_RAW_PTR_H_
@@ -454,7 +456,7 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
PA_ALWAYS_INLINE constexpr raw_ptr& operator=(raw_ptr&& p) noexcept {
// Unlike the the copy version of this operator, this branch is necessary
// for correctness.
if (PA_LIKELY(this != &p)) {
if (this != &p) [[likely]] {
Impl::ReleaseWrappedPtr(wrapped_ptr_);
Impl::Untrace(tracer_.owner_id());
wrapped_ptr_ = p.wrapped_ptr_;
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// IWYU pragma: private, include "base/memory/raw_ptr_cast.h"
#ifndef PARTITION_ALLOC_POINTERS_RAW_PTR_CAST_H_
#define PARTITION_ALLOC_POINTERS_RAW_PTR_CAST_H_
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// IWYU pragma: private, include "base/memory/raw_ptr_exclusion.h"
#ifndef PARTITION_ALLOC_POINTERS_RAW_PTR_EXCLUSION_H_
#define PARTITION_ALLOC_POINTERS_RAW_PTR_EXCLUSION_H_
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// IWYU pragma: private, include "base/memory/raw_ref.h"
#ifndef PARTITION_ALLOC_POINTERS_RAW_REF_H_
#define PARTITION_ALLOC_POINTERS_RAW_REF_H_
@@ -213,9 +213,9 @@ class PoolOffsetFreelistEntry {
// Regular freelists always point to an entry within the same super page.
//
// This is most likely a PartitionAlloc bug if this triggers.
if (PA_UNLIKELY(entry &&
(SlotStartPtr2Addr(this) & kSuperPageBaseMask) !=
(SlotStartPtr2Addr(entry) & kSuperPageBaseMask))) {
if (entry && (SlotStartPtr2Addr(this) & kSuperPageBaseMask) !=
(SlotStartPtr2Addr(entry) & kSuperPageBaseMask))
[[unlikely]] {
FreelistCorruptionDetected(0);
}
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
@@ -257,7 +257,7 @@ class PoolOffsetFreelistEntry {
// which is meant to prevent from breaking out of the pool in face of
// a corruption (see PoolOffsetFreelistEntry class-level comment).
auto* ret = encoded_next_.Decode(pool_info);
if (PA_UNLIKELY(!IsWellFormed<for_thread_cache>(pool_info, this, ret))) {
if (!IsWellFormed<for_thread_cache>(pool_info, this, ret)) [[unlikely]] {
if constexpr (crash_on_corruption) {
// Put the corrupted data on the stack, it may give us more information
// about what kind of corruption that was.
@@ -12,79 +12,40 @@
namespace allocator_shim {
struct AllocatorDispatch {
using AllocFn = void*(const AllocatorDispatch* self,
size_t size,
void* context);
using AllocUncheckedFn = void*(const AllocatorDispatch* self,
size_t size,
void* context);
using AllocZeroInitializedFn = void*(const AllocatorDispatch* self,
size_t n,
size_t size,
void* context);
using AllocAlignedFn = void*(const AllocatorDispatch* self,
size_t alignment,
size_t size,
void* context);
using ReallocFn = void*(const AllocatorDispatch* self,
void* address,
size_t size,
void* context);
using ReallocUncheckedFn = void*(const AllocatorDispatch* self,
void* ptr,
size_t size,
void* context);
using FreeFn = void(const AllocatorDispatch* self,
void* address,
void* context);
using AllocFn = void*(size_t size, void* context);
using AllocUncheckedFn = void*(size_t size, void* context);
using AllocZeroInitializedFn = void*(size_t n, size_t size, void* context);
using AllocAlignedFn = void*(size_t alignment, size_t size, void* context);
using ReallocFn = void*(void* address, size_t size, void* context);
using ReallocUncheckedFn = void*(void* ptr, size_t size, void* context);
using FreeFn = void(void* address, void* context);
// Returns the allocated size of user data (not including heap overhead).
// Can be larger than the requested size.
using GetSizeEstimateFn = size_t(const AllocatorDispatch* self,
void* address,
void* context);
using GoodSizeFn = size_t(const AllocatorDispatch* self,
size_t size,
void* context);
using ClaimedAddressFn = bool(const AllocatorDispatch* self,
void* address,
void* context);
using BatchMallocFn = unsigned(const AllocatorDispatch* self,
size_t size,
using GetSizeEstimateFn = size_t(void* address, void* context);
using GoodSizeFn = size_t(size_t size, void* context);
using ClaimedAddressFn = bool(void* address, void* context);
using BatchMallocFn = unsigned(size_t size,
void** results,
unsigned num_requested,
void* context);
using BatchFreeFn = void(const AllocatorDispatch* self,
void** to_be_freed,
using BatchFreeFn = void(void** to_be_freed,
unsigned num_to_be_freed,
void* context);
using FreeDefiniteSizeFn = void(const AllocatorDispatch* self,
void* ptr,
size_t size,
void* context);
using TryFreeDefaultFn = void(const AllocatorDispatch* self,
void* ptr,
void* context);
using AlignedMallocFn = void*(const AllocatorDispatch* self,
size_t size,
size_t alignment,
void* context);
using AlignedMallocUncheckedFn = void*(const AllocatorDispatch* self,
size_t size,
using FreeDefiniteSizeFn = void(void* ptr, size_t size, void* context);
using TryFreeDefaultFn = void(void* ptr, void* context);
using AlignedMallocFn = void*(size_t size, size_t alignment, void* context);
using AlignedMallocUncheckedFn = void*(size_t size,
size_t alignment,
void* context);
using AlignedReallocFn = void*(const AllocatorDispatch* self,
void* address,
using AlignedReallocFn = void*(void* address,
size_t size,
size_t alignment,
void* context);
using AlignedReallocUncheckedFn = void*(const AllocatorDispatch* self,
void* address,
using AlignedReallocUncheckedFn = void*(void* address,
size_t size,
size_t alignment,
void* context);
using AlignedFreeFn = void(const AllocatorDispatch* self,
void* address,
void* context);
using AlignedFreeFn = void(void* address, void* context);
AllocFn* alloc_function;
AllocUncheckedFn* alloc_unchecked_function;
@@ -107,6 +107,16 @@ void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch);
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
const AllocatorDispatch* GetAllocatorDispatchChainHeadForTesting();
class PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
AutoResetAllocatorDispatchChainForTesting {
public:
AutoResetAllocatorDispatchChainForTesting();
~AutoResetAllocatorDispatchChainForTesting();
private:
const allocator_shim::AllocatorDispatch* original_dispatch_;
};
#if PA_BUILDFLAG(IS_APPLE)
// The fallback function to be called when try_free_default_function receives a
// pointer which doesn't belong to the allocator.
@@ -11,50 +11,41 @@
namespace allocator_shim {
namespace {
void* MallocImpl(const AllocatorDispatch*, size_t size, void* context) {
void* MallocImpl(size_t size, void* context) {
MallocZoneFunctions& functions = GetFunctionsForZone(context);
return functions.malloc(reinterpret_cast<struct _malloc_zone_t*>(context),
size);
}
void* CallocImpl(const AllocatorDispatch*,
size_t n,
size_t size,
void* context) {
void* CallocImpl(size_t n, size_t size, void* context) {
MallocZoneFunctions& functions = GetFunctionsForZone(context);
return functions.calloc(reinterpret_cast<struct _malloc_zone_t*>(context), n,
size);
}
void* MemalignImpl(const AllocatorDispatch*,
size_t alignment,
size_t size,
void* context) {
void* MemalignImpl(size_t alignment, size_t size, void* context) {
MallocZoneFunctions& functions = GetFunctionsForZone(context);
return functions.memalign(reinterpret_cast<struct _malloc_zone_t*>(context),
alignment, size);
}
void* ReallocImpl(const AllocatorDispatch*,
void* ptr,
size_t size,
void* context) {
void* ReallocImpl(void* ptr, size_t size, void* context) {
MallocZoneFunctions& functions = GetFunctionsForZone(context);
return functions.realloc(reinterpret_cast<struct _malloc_zone_t*>(context),
ptr, size);
}
void FreeImpl(const AllocatorDispatch*, void* ptr, void* context) {
void FreeImpl(void* ptr, void* context) {
MallocZoneFunctions& functions = GetFunctionsForZone(context);
functions.free(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
}
size_t GetSizeEstimateImpl(const AllocatorDispatch*, void* ptr, void* context) {
size_t GetSizeEstimateImpl(void* ptr, void* context) {
MallocZoneFunctions& functions = GetFunctionsForZone(context);
return functions.size(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
}
size_t GoodSizeImpl(const AllocatorDispatch*, size_t size, void* context) {
size_t GoodSizeImpl(size_t size, void* context) {
// Technically, libmalloc will only call good_size() on the default zone for
// malloc_good_size(), but it doesn't matter that we are calling it on another
// one.
@@ -63,7 +54,7 @@ size_t GoodSizeImpl(const AllocatorDispatch*, size_t size, void* context) {
size);
}
bool ClaimedAddressImpl(const AllocatorDispatch*, void* ptr, void* context) {
bool ClaimedAddressImpl(void* ptr, void* context) {
MallocZoneFunctions& functions = GetFunctionsForZone(context);
if (functions.claimed_address) {
return functions.claimed_address(
@@ -75,8 +66,7 @@ bool ClaimedAddressImpl(const AllocatorDispatch*, void* ptr, void* context) {
return functions.size(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
}
unsigned BatchMallocImpl(const AllocatorDispatch* self,
size_t size,
unsigned BatchMallocImpl(size_t size,
void** results,
unsigned num_requested,
void* context) {
@@ -86,8 +76,7 @@ unsigned BatchMallocImpl(const AllocatorDispatch* self,
num_requested);
}
void BatchFreeImpl(const AllocatorDispatch* self,
void** to_be_freed,
void BatchFreeImpl(void** to_be_freed,
unsigned num_to_be_freed,
void* context) {
MallocZoneFunctions& functions = GetFunctionsForZone(context);
@@ -95,18 +84,13 @@ void BatchFreeImpl(const AllocatorDispatch* self,
to_be_freed, num_to_be_freed);
}
void FreeDefiniteSizeImpl(const AllocatorDispatch* self,
void* ptr,
size_t size,
void* context) {
void FreeDefiniteSizeImpl(void* ptr, size_t size, void* context) {
MallocZoneFunctions& functions = GetFunctionsForZone(context);
functions.free_definite_size(
reinterpret_cast<struct _malloc_zone_t*>(context), ptr, size);
}
void TryFreeDefaultImpl(const AllocatorDispatch* self,
void* ptr,
void* context) {
void TryFreeDefaultImpl(void* ptr, void* context) {
MallocZoneFunctions& functions = GetFunctionsForZone(context);
if (functions.try_free_default) {
return functions.try_free_default(
@@ -33,79 +33,63 @@ using allocator_shim::AllocatorDispatch;
// most platforms), and tests expect that.
constexpr size_t kMaxAllowedSize = std::numeric_limits<int>::max() - (1 << 12);
void* GlibcMalloc(const AllocatorDispatch*, size_t size, void* context) {
void* GlibcMalloc(size_t size, void* context) {
// Cannot force glibc's malloc() to crash when a large size is requested, do
// it in the shim instead.
if (PA_UNLIKELY(size >= kMaxAllowedSize)) {
if (size >= kMaxAllowedSize) [[unlikely]] {
partition_alloc::TerminateBecauseOutOfMemory(size);
}
return __libc_malloc(size);
}
void* GlibcUncheckedMalloc(const AllocatorDispatch*,
size_t size,
void* context) {
if (PA_UNLIKELY(size >= kMaxAllowedSize)) {
void* GlibcUncheckedMalloc(size_t size, void* context) {
if (size >= kMaxAllowedSize) [[unlikely]] {
return nullptr;
}
return __libc_malloc(size);
}
void* GlibcCalloc(const AllocatorDispatch*,
size_t n,
size_t size,
void* context) {
void* GlibcCalloc(size_t n, size_t size, void* context) {
const auto total = partition_alloc::internal::base::CheckMul(n, size);
if (PA_UNLIKELY(!total.IsValid() || total.ValueOrDie() >= kMaxAllowedSize)) {
if (!total.IsValid() || total.ValueOrDie() >= kMaxAllowedSize) [[unlikely]] {
partition_alloc::TerminateBecauseOutOfMemory(size * n);
}
return __libc_calloc(n, size);
}
void* GlibcRealloc(const AllocatorDispatch*,
void* address,
size_t size,
void* context) {
if (PA_UNLIKELY(size >= kMaxAllowedSize)) {
void* GlibcRealloc(void* address, size_t size, void* context) {
if (size >= kMaxAllowedSize) [[unlikely]] {
partition_alloc::TerminateBecauseOutOfMemory(size);
}
return __libc_realloc(address, size);
}
void* GlibcUncheckedRealloc(const AllocatorDispatch*,
void* address,
size_t size,
void* context) {
if (PA_UNLIKELY(size >= kMaxAllowedSize)) {
void* GlibcUncheckedRealloc(void* address, size_t size, void* context) {
if (size >= kMaxAllowedSize) [[unlikely]] {
return nullptr;
}
return __libc_realloc(address, size);
}
void* GlibcMemalign(const AllocatorDispatch*,
size_t alignment,
size_t size,
void* context) {
if (PA_UNLIKELY(size >= kMaxAllowedSize)) {
void* GlibcMemalign(size_t alignment, size_t size, void* context) {
if (size >= kMaxAllowedSize) [[unlikely]] {
partition_alloc::TerminateBecauseOutOfMemory(size);
}
return __libc_memalign(alignment, size);
}
void GlibcFree(const AllocatorDispatch*, void* address, void* context) {
void GlibcFree(void* address, void* context) {
__libc_free(address);
}
PA_NO_SANITIZE("cfi-icall")
size_t GlibcGetSizeEstimate(const AllocatorDispatch*,
void* address,
void* context) {
size_t GlibcGetSizeEstimate(void* address, void* context) {
// glibc does not expose an alias to resolve malloc_usable_size. Dynamically
// resolve it instead. This should be safe because glibc (and hence dlfcn)
// does not use malloc_size internally and so there should not be a risk of
@@ -29,38 +29,27 @@ namespace {
using allocator_shim::AllocatorDispatch;
void* RealMalloc(const AllocatorDispatch*, size_t size, void* context) {
void* RealMalloc(size_t size, void* context) {
return __real_malloc(size);
}
void* RealCalloc(const AllocatorDispatch*,
size_t n,
size_t size,
void* context) {
void* RealCalloc(size_t n, size_t size, void* context) {
return __real_calloc(n, size);
}
void* RealRealloc(const AllocatorDispatch*,
void* address,
size_t size,
void* context) {
void* RealRealloc(void* address, size_t size, void* context) {
return __real_realloc(address, size);
}
void* RealMemalign(const AllocatorDispatch*,
size_t alignment,
size_t size,
void* context) {
void* RealMemalign(size_t alignment, size_t size, void* context) {
return __real_memalign(alignment, size);
}
void RealFree(const AllocatorDispatch*, void* address, void* context) {
void RealFree(void* address, void* context) {
__real_free(address);
}
size_t RealSizeEstimate(const AllocatorDispatch*,
void* address,
void* context) {
size_t RealSizeEstimate(void* address, void* context) {
return __real_malloc_usable_size(address);
}
@@ -25,6 +25,7 @@
#include "partition_alloc/partition_root.h"
#include "partition_alloc/partition_stats.h"
#include "partition_alloc/shim/allocator_dispatch.h"
#include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc_internal.h"
#include "partition_alloc/shim/allocator_shim_internals.h"
#if PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)
@@ -70,7 +71,7 @@ class LeakySingleton {
PA_ALWAYS_INLINE T* Get() {
auto* instance = instance_.load(std::memory_order_acquire);
if (PA_LIKELY(instance)) {
if (instance) [[likely]] {
return instance;
}
@@ -198,24 +199,19 @@ void* AllocateAlignedMemory(size_t alignment, size_t size) {
namespace allocator_shim::internal {
void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) {
void* PartitionMalloc(size_t size, void* context) {
partition_alloc::ScopedDisallowAllocations guard{};
return Allocator()->AllocInline<partition_alloc::AllocFlags::kNoHooks>(size);
}
void* PartitionMallocUnchecked(const AllocatorDispatch*,
size_t size,
void* context) {
void* PartitionMallocUnchecked(size_t size, void* context) {
partition_alloc::ScopedDisallowAllocations guard{};
return Allocator()
->AllocInline<partition_alloc::AllocFlags::kReturnNull |
partition_alloc::AllocFlags::kNoHooks>(size);
}
void* PartitionCalloc(const AllocatorDispatch*,
size_t n,
size_t size,
void* context) {
void* PartitionCalloc(size_t n, size_t size, void* context) {
partition_alloc::ScopedDisallowAllocations guard{};
const size_t total =
partition_alloc::internal::base::CheckMul(n, size).ValueOrDie();
@@ -224,26 +220,19 @@ void* PartitionCalloc(const AllocatorDispatch*,
partition_alloc::AllocFlags::kNoHooks>(total);
}
void* PartitionMemalign(const AllocatorDispatch*,
size_t alignment,
size_t size,
void* context) {
void* PartitionMemalign(size_t alignment, size_t size, void* context) {
partition_alloc::ScopedDisallowAllocations guard{};
return AllocateAlignedMemory<partition_alloc::AllocFlags::kNoHooks>(alignment,
size);
}
void* PartitionAlignedAlloc(const AllocatorDispatch* dispatch,
size_t size,
size_t alignment,
void* context) {
void* PartitionAlignedAlloc(size_t size, size_t alignment, void* context) {
partition_alloc::ScopedDisallowAllocations guard{};
return AllocateAlignedMemory<partition_alloc::AllocFlags::kNoHooks>(alignment,
size);
}
void* PartitionAlignedAllocUnchecked(const AllocatorDispatch* dispatch,
size_t size,
void* PartitionAlignedAllocUnchecked(size_t size,
size_t alignment,
void* context) {
partition_alloc::ScopedDisallowAllocations guard{};
@@ -258,8 +247,7 @@ void* PartitionAlignedAllocUnchecked(const AllocatorDispatch* dispatch,
// This realloc always free the original memory block and allocates a new memory
// block.
// TODO(tasak): Implement PartitionRoot::AlignedRealloc and use it.
void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch,
void* address,
void* PartitionAlignedRealloc(void* address,
size_t size,
size_t alignment,
void* context) {
@@ -292,8 +280,7 @@ void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch,
return new_ptr;
}
void* PartitionAlignedReallocUnchecked(const AllocatorDispatch* dispatch,
void* address,
void* PartitionAlignedReallocUnchecked(void* address,
size_t size,
size_t alignment,
void* context) {
@@ -327,15 +314,12 @@ void* PartitionAlignedReallocUnchecked(const AllocatorDispatch* dispatch,
return new_ptr;
}
void* PartitionRealloc(const AllocatorDispatch*,
void* address,
size_t size,
void* context) {
void* PartitionRealloc(void* address, size_t size, void* context) {
partition_alloc::ScopedDisallowAllocations guard{};
#if PA_BUILDFLAG(IS_APPLE)
if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(address)) &&
address)) {
if (!partition_alloc::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(address)) &&
address) [[unlikely]] {
// A memory region allocated by the system allocator is passed in this
// function. Forward the request to `realloc` which supports zone-
// dispatching so that it appropriately selects the right zone.
@@ -347,15 +331,12 @@ void* PartitionRealloc(const AllocatorDispatch*,
size, "");
}
void* PartitionReallocUnchecked(const AllocatorDispatch*,
void* address,
size_t size,
void* context) {
void* PartitionReallocUnchecked(void* address, size_t size, void* context) {
partition_alloc::ScopedDisallowAllocations guard{};
#if PA_BUILDFLAG(IS_APPLE)
if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(address)) &&
address)) {
if (!partition_alloc::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(address)) &&
address) [[unlikely]] {
// A memory region allocated by the system allocator is passed in this
// function. Forward the request to `realloc` which supports zone-
// dispatching so that it appropriately selects the right zone.
@@ -374,13 +355,13 @@ void __real_free(void*);
} // extern "C"
#endif // PA_BUILDFLAG(IS_CAST_ANDROID)
void PartitionFree(const AllocatorDispatch*, void* object, void* context) {
void PartitionFree(void* object, void* context) {
partition_alloc::ScopedDisallowAllocations guard{};
#if PA_BUILDFLAG(IS_APPLE)
// TODO(bartekn): Add MTE unmasking here (and below).
if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(object)) &&
object)) {
if (!partition_alloc::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(object)) &&
object) [[unlikely]] {
// A memory region allocated by the system allocator is passed in this
// function. Forward the request to `free` which supports zone-
// dispatching so that it appropriately selects the right zone.
@@ -393,9 +374,9 @@ void PartitionFree(const AllocatorDispatch*, void* object, void* context) {
// the pointer, pass it along. This should not have a runtime cost vs regular
// Android, since on Android we have a PA_CHECK() rather than the branch here.
#if PA_BUILDFLAG(IS_CAST_ANDROID)
if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(object)) &&
object)) {
if (!partition_alloc::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(object)) &&
object) [[unlikely]] {
// A memory region allocated by the system allocator is passed in this
// function. Forward the request to `free()`, which is `__real_free()`
// here.
@@ -414,10 +395,7 @@ void PartitionFree(const AllocatorDispatch*, void* object, void* context) {
//
// So we don't need to re-check that the pointer is owned in Free(), and we
// can use the size.
void PartitionFreeDefiniteSize(const AllocatorDispatch*,
void* address,
size_t size,
void* context) {
void PartitionFreeDefiniteSize(void* address, size_t size, void* context) {
partition_alloc::ScopedDisallowAllocations guard{};
// TODO(lizeb): Optimize PartitionAlloc to use the size information. This is
// still useful though, as we avoid double-checking that the address is owned.
@@ -426,9 +404,7 @@ void PartitionFreeDefiniteSize(const AllocatorDispatch*,
}
#endif // PA_BUILDFLAG(IS_APPLE)
size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
void* address,
void* context) {
size_t PartitionGetSizeEstimate(void* address, void* context) {
// This is used to implement malloc_usable_size(3). Per its man page, "if ptr
// is NULL, 0 is returned".
if (!address) {
@@ -459,20 +435,17 @@ size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
}
#if PA_BUILDFLAG(IS_APPLE)
size_t PartitionGoodSize(const AllocatorDispatch*, size_t size, void* context) {
size_t PartitionGoodSize(size_t size, void* context) {
return Allocator()->AllocationCapacityFromRequestedSize(size);
}
bool PartitionClaimedAddress(const AllocatorDispatch*,
void* address,
void* context) {
bool PartitionClaimedAddress(void* address, void* context) {
return partition_alloc::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(address));
}
#endif // PA_BUILDFLAG(IS_APPLE)
unsigned PartitionBatchMalloc(const AllocatorDispatch*,
size_t size,
unsigned PartitionBatchMalloc(size_t size,
void** results,
unsigned num_requested,
void* context) {
@@ -480,32 +453,29 @@ unsigned PartitionBatchMalloc(const AllocatorDispatch*,
// simple for now.
for (unsigned i = 0; i < num_requested; i++) {
// No need to check the results, we crash if it fails.
results[i] = PartitionMalloc(nullptr, size, nullptr);
results[i] = PartitionMalloc(size, nullptr);
}
// Either all succeeded, or we crashed.
return num_requested;
}
void PartitionBatchFree(const AllocatorDispatch*,
void** to_be_freed,
void PartitionBatchFree(void** to_be_freed,
unsigned num_to_be_freed,
void* context) {
// No real batching: we could only acquire the lock once for instance, keep it
// simple for now.
for (unsigned i = 0; i < num_to_be_freed; i++) {
PartitionFree(nullptr, to_be_freed[i], nullptr);
PartitionFree(to_be_freed[i], nullptr);
}
}
#if PA_BUILDFLAG(IS_APPLE)
void PartitionTryFreeDefault(const AllocatorDispatch*,
void* address,
void* context) {
void PartitionTryFreeDefault(void* address, void* context) {
partition_alloc::ScopedDisallowAllocations guard{};
if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(address)))) {
if (!partition_alloc::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(address))) [[unlikely]] {
// The object pointed to by `address` is not allocated by the
// PartitionAlloc. Call find_zone_and_free.
return allocator_shim::TryFreeDefaultFallbackToFindZoneAndFree(address);
@@ -653,51 +623,11 @@ void AdjustDefaultAllocatorForBackground() {
} // namespace allocator_shim
const AllocatorDispatch AllocatorDispatch::default_dispatch = {
&allocator_shim::internal::PartitionMalloc, // alloc_function
&allocator_shim::internal::
PartitionMallocUnchecked, // alloc_unchecked_function
&allocator_shim::internal::
PartitionCalloc, // alloc_zero_initialized_function
&allocator_shim::internal::PartitionMemalign, // alloc_aligned_function
&allocator_shim::internal::PartitionRealloc, // realloc_function
&allocator_shim::internal::
PartitionReallocUnchecked, // realloc_unchecked_function
&allocator_shim::internal::PartitionFree, // free_function
&allocator_shim::internal::
PartitionGetSizeEstimate, // get_size_estimate_function
#if PA_BUILDFLAG(IS_APPLE)
&allocator_shim::internal::PartitionGoodSize, // good_size
&allocator_shim::internal::PartitionClaimedAddress, // claimed_address
#else
nullptr, // good_size
nullptr, // claimed_address
#endif
&allocator_shim::internal::PartitionBatchMalloc, // batch_malloc_function
&allocator_shim::internal::PartitionBatchFree, // batch_free_function
#if PA_BUILDFLAG(IS_APPLE)
// On Apple OSes, free_definite_size() is always called from free(), since
// get_size_estimate() is used to determine whether an allocation belongs to
// the current zone. It makes sense to optimize for it.
&allocator_shim::internal::PartitionFreeDefiniteSize,
// On Apple OSes, try_free_default() is sometimes called as an optimization
// of free().
&allocator_shim::internal::PartitionTryFreeDefault,
#else
nullptr, // free_definite_size_function
nullptr, // try_free_default_function
#endif
&allocator_shim::internal::
PartitionAlignedAlloc, // aligned_malloc_function
&allocator_shim::internal::
PartitionAlignedAllocUnchecked, // aligned_malloc_unchecked_function
&allocator_shim::internal::
PartitionAlignedRealloc, // aligned_realloc_function
&allocator_shim::internal::
PartitionAlignedReallocUnchecked, // aligned_realloc_unchecked_function
&allocator_shim::internal::PartitionFree, // aligned_free_function
nullptr, // next
};
#if !PA_BUILDFLAG( \
ENABLE_ALLOCATOR_SHIM_PARTITION_ALLOC_DISPATCH_WITH_ADVANCED_CHECKS_SUPPORT)
const AllocatorDispatch AllocatorDispatch::default_dispatch =
internal::kPartitionAllocDispatch;
#endif // !PA_BUILDFLAG(ENABLE_ALLOCATOR_SHIM_PARTITION_ALLOC_DISPATCH_WITH_ADVANCED_CHECKS_SUPPORT)
// Intercept diagnostics symbols as well, even though they are not part of the
// unified shim layer.
@@ -14,8 +14,6 @@
namespace allocator_shim {
struct AllocatorDispatch;
namespace internal {
class PA_COMPONENT_EXPORT(ALLOCATOR_SHIM) PartitionAllocMalloc {
@@ -30,70 +28,77 @@ class PA_COMPONENT_EXPORT(ALLOCATOR_SHIM) PartitionAllocMalloc {
};
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context);
void* PartitionMalloc(size_t size, void* context);
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
void* PartitionMallocUnchecked(const AllocatorDispatch*,
size_t size,
void* context);
void* PartitionMallocUnchecked(size_t size, void* context);
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
void* PartitionCalloc(const AllocatorDispatch*,
size_t n,
size_t size,
void* context);
void* PartitionCalloc(size_t n, size_t size, void* context);
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
void* PartitionMemalign(const AllocatorDispatch*,
size_t alignment,
size_t size,
void* context);
void* PartitionMemalign(size_t alignment, size_t size, void* context);
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
void* PartitionAlignedAlloc(const AllocatorDispatch* dispatch,
size_t size,
size_t alignment,
void* context);
void* PartitionAlignedAlloc(size_t size, size_t alignment, void* context);
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
void* PartitionAlignedAllocUnchecked(const AllocatorDispatch* dispatch,
size_t size,
void* PartitionAlignedAllocUnchecked(size_t size,
size_t alignment,
void* context);
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch,
void* address,
void* PartitionAlignedRealloc(void* address,
size_t size,
size_t alignment,
void* context);
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
void* PartitionAlignedReallocUnchecked(const AllocatorDispatch* dispatch,
void* address,
void* PartitionAlignedReallocUnchecked(void* address,
size_t size,
size_t alignment,
void* context);
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
void* PartitionRealloc(const AllocatorDispatch*,
void* address,
size_t size,
void* context);
void* PartitionRealloc(void* address, size_t size, void* context);
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
void* PartitionReallocUnchecked(const AllocatorDispatch*,
void* address,
size_t size,
void* context);
void* PartitionReallocUnchecked(void* address, size_t size, void* context);
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
void PartitionFree(const AllocatorDispatch*, void* object, void* context);
void PartitionFree(void* object, void* context);
#if PA_BUILDFLAG(IS_APPLE)
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
void PartitionFreeDefiniteSize(void* address, size_t size, void* context);
#endif // PA_BUILDFLAG(IS_APPLE)
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
void* address,
void* context);
size_t PartitionGetSizeEstimate(void* address, void* context);
#if PA_BUILDFLAG(IS_APPLE)
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
size_t PartitionGoodSize(size_t size, void* context);
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
bool PartitionClaimedAddress(void* address, void* context);
#endif // PA_BUILDFLAG(IS_APPLE)
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
unsigned PartitionBatchMalloc(size_t size,
void** results,
unsigned num_requested,
void* context);
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
void PartitionBatchFree(void** to_be_freed,
unsigned num_to_be_freed,
void* context);
#if PA_BUILDFLAG(IS_APPLE)
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
void PartitionTryFreeDefault(void* address, void* context);
#endif // PA_BUILDFLAG(IS_APPLE)
} // namespace internal
@@ -102,16 +107,18 @@ size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
// we're making it more resilient to ConfigurePartitions() interface changes, so
// that we don't have to modify multiple callers. This is particularly important
// when callers are in a different repo, like PDFium or Dawn.
PA_ALWAYS_INLINE void ConfigurePartitionsForTesting(
bool enable_memory_tagging_if_available = true) {
// -----------------------------------------------------------------------------
// DO NOT MODIFY this signature. This is meant for partition_alloc's embedders
// only, so that partition_alloc can evolve without breaking them.
// Chromium/PartitionAlloc are part of the same repo, they must not depend on
// this function. They should call ConfigurePartitions() directly.
PA_ALWAYS_INLINE void ConfigurePartitionsForTesting() {
auto enable_brp = allocator_shim::EnableBrp(true);
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
auto enable_memory_tagging =
allocator_shim::EnableMemoryTagging(enable_memory_tagging_if_available);
#else
auto enable_memory_tagging = allocator_shim::EnableMemoryTagging(false);
#endif // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
// Embedders's tests might benefit from MTE checks. However, this is costly
// and shouldn't be used in benchmarks.
auto enable_memory_tagging = allocator_shim::EnableMemoryTagging(
PA_BUILDFLAG(HAS_MEMORY_TAGGING) && PA_BUILDFLAG(DCHECKS_ARE_ON));
// Since the only user of this function is a test function, we use
// synchronous reporting mode, if MTE is enabled.
@@ -0,0 +1,69 @@
// Copyright 2024 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_INTERNAL_H_
#define PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_INTERNAL_H_
#include "partition_alloc/buildflags.h"
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
#include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
namespace allocator_shim::internal {
// Only allocator_shim component must include this header, because
// PartitionMalloc, PartitionMallocUnchecked, ... are DLL-exported when
// is_component_build=true and is_win=true. In the case, the other component
// needs to import the symbols from allocator_shim.dll...so, not constexpr.
// Default dispatch table: routes every allocator-shim entry point to the
// PartitionAlloc-backed implementations declared in
// allocator_shim_default_dispatch_to_partition_alloc.h. Optional entry
// points that PartitionAlloc does not implement on this platform are left
// nullptr. Field order must match the AllocatorDispatch struct exactly
// (aggregate initialization).
inline constexpr AllocatorDispatch kPartitionAllocDispatch = {
    &allocator_shim::internal::PartitionMalloc,  // alloc_function
    &allocator_shim::internal::
        PartitionMallocUnchecked,  // alloc_unchecked_function
    &allocator_shim::internal::
        PartitionCalloc,  // alloc_zero_initialized_function
    &allocator_shim::internal::PartitionMemalign,  // alloc_aligned_function
    &allocator_shim::internal::PartitionRealloc,   // realloc_function
    &allocator_shim::internal::
        PartitionReallocUnchecked,                 // realloc_unchecked_function
    &allocator_shim::internal::PartitionFree,      // free_function
    &allocator_shim::internal::
        PartitionGetSizeEstimate,  // get_size_estimate_function
#if PA_BUILDFLAG(IS_APPLE)
    &allocator_shim::internal::PartitionGoodSize,        // good_size
    &allocator_shim::internal::PartitionClaimedAddress,  // claimed_address
#else
    nullptr,  // good_size
    nullptr,  // claimed_address
#endif
    &allocator_shim::internal::PartitionBatchMalloc,  // batch_malloc_function
    &allocator_shim::internal::PartitionBatchFree,    // batch_free_function
#if PA_BUILDFLAG(IS_APPLE)
    // On Apple OSes, free_definite_size() is always called from free(), since
    // get_size_estimate() is used to determine whether an allocation belongs to
    // the current zone. It makes sense to optimize for it.
    &allocator_shim::internal::PartitionFreeDefiniteSize,
    // On Apple OSes, try_free_default() is sometimes called as an optimization
    // of free().
    &allocator_shim::internal::PartitionTryFreeDefault,
#else
    nullptr,  // free_definite_size_function
    nullptr,  // try_free_default_function
#endif
    &allocator_shim::internal::
        PartitionAlignedAlloc,  // aligned_malloc_function
    &allocator_shim::internal::
        PartitionAlignedAllocUnchecked,  // aligned_malloc_unchecked_function
    &allocator_shim::internal::
        PartitionAlignedRealloc,  // aligned_realloc_function
    &allocator_shim::internal::
        PartitionAlignedReallocUnchecked,  // aligned_realloc_unchecked_function
    &allocator_shim::internal::PartitionFree,  // aligned_free_function
    nullptr,                                   // next
};
} // namespace allocator_shim::internal
#endif // PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
#endif // PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_INTERNAL_H_
@@ -0,0 +1,95 @@
// Copyright 2024 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc_with_advanced_checks.h"
#include <atomic>
#include "partition_alloc/partition_alloc_base/check.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/shim/allocator_dispatch.h"
#include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc_internal.h"
namespace allocator_shim {
namespace {

// The dispatch currently receiving the delegated entry points (free/realloc).
// Starts as plain PartitionAlloc and is swapped by
// InstallDispatchToPartitionAllocWithAdvancedChecks() /
// UninstallDispatchToPartitionAllocWithAdvancedChecks().
std::atomic<const AllocatorDispatch*> g_delegate_dispatch =
    &internal::kPartitionAllocDispatch;

// Returns the current delegate. Relaxed ordering is sufficient here; the
// install path stores with relaxed ordering as well (see the comment there).
PA_ALWAYS_INLINE const AllocatorDispatch* GetDelegate() {
  return g_delegate_dispatch.load(std::memory_order_relaxed);
}

}  // namespace
// Installs `dispatch` as the delegate so that free() and realloc() requests
// are routed through it (PartitionAlloc with extra safety checks) instead of
// the plain PartitionAlloc dispatch.
//
// Contract, enforced by DCHECKs: `dispatch` implements exactly
// `free_function` and `realloc_function`; every other entry point must be
// null. Its `next` pointer is overwritten here to chain back to the default
// PartitionAlloc dispatch.
void InstallDispatchToPartitionAllocWithAdvancedChecks(
    AllocatorDispatch* dispatch) {
  PA_DCHECK(dispatch);
  // Must provide the following:
  PA_DCHECK(dispatch->realloc_function != nullptr);
  PA_DCHECK(dispatch->free_function != nullptr);
  // Must not provide the following:
  PA_DCHECK(dispatch->alloc_function == nullptr);
  PA_DCHECK(dispatch->alloc_unchecked_function == nullptr);
  PA_DCHECK(dispatch->alloc_zero_initialized_function == nullptr);
  PA_DCHECK(dispatch->alloc_aligned_function == nullptr);
  PA_DCHECK(dispatch->realloc_unchecked_function == nullptr);
  PA_DCHECK(dispatch->get_size_estimate_function == nullptr);
  PA_DCHECK(dispatch->good_size_function == nullptr);
  PA_DCHECK(dispatch->claimed_address_function == nullptr);
  PA_DCHECK(dispatch->batch_malloc_function == nullptr);
  PA_DCHECK(dispatch->batch_free_function == nullptr);
  PA_DCHECK(dispatch->free_definite_size_function == nullptr);
  PA_DCHECK(dispatch->try_free_default_function == nullptr);
  PA_DCHECK(dispatch->aligned_malloc_function == nullptr);
  PA_DCHECK(dispatch->aligned_malloc_unchecked_function == nullptr);
  PA_DCHECK(dispatch->aligned_realloc_function == nullptr);
  PA_DCHECK(dispatch->aligned_realloc_unchecked_function == nullptr);
  PA_DCHECK(dispatch->aligned_free_function == nullptr);

  dispatch->next = &internal::kPartitionAllocDispatch;

  // Unlike `InsertAllocatorDispatch(...)`, we don't have any invariant here.
  // Hence using relaxed memory ordering.
#if !PA_BUILDFLAG(DCHECKS_ARE_ON)
  g_delegate_dispatch.store(dispatch, std::memory_order_relaxed);
#else
  // With DCHECKs on, use exchange() so we can verify the previous value.
  const AllocatorDispatch* previous_value =
      g_delegate_dispatch.exchange(dispatch, std::memory_order_relaxed);
  // We also allow `previous_value == dispatch` i.e. `dispatch` is written
  // twice - sometimes it is hard to guarantee "exactly once" initialization.
  PA_DCHECK(previous_value == &internal::kPartitionAllocDispatch ||
            previous_value == dispatch);
#endif  // PA_BUILDFLAG(DCHECKS_ARE_ON)
}
void UninstallDispatchToPartitionAllocWithAdvancedChecks() {
g_delegate_dispatch.store(&internal::kPartitionAllocDispatch,
std::memory_order_relaxed);
}
namespace internal {
// free() trampoline installed into `default_dispatch`: forwards to the
// current delegate's free_function. PA_MUSTTAIL is intended to make this a
// guaranteed tail call, keeping the extra indirection cheap — do not add
// statements after the call.
void FreeWithAdvancedChecks(void* address, void* context) {
  const AllocatorDispatch* delegate = GetDelegate();
  PA_MUSTTAIL return delegate->free_function(delegate, address, context);
}
// realloc() trampoline installed into `default_dispatch`: forwards to the
// current delegate's realloc_function as a (PA_MUSTTAIL) tail call — do not
// add statements after the call.
void* ReallocWithAdvancedChecks(void* address, size_t size, void* context) {
  const AllocatorDispatch* delegate = GetDelegate();
  PA_MUSTTAIL return delegate->realloc_function(delegate, address, size,
                                                context);
}
} // namespace internal
// The process-wide default dispatch: identical to kPartitionAllocDispatch
// except that free() and realloc() go through the delegating trampolines
// above, so advanced checks can be toggled at runtime. Built with a constexpr
// immediately-invoked lambda to keep the table a compile-time constant.
const AllocatorDispatch AllocatorDispatch::default_dispatch = []() constexpr {
  AllocatorDispatch table = internal::kPartitionAllocDispatch;
  table.free_function = &internal::FreeWithAdvancedChecks;
  table.realloc_function = &internal::ReallocWithAdvancedChecks;
  return table;
}();
} // namespace allocator_shim
@@ -0,0 +1,43 @@
// Copyright 2024 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_WITH_ADVANCED_CHECKS_H_
#define PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_WITH_ADVANCED_CHECKS_H_
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/shim/allocator_dispatch.h"
#if !PA_BUILDFLAG( \
ENABLE_ALLOCATOR_SHIM_PARTITION_ALLOC_DISPATCH_WITH_ADVANCED_CHECKS_SUPPORT)
#error PartitionAlloc with Advanced Checks is not available in this build \
configuration.
#endif
// PartitionAlloc with Advanced Checks is a feature to install extra safety
// checks into PartitionAlloc, on opt-in at runtime basis.
// `InsertAllocatorDispatch()` API is not capable of this feature as it always
// inserts the new dispatch at beginning of the chain. As Dispatch here captures
// 100% requests, it will result in all other sampling-based feature nullified.
// Instead, this feature replaces a default dispatch at compile-time, and
// forwards all requests to `allocator_shim::(anonymous
// namespace)::g_delegate_dispatch`. `g_delegate_dispatch` can be either normal
// PA or PA with Advanced Checks. There will be very slight but non-zero cost
// for this one extra trampoline call. To minimize the cost, only following
// functions are delegated.
//
// - `AllocatorDispatch::free_function`
// - `AllocatorDispatch::realloc_function`
namespace allocator_shim {

// Routes free()/realloc() through `dispatch` (PartitionAlloc with extra
// safety checks). `dispatch` must implement exactly free_function and
// realloc_function; all other entry points must be null. See the .cc file
// for the enforced contract.
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
void InstallDispatchToPartitionAllocWithAdvancedChecks(
    AllocatorDispatch* dispatch);

// Restores the plain PartitionAlloc dispatch as the delegate.
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
void UninstallDispatchToPartitionAllocWithAdvancedChecks();

}  // namespace allocator_shim
#endif // PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_WITH_ADVANCED_CHECKS_H_
@@ -13,16 +13,11 @@ namespace {
using allocator_shim::AllocatorDispatch;
void* DefaultWinHeapMallocImpl(const AllocatorDispatch*,
size_t size,
void* context) {
void* DefaultWinHeapMallocImpl(size_t size, void* context) {
return allocator_shim::WinHeapMalloc(size);
}
void* DefaultWinHeapCallocImpl(const AllocatorDispatch* self,
size_t n,
size_t elem_size,
void* context) {
void* DefaultWinHeapCallocImpl(size_t n, size_t elem_size, void* context) {
// Overflow check.
const size_t size = n * elem_size;
if (elem_size != 0 && size / elem_size != n) {
@@ -36,51 +31,37 @@ void* DefaultWinHeapCallocImpl(const AllocatorDispatch* self,
return result;
}
void* DefaultWinHeapMemalignImpl(const AllocatorDispatch* self,
size_t alignment,
size_t size,
void* context) {
void* DefaultWinHeapMemalignImpl(size_t alignment, size_t size, void* context) {
PA_CHECK(false) << "The windows heap does not support memalign.";
return nullptr;
}
void* DefaultWinHeapReallocImpl(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
void* DefaultWinHeapReallocImpl(void* address, size_t size, void* context) {
return allocator_shim::WinHeapRealloc(address, size);
}
void DefaultWinHeapFreeImpl(const AllocatorDispatch*,
void* address,
void* context) {
void DefaultWinHeapFreeImpl(void* address, void* context) {
allocator_shim::WinHeapFree(address);
}
size_t DefaultWinHeapGetSizeEstimateImpl(const AllocatorDispatch*,
void* address,
void* context) {
size_t DefaultWinHeapGetSizeEstimateImpl(void* address, void* context) {
return allocator_shim::WinHeapGetSizeEstimate(address);
}
void* DefaultWinHeapAlignedMallocImpl(const AllocatorDispatch*,
size_t size,
void* DefaultWinHeapAlignedMallocImpl(size_t size,
size_t alignment,
void* context) {
return allocator_shim::WinHeapAlignedMalloc(size, alignment);
}
void* DefaultWinHeapAlignedReallocImpl(const AllocatorDispatch*,
void* ptr,
void* DefaultWinHeapAlignedReallocImpl(void* ptr,
size_t size,
size_t alignment,
void* context) {
return allocator_shim::WinHeapAlignedRealloc(ptr, size, alignment);
}
void DefaultWinHeapAlignedFreeImpl(const AllocatorDispatch*,
void* ptr,
void* context) {
void DefaultWinHeapAlignedFreeImpl(void* ptr, void* context) {
allocator_shim::WinHeapAlignedFree(ptr);
}
@@ -13,25 +13,15 @@
namespace allocator_shim {
namespace {
void FreeFn(const AllocatorDispatch* self, void* address, void* context) {}
void FreeFn(void* address, void* context) {}
void BatchFreeFn(const AllocatorDispatch* self,
void** to_be_freed,
unsigned num_to_be_freed,
void* context) {}
void BatchFreeFn(void** to_be_freed, unsigned num_to_be_freed, void* context) {}
void FreeDefiniteSizeFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {}
void FreeDefiniteSizeFn(void* address, size_t size, void* context) {}
void TryFreeDefaultFn(const AllocatorDispatch* self,
void* address,
void* context) {}
void TryFreeDefaultFn(void* address, void* context) {}
static void AlignedFreeFn(const AllocatorDispatch* self,
void* address,
void* context) {}
static void AlignedFreeFn(void* address, void* context) {}
AllocatorDispatch allocator_dispatch = {
nullptr, // alloc_function
@@ -17,6 +17,7 @@
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/shim/allocator_dispatch.h"
#include "partition_alloc/shim/allocator_shim.h"
#include "partition_alloc/shim/allocator_shim_internals.h"
#if PA_BUILDFLAG(IS_WIN)
@@ -63,34 +64,33 @@ void SetCallNewHandlerOnMallocFailure(bool value) {
void* UncheckedAlloc(size_t size) {
const AllocatorDispatch* const chain_head = internal::GetChainHead();
return chain_head->alloc_unchecked_function(chain_head, size, nullptr);
return chain_head->alloc_unchecked_function(size, nullptr);
}
void* UncheckedRealloc(void* ptr, size_t size) {
const AllocatorDispatch* const chain_head = internal::GetChainHead();
return chain_head->realloc_unchecked_function(chain_head, ptr, size, nullptr);
return chain_head->realloc_unchecked_function(ptr, size, nullptr);
}
void UncheckedFree(void* ptr) {
const AllocatorDispatch* const chain_head = internal::GetChainHead();
return chain_head->free_function(chain_head, ptr, nullptr);
return chain_head->free_function(ptr, nullptr);
}
void* UncheckedAlignedAlloc(size_t size, size_t align) {
const AllocatorDispatch* const chain_head = internal::GetChainHead();
return chain_head->aligned_malloc_unchecked_function(chain_head, size, align,
nullptr);
return chain_head->aligned_malloc_unchecked_function(size, align, nullptr);
}
void* UncheckedAlignedRealloc(void* ptr, size_t size, size_t align) {
const AllocatorDispatch* const chain_head = internal::GetChainHead();
return chain_head->aligned_realloc_unchecked_function(chain_head, ptr, size,
align, nullptr);
return chain_head->aligned_realloc_unchecked_function(ptr, size, align,
nullptr);
}
void UncheckedAlignedFree(void* ptr) {
const AllocatorDispatch* const chain_head = internal::GetChainHead();
return chain_head->aligned_free_function(chain_head, ptr, nullptr);
return chain_head->aligned_free_function(ptr, nullptr);
}
void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
@@ -134,6 +134,17 @@ const AllocatorDispatch* GetAllocatorDispatchChainHeadForTesting() {
return internal::GetChainHead();
}
AutoResetAllocatorDispatchChainForTesting::
AutoResetAllocatorDispatchChainForTesting() {
original_dispatch_ = internal::g_chain_head.exchange(
&allocator_shim::AllocatorDispatch::default_dispatch);
}
AutoResetAllocatorDispatchChainForTesting::
~AutoResetAllocatorDispatchChainForTesting() {
internal::g_chain_head = original_dispatch_;
}
} // namespace allocator_shim
#endif // PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_FUNCTIONS_H_
@@ -63,7 +63,7 @@ SHIM_ALWAYS_EXPORT size_t __wrap_malloc_usable_size(void* address) {
return ShimGetSizeEstimate(address, nullptr);
}
const size_t kPathMaxSize = 8192;
inline constexpr size_t kPathMaxSize = 8192;
static_assert(kPathMaxSize >= PATH_MAX, "");
extern char* __wrap_strdup(const char* str);
@@ -119,7 +119,7 @@ SHIM_ALWAYS_EXPORT char* __wrap_getcwd(char* buffer, size_t size) {
if (!size) {
size = kPathMaxSize;
}
char local_buffer[size];
char local_buffer[kPathMaxSize];
if (!__real_getcwd(local_buffer, size)) {
return nullptr;
}
@@ -213,6 +213,14 @@ char* _strdup(const char* strSource) {
return dest;
}
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
wchar_t* _wcsdup(const wchar_t* strSource) {
wchar_t* dest =
static_cast<wchar_t*>(malloc(sizeof(wchar_t) * (wcslen(strSource) + 1)));
wcscpy(dest, strSource);
return dest;
}
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
errno_t _dupenv_s(char** buffer,
size_t* number_of_elements,
@@ -22,7 +22,7 @@ namespace {
PA_ALWAYS_INLINE size_t GetCachedPageSize() {
static size_t pagesize = 0;
if (!pagesize) {
if (pagesize == 0) [[unlikely]] {
pagesize = partition_alloc::internal::base::GetPageSize();
}
return pagesize;
@@ -52,89 +52,99 @@ extern "C" {
PA_ALWAYS_INLINE void* ShimCppNew(size_t size) {
const allocator_shim::AllocatorDispatch* const chain_head =
allocator_shim::internal::GetChainHead();
void* ptr;
do {
void* context = nullptr;
#if PA_BUILDFLAG(IS_APPLE) && !PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
context = malloc_default_zone();
#endif
ptr = chain_head->alloc_function(chain_head, size, context);
} while (PA_UNLIKELY(!ptr && allocator_shim::internal::CallNewHandler(size)));
return ptr;
}
PA_ALWAYS_INLINE void* ShimCppNewNoThrow(size_t size) {
void* context = nullptr;
#if PA_BUILDFLAG(IS_APPLE) && !PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
context = malloc_default_zone();
#endif
// `[[unlikely]]` is _not_ effective when used in the form of
// `do [[unlikely]] { ... } while (expr);`, so we use the following form
// instead.
void* ptr = chain_head->alloc_function(size, context);
while (!ptr && allocator_shim::internal::CallNewHandler(size)) [[unlikely]] {
ptr = chain_head->alloc_function(size, context);
}
return ptr;
}
PA_ALWAYS_INLINE void* ShimCppNewNoThrow(size_t size) {
const allocator_shim::AllocatorDispatch* const chain_head =
allocator_shim::internal::GetChainHead();
return chain_head->alloc_unchecked_function(chain_head, size, context);
void* context = nullptr;
#if PA_BUILDFLAG(IS_APPLE) && !PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
context = malloc_default_zone();
#endif
return chain_head->alloc_unchecked_function(size, context);
}
PA_ALWAYS_INLINE void* ShimCppAlignedNew(size_t size, size_t alignment) {
const allocator_shim::AllocatorDispatch* const chain_head =
allocator_shim::internal::GetChainHead();
void* ptr;
do {
void* context = nullptr;
#if PA_BUILDFLAG(IS_APPLE) && !PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
context = malloc_default_zone();
#endif
ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
context);
} while (PA_UNLIKELY(!ptr && allocator_shim::internal::CallNewHandler(size)));
return ptr;
}
PA_ALWAYS_INLINE void ShimCppDelete(void* address) {
void* context = nullptr;
#if PA_BUILDFLAG(IS_APPLE) && !PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
context = malloc_default_zone();
#endif
void* ptr = chain_head->alloc_aligned_function(alignment, size, context);
while (!ptr && allocator_shim::internal::CallNewHandler(size)) [[unlikely]] {
ptr = chain_head->alloc_aligned_function(alignment, size, context);
}
return ptr;
}
PA_ALWAYS_INLINE void ShimCppDelete(void* address) {
const allocator_shim::AllocatorDispatch* const chain_head =
allocator_shim::internal::GetChainHead();
return chain_head->free_function(chain_head, address, context);
void* context = nullptr;
#if PA_BUILDFLAG(IS_APPLE) && !PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
context = malloc_default_zone();
#endif
return chain_head->free_function(address, context);
}
PA_ALWAYS_INLINE void* ShimMalloc(size_t size, void* context) {
const allocator_shim::AllocatorDispatch* const chain_head =
allocator_shim::internal::GetChainHead();
void* ptr;
do {
ptr = chain_head->alloc_function(chain_head, size, context);
} while (PA_UNLIKELY(
!ptr && allocator_shim::internal::g_call_new_handler_on_malloc_failure &&
allocator_shim::internal::CallNewHandler(size)));
void* ptr = chain_head->alloc_function(size, context);
while (!ptr &&
allocator_shim::internal::g_call_new_handler_on_malloc_failure &&
allocator_shim::internal::CallNewHandler(size)) [[unlikely]] {
ptr = chain_head->alloc_function(size, context);
}
return ptr;
}
PA_ALWAYS_INLINE void* ShimCalloc(size_t n, size_t size, void* context) {
const allocator_shim::AllocatorDispatch* const chain_head =
allocator_shim::internal::GetChainHead();
void* ptr;
do {
ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size,
context);
} while (PA_UNLIKELY(
!ptr && allocator_shim::internal::g_call_new_handler_on_malloc_failure &&
allocator_shim::internal::CallNewHandler(size)));
void* ptr = chain_head->alloc_zero_initialized_function(n, size, context);
while (!ptr &&
allocator_shim::internal::g_call_new_handler_on_malloc_failure &&
allocator_shim::internal::CallNewHandler(size)) [[unlikely]] {
ptr = chain_head->alloc_zero_initialized_function(n, size, context);
}
return ptr;
}
PA_ALWAYS_INLINE void* ShimRealloc(void* address, size_t size, void* context) {
// realloc(size == 0) means free() and might return a nullptr. We should
// not call the std::new_handler in that case, though.
const allocator_shim::AllocatorDispatch* const chain_head =
allocator_shim::internal::GetChainHead();
void* ptr;
do {
ptr = chain_head->realloc_function(chain_head, address, size, context);
} while (PA_UNLIKELY(
!ptr && size &&
allocator_shim::internal::g_call_new_handler_on_malloc_failure &&
allocator_shim::internal::CallNewHandler(size)));
void* ptr = chain_head->realloc_function(address, size, context);
// realloc(size == 0) means free() and might return a nullptr. We should
// not call the std::new_handler in that case, though.
while (!ptr && size != 0 &&
allocator_shim::internal::g_call_new_handler_on_malloc_failure &&
allocator_shim::internal::CallNewHandler(size)) [[unlikely]] {
ptr = chain_head->realloc_function(address, size, context);
}
return ptr;
}
@@ -143,13 +153,14 @@ PA_ALWAYS_INLINE void* ShimMemalign(size_t alignment,
void* context) {
const allocator_shim::AllocatorDispatch* const chain_head =
allocator_shim::internal::GetChainHead();
void* ptr;
do {
ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
context);
} while (PA_UNLIKELY(
!ptr && allocator_shim::internal::g_call_new_handler_on_malloc_failure &&
allocator_shim::internal::CallNewHandler(size)));
void* ptr = chain_head->alloc_aligned_function(alignment, size, context);
while (!ptr &&
allocator_shim::internal::g_call_new_handler_on_malloc_failure &&
allocator_shim::internal::CallNewHandler(size)) [[unlikely]] {
ptr = chain_head->alloc_aligned_function(alignment, size, context);
}
return ptr;
}
@@ -159,12 +170,16 @@ PA_ALWAYS_INLINE int ShimPosixMemalign(void** res,
// posix_memalign is supposed to check the arguments. See tc_posix_memalign()
// in tc_malloc.cc.
if (((alignment % sizeof(void*)) != 0) ||
!partition_alloc::internal::base::bits::HasSingleBit(alignment)) {
!partition_alloc::internal::base::bits::HasSingleBit(alignment))
[[unlikely]] {
return EINVAL;
}
void* ptr = ShimMemalign(alignment, size, nullptr);
*res = ptr;
return ptr ? 0 : ENOMEM;
if (ptr) [[likely]] {
return 0;
}
return ENOMEM;
}
PA_ALWAYS_INLINE void* ShimValloc(size_t size, void* context) {
@@ -173,41 +188,41 @@ PA_ALWAYS_INLINE void* ShimValloc(size_t size, void* context) {
PA_ALWAYS_INLINE void* ShimPvalloc(size_t size) {
// pvalloc(0) should allocate one page, according to its man page.
if (size == 0) {
size = GetCachedPageSize();
size_t page_size = GetCachedPageSize();
if (size == 0) [[unlikely]] {
size = page_size;
} else {
size = partition_alloc::internal::base::bits::AlignUp(size,
GetCachedPageSize());
size = partition_alloc::internal::base::bits::AlignUp(size, page_size);
}
// The third argument is nullptr because pvalloc is glibc only and does not
// exist on OSX/BSD systems.
return ShimMemalign(GetCachedPageSize(), size, nullptr);
return ShimMemalign(page_size, size, nullptr);
}
PA_ALWAYS_INLINE void ShimFree(void* address, void* context) {
const allocator_shim::AllocatorDispatch* const chain_head =
allocator_shim::internal::GetChainHead();
return chain_head->free_function(chain_head, address, context);
return chain_head->free_function(address, context);
}
PA_ALWAYS_INLINE size_t ShimGetSizeEstimate(const void* address,
void* context) {
const allocator_shim::AllocatorDispatch* const chain_head =
allocator_shim::internal::GetChainHead();
return chain_head->get_size_estimate_function(
chain_head, const_cast<void*>(address), context);
return chain_head->get_size_estimate_function(const_cast<void*>(address),
context);
}
PA_ALWAYS_INLINE size_t ShimGoodSize(size_t size, void* context) {
const allocator_shim::AllocatorDispatch* const chain_head =
allocator_shim::internal::GetChainHead();
return chain_head->good_size_function(chain_head, size, context);
return chain_head->good_size_function(size, context);
}
PA_ALWAYS_INLINE bool ShimClaimedAddress(void* address, void* context) {
const allocator_shim::AllocatorDispatch* const chain_head =
allocator_shim::internal::GetChainHead();
return chain_head->claimed_address_function(chain_head, address, context);
return chain_head->claimed_address_function(address, context);
}
PA_ALWAYS_INLINE unsigned ShimBatchMalloc(size_t size,
@@ -216,8 +231,8 @@ PA_ALWAYS_INLINE unsigned ShimBatchMalloc(size_t size,
void* context) {
const allocator_shim::AllocatorDispatch* const chain_head =
allocator_shim::internal::GetChainHead();
return chain_head->batch_malloc_function(chain_head, size, results,
num_requested, context);
return chain_head->batch_malloc_function(size, results, num_requested,
context);
}
PA_ALWAYS_INLINE void ShimBatchFree(void** to_be_freed,
@@ -225,8 +240,7 @@ PA_ALWAYS_INLINE void ShimBatchFree(void** to_be_freed,
void* context) {
const allocator_shim::AllocatorDispatch* const chain_head =
allocator_shim::internal::GetChainHead();
return chain_head->batch_free_function(chain_head, to_be_freed,
num_to_be_freed, context);
return chain_head->batch_free_function(to_be_freed, num_to_be_freed, context);
}
PA_ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr,
@@ -234,14 +248,13 @@ PA_ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr,
void* context) {
const allocator_shim::AllocatorDispatch* const chain_head =
allocator_shim::internal::GetChainHead();
return chain_head->free_definite_size_function(chain_head, ptr, size,
context);
return chain_head->free_definite_size_function(ptr, size, context);
}
PA_ALWAYS_INLINE void ShimTryFreeDefault(void* ptr, void* context) {
const allocator_shim::AllocatorDispatch* const chain_head =
allocator_shim::internal::GetChainHead();
return chain_head->try_free_default_function(chain_head, ptr, context);
return chain_head->try_free_default_function(ptr, context);
}
PA_ALWAYS_INLINE void* ShimAlignedMalloc(size_t size,
@@ -249,13 +262,14 @@ PA_ALWAYS_INLINE void* ShimAlignedMalloc(size_t size,
void* context) {
const allocator_shim::AllocatorDispatch* const chain_head =
allocator_shim::internal::GetChainHead();
void* ptr = nullptr;
do {
ptr = chain_head->aligned_malloc_function(chain_head, size, alignment,
context);
} while (PA_UNLIKELY(
!ptr && allocator_shim::internal::g_call_new_handler_on_malloc_failure &&
allocator_shim::internal::CallNewHandler(size)));
void* ptr = chain_head->aligned_malloc_function(size, alignment, context);
while (!ptr &&
allocator_shim::internal::g_call_new_handler_on_malloc_failure &&
allocator_shim::internal::CallNewHandler(size)) [[unlikely]] {
ptr = chain_head->aligned_malloc_function(size, alignment, context);
}
return ptr;
}
@@ -263,25 +277,27 @@ PA_ALWAYS_INLINE void* ShimAlignedRealloc(void* address,
size_t size,
size_t alignment,
void* context) {
// _aligned_realloc(size == 0) means _aligned_free() and might return a
// nullptr. We should not call the std::new_handler in that case, though.
const allocator_shim::AllocatorDispatch* const chain_head =
allocator_shim::internal::GetChainHead();
void* ptr = nullptr;
do {
ptr = chain_head->aligned_realloc_function(chain_head, address, size,
alignment, context);
} while (PA_UNLIKELY(
!ptr && size &&
allocator_shim::internal::g_call_new_handler_on_malloc_failure &&
allocator_shim::internal::CallNewHandler(size)));
void* ptr =
chain_head->aligned_realloc_function(address, size, alignment, context);
// _aligned_realloc(size == 0) means _aligned_free() and might return a
// nullptr. We should not call the std::new_handler in that case, though.
while (!ptr && size != 0 &&
allocator_shim::internal::g_call_new_handler_on_malloc_failure &&
allocator_shim::internal::CallNewHandler(size)) [[unlikely]] {
ptr =
chain_head->aligned_realloc_function(address, size, alignment, context);
}
return ptr;
}
PA_ALWAYS_INLINE void ShimAlignedFree(void* address, void* context) {
const allocator_shim::AllocatorDispatch* const chain_head =
allocator_shim::internal::GetChainHead();
return chain_head->aligned_free_function(chain_head, address, context);
return chain_head->aligned_free_function(address, context);
}
#undef PA_ALWAYS_INLINE
@@ -54,7 +54,7 @@ void SpinningMutex::AcquireSpinThenBlock() {
int tries = 0;
int backoff = 1;
do {
if (PA_LIKELY(Try())) {
if (Try()) [[likely]] {
return;
}
// Note: Per the intel optimization manual
@@ -111,10 +111,10 @@ class PA_LOCKABLE PA_COMPONENT_EXPORT(PARTITION_ALLOC) SpinningMutex {
};
PA_ALWAYS_INLINE void SpinningMutex::Acquire() {
// Not marked PA_LIKELY(), as:
// Not marked `[[likely]]`, as:
// 1. We don't know how much contention the lock would experience
// 2. This may lead to weird-looking code layout when inlined into a caller
// with PA_(UN)LIKELY() annotations.
// with `[[(un)likely]]` attributes.
if (Try()) {
return;
}
@@ -142,8 +142,8 @@ PA_ALWAYS_INLINE bool SpinningMutex::Try() {
}
PA_ALWAYS_INLINE void SpinningMutex::Release() {
if (PA_UNLIKELY(state_.exchange(kUnlocked, std::memory_order_release) ==
kLockedContended)) {
if (state_.exchange(kUnlocked, std::memory_order_release) == kLockedContended)
[[unlikely]] {
// |kLockedContended|: there is a waiter to wake up.
//
// Here there is a window where the lock is unlocked, since we just set it
@@ -182,7 +182,7 @@ void* TagRegionIncrementForMTE(void* ptr, size_t sz) {
}
void* RemaskVoidPtrForMTE(void* ptr) {
if (PA_LIKELY(ptr)) {
if (ptr) [[likely]] {
// Can't look up the tag for a null ptr (segfaults).
return __arm_mte_get_tag(ptr);
}
@@ -318,7 +318,7 @@ bool PermissiveMte::HandleCrash(int signo,
SuspendTagCheckingScope::SuspendTagCheckingScope() noexcept {
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
if (PA_UNLIKELY(internal::base::CPU::GetInstanceNoAllocation().has_mte())) {
if (internal::base::CPU::GetInstanceNoAllocation().has_mte()) [[unlikely]] {
asm volatile(
R"(
.arch_extension memtag
@@ -332,7 +332,7 @@ SuspendTagCheckingScope::SuspendTagCheckingScope() noexcept {
SuspendTagCheckingScope::~SuspendTagCheckingScope() {
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
if (PA_UNLIKELY(internal::base::CPU::GetInstanceNoAllocation().has_mte())) {
if (internal::base::CPU::GetInstanceNoAllocation().has_mte()) [[unlikely]] {
// Restore previous tco value.
__asm__ __volatile__(
R"(
@@ -42,13 +42,17 @@ void ChangeMemoryTaggingModeForCurrentThread(TagViolationReportingMode);
namespace internal {
constexpr uint64_t kMemTagGranuleSize = 16u;
inline constexpr uint64_t kMemTagGranuleSize = 16u;
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
constexpr uint64_t kPtrTagMask = 0xff00000000000000uLL;
inline constexpr uint64_t kPtrTagMask = 0xff00000000000000uLL;
inline constexpr size_t kPtrTagShift = 56;
static_assert(kPtrTagMask == (0xffULL << kPtrTagShift),
"kPtrTagMask and kPtrTagShift must be consistent");
#else
constexpr uint64_t kPtrTagMask = 0;
inline constexpr uint64_t kPtrTagMask = 0;
inline constexpr size_t kPtrTagShift = 0;
#endif // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
constexpr uint64_t kPtrUntagMask = ~kPtrTagMask;
inline constexpr uint64_t kPtrUntagMask = ~kPtrTagMask;
#if PA_BUILDFLAG(IS_ANDROID)
// Changes the memory tagging mode for all threads in the current process.
@@ -93,10 +97,9 @@ PA_ALWAYS_INLINE void* TagMemoryRangeIncrement(uintptr_t address, size_t size) {
// Randomly changes the tag of the ptr memory range. Useful for initial random
// initialization. Returns the pointer with the new tag. Ensures that the entire
// range is set to the same tag.
PA_ALWAYS_INLINE void* TagMemoryRangeRandomly(uintptr_t address,
PA_ALWAYS_INLINE void* TagMemoryRangeRandomly(void* ptr,
size_t size,
uint64_t mask = 0u) {
void* ptr = reinterpret_cast<void*>(address);
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
return reinterpret_cast<void*>(
TagMemoryRangeRandomlyInternal(ptr, size, mask));
@@ -105,6 +108,12 @@ PA_ALWAYS_INLINE void* TagMemoryRangeRandomly(uintptr_t address,
#endif // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
}
PA_ALWAYS_INLINE void* TagMemoryRangeRandomly(uintptr_t address,
size_t size,
uint64_t mask = 0u) {
return TagMemoryRangeRandomly(reinterpret_cast<void*>(address), size, mask);
}
// Gets a version of ptr that's safe to dereference.
template <typename T>
PA_ALWAYS_INLINE T* TagPtr(T* ptr) {
@@ -130,6 +139,13 @@ PA_ALWAYS_INLINE uintptr_t UntagAddr(uintptr_t address) {
#endif // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
}
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
template <typename T>
inline uint8_t ExtractTagFromPtr(T* ptr) {
return (reinterpret_cast<uintptr_t>(ptr) >> kPtrTagShift) & 0xf;
}
#endif // PA_BUILDFLAG(HAS_MEMORY_TAGGING)
} // namespace internal
// Strips the tag bits off |ptr|.
@@ -478,7 +478,7 @@ PA_ALWAYS_INLINE std::optional<size_t> ThreadCache::MaybePutInCache(
PA_REENTRANCY_GUARD(is_in_thread_cache_);
PA_INCREMENT_COUNTER(stats_.cache_fill_count);
if (PA_UNLIKELY(bucket_index > largest_active_bucket_index_)) {
if (bucket_index > largest_active_bucket_index_) [[unlikely]] {
PA_INCREMENT_COUNTER(stats_.cache_fill_misses);
return std::nullopt;
}
@@ -497,11 +497,11 @@ PA_ALWAYS_INLINE std::optional<size_t> ThreadCache::MaybePutInCache(
// gambling that the compiler would not issue multiple loads.
uint8_t limit = bucket.limit.load(std::memory_order_relaxed);
// Batched deallocation, amortizing lock acquisitions.
if (PA_UNLIKELY(bucket.count > limit)) {
if (bucket.count > limit) [[unlikely]] {
ClearBucket(bucket, limit / 2);
}
if (PA_UNLIKELY(should_purge_.load(std::memory_order_relaxed))) {
if (should_purge_.load(std::memory_order_relaxed)) [[unlikely]] {
PurgeInternal();
}
@@ -517,14 +517,14 @@ PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
PA_REENTRANCY_GUARD(is_in_thread_cache_);
PA_INCREMENT_COUNTER(stats_.alloc_count);
// Only handle "small" allocations.
if (PA_UNLIKELY(bucket_index > largest_active_bucket_index_)) {
if (bucket_index > largest_active_bucket_index_) [[unlikely]] {
PA_INCREMENT_COUNTER(stats_.alloc_miss_too_large);
PA_INCREMENT_COUNTER(stats_.alloc_misses);
return 0;
}
auto& bucket = buckets_[bucket_index];
if (PA_LIKELY(bucket.freelist_head)) {
if (bucket.freelist_head) [[likely]] {
PA_INCREMENT_COUNTER(stats_.alloc_hits);
} else {
PA_DCHECK(bucket.count == 0);
@@ -535,7 +535,7 @@ PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
// Very unlikely, means that the central allocator is out of memory. Let it
// deal with it (may return 0, may crash).
if (PA_UNLIKELY(!bucket.freelist_head)) {
if (!bucket.freelist_head) [[unlikely]] {
return 0;
}
}
@@ -17,6 +17,7 @@ namespace {
// this array may either refer to features defined in //base features.
const base::Feature* const kFeaturesExposedToJava[] = {
&features::kPostPowerMonitorBroadcastReceiverInitToBackground,
&features::kPostGetMyMemoryStateToBackground,
};
// static
+2 -2
View File
@@ -430,7 +430,7 @@ BinderStatusOr<Parcel> BinderRef::TransactImpl(transaction_code_t code,
AParcel* in = parcel.release();
AParcel* out;
const auto status =
api.AIBinder_transact(binder_.get(), code, &in, &out, 0);
api.AIBinder_transact(binder_.get(), code, &in, &out, flags);
if (status != STATUS_OK) {
return unexpected(status);
}
@@ -529,7 +529,7 @@ binder_status_t SupportsBinderBase::OnIBinderTransact(AIBinder* binder,
}
// If binder NDK is unsupported, nobody will be calling this method.
NOTREACHED_NORETURN();
NOTREACHED();
}
} // namespace internal
+17
View File
@@ -8,6 +8,7 @@
#include "base/android/jni_android.h"
#include "base/android/jni_array.h"
#include "base/android/jni_string.h"
#include "base/android/scoped_java_ref.h"
#include "base/check_op.h"
#include "base/memory/singleton.h"
@@ -92,6 +93,22 @@ BuildInfo::BuildInfo(const std::vector<std::string>& params)
soc_manufacturer_(StrDupParam(params, 32)),
is_debug_app_(GetIntParam(params, 33)) {}
BuildInfo::~BuildInfo() = default;
void BuildInfo::set_gms_version_code_for_test(
const std::string& gms_version_code) {
// This leaks the string, just like production code.
gms_version_code_ = strdup(gms_version_code.c_str());
Java_BuildInfo_setGmsVersionCodeForTest(AttachCurrentThread(),
gms_version_code);
}
std::string BuildInfo::host_signing_cert_sha256() {
JNIEnv* env = AttachCurrentThread();
return base::android::ConvertJavaStringToUTF8(
env, Java_BuildInfo_lazyGetHostSigningCertSha256(env));
}
// static
BuildInfo* BuildInfo::GetInstance() {
return Singleton<BuildInfo, BuildInfoSingletonTraits >::get();
+9 -2
View File
@@ -48,7 +48,7 @@ class BASE_EXPORT BuildInfo {
BuildInfo(const BuildInfo&) = delete;
BuildInfo& operator=(const BuildInfo&) = delete;
~BuildInfo() {}
~BuildInfo();
// Static factory method for getting the singleton BuildInfo instance.
// Note that ownership is not conferred on the caller and the BuildInfo in
@@ -88,6 +88,8 @@ class BASE_EXPORT BuildInfo {
return gms_version_code_;
}
void set_gms_version_code_for_test(const std::string& gms_version_code);
// The package name of the host app which has loaded WebView, retrieved from
// the application context. In the context of the SDK Runtime, the package
// name of the app that owns this particular instance of the SDK Runtime will
@@ -105,6 +107,10 @@ class BASE_EXPORT BuildInfo {
// that owns this particular instance of the SDK Runtime.
const char* host_package_label() const { return host_package_label_; }
// The SHA256 of the public certificate used to sign the host application.
// This will default to an empty string if we were unable to retrieve it.
std::string host_signing_cert_sha256();
const char* package_version_code() const {
return package_version_code_;
}
@@ -195,7 +201,8 @@ class BASE_EXPORT BuildInfo {
const char* const package_version_code_;
const char* const package_version_name_;
const char* const android_build_fp_;
const char* const gms_version_code_;
// Can be overridden in tests.
const char* gms_version_code_ = nullptr;
const char* const installer_package_name_;
const char* const abi_name_;
const char* const custom_themes_;
@@ -6,6 +6,7 @@
#include "base/android/jni_android.h"
#include "base/android/jni_string.h"
#include "base/files/file.h"
// Must come after all headers that specialize FromJniType() / ToJniType().
#include "base/base_jni/ContentUriUtils_jni.h"
@@ -22,11 +23,39 @@ bool ContentUriExists(const FilePath& content_uri) {
return Java_ContentUriUtils_contentUriExists(env, j_uri);
}
File OpenContentUriForRead(const FilePath& content_uri) {
std::optional<std::string> TranslateOpenFlagsToJavaMode(uint32_t open_flags) {
// The allowable modes from ParcelFileDescriptor#parseMode() are
// ("r", "w", "wt", "wa", "rw", "rwt"), we disallow "w" which has been the
// source of android security issues.
// Ignore async.
open_flags &= ~File::FLAG_ASYNC;
switch (open_flags) {
case File::FLAG_OPEN | File::FLAG_READ:
return "r";
case File::FLAG_OPEN_ALWAYS | File::FLAG_READ | File::FLAG_WRITE:
return "rw";
case File::FLAG_OPEN_ALWAYS | File::FLAG_APPEND:
return "wa";
case File::FLAG_CREATE_ALWAYS | File::FLAG_READ | File::FLAG_WRITE:
return "rwt";
case File::FLAG_CREATE_ALWAYS | File::FLAG_WRITE:
return "wt";
default:
return std::nullopt;
}
}
File OpenContentUri(const FilePath& content_uri, uint32_t open_flags) {
JNIEnv* env = base::android::AttachCurrentThread();
ScopedJavaLocalRef<jstring> j_uri =
ConvertUTF8ToJavaString(env, content_uri.value());
jint fd = Java_ContentUriUtils_openContentUriForRead(env, j_uri);
auto mode = TranslateOpenFlagsToJavaMode(open_flags);
CHECK(mode.has_value()) << "Unsupported flags=0x" << std::hex << open_flags;
ScopedJavaLocalRef<jstring> j_mode =
ConvertUTF8ToJavaString(env, mode.value());
jint fd = Java_ContentUriUtils_openContentUri(env, j_uri, j_mode);
if (fd < 0)
return File();
return File(fd);
@@ -80,16 +109,4 @@ bool DeleteContentUri(const FilePath& content_uri) {
return Java_ContentUriUtils_delete(env, j_uri);
}
FilePath GetContentUriFromFilePath(const FilePath& file_path) {
JNIEnv* env = base::android::AttachCurrentThread();
ScopedJavaLocalRef<jstring> j_file_path =
ConvertUTF8ToJavaString(env, file_path.value());
ScopedJavaLocalRef<jstring> j_content_uri =
Java_ContentUriUtils_getContentUriFromFilePath(env, j_file_path);
if (j_content_uri.is_null())
return FilePath();
return FilePath(base::android::ConvertJavaStringToUTF8(env, j_content_uri));
}
} // namespace base
@@ -14,9 +14,18 @@
namespace base {
// Opens a content URI for read and returns the file descriptor to the caller.
// Translates base::File::FLAG_* `open_flags` bitset to Java mode from
// ParcelFileDescriptor#parseMode(): ("r", "w", "wt", "wa", "rw" or "rwt").
// Disallows "w" which has been the source of android security issues.
// Returns nullopt if `open_flags` are not supported.
BASE_EXPORT std::optional<std::string> TranslateOpenFlagsToJavaMode(
uint32_t open_flags);
// Opens a content URI and returns the file descriptor to the caller.
// `open_flags` is a bitmap of base::File::FLAG_* values.
// Returns -1 if the URI is invalid.
BASE_EXPORT File OpenContentUriForRead(const FilePath& content_uri);
BASE_EXPORT File OpenContentUri(const FilePath& content_uri,
uint32_t open_flags);
// Gets file size, or -1 if file is unknown length.
BASE_EXPORT int64_t GetContentUriFileSize(const FilePath& content_uri);
@@ -35,10 +44,6 @@ BASE_EXPORT bool MaybeGetFileDisplayName(const FilePath& content_uri,
// Deletes a content URI.
BASE_EXPORT bool DeleteContentUri(const FilePath& content_uri);
// Gets content URI's file path (eg: "content://org.chromium...") from normal
// file path (eg: "/data/user/0/...").
BASE_EXPORT FilePath GetContentUriFromFilePath(const FilePath& file_path);
} // namespace base
#endif // BASE_ANDROID_CONTENT_URI_UTILS_H_
@@ -6,14 +6,22 @@
namespace base {
bool ContentUriExists(const FilePath& content_uri) {
return false;
std::optional<std::string> TranslateOpenFlagsToJavaMode(uint32_t) {
return {};
}
File OpenContentUriForRead(const FilePath& content_uri) {
File OpenContentUri(const FilePath&, uint32_t) {
return {};
}
int64_t GetContentUriFileSize(const FilePath&) {
return -1;
}
bool ContentUriExists(const FilePath&) {
return false;
}
std::string GetContentUriMimeType(const FilePath& content_uri) {
return {};
}
@@ -27,8 +35,4 @@ bool DeleteContentUri(const FilePath& content_uri) {
return false;
}
FilePath GetContentUriFromFilePath(const FilePath& file_path) {
return {};
}
} // namespace base
@@ -73,9 +73,7 @@ void InputHintChecker::InitializeFeatures() {
}
}
void InputHintChecker::SetView(
JNIEnv* env,
const jni_zero::JavaParamRef<jobject>& root_view) {
void InputHintChecker::SetView(JNIEnv* env, jobject root_view) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
InitState state = FetchState();
if (state == InitState::kFailedToInitialize) {
@@ -91,7 +89,7 @@ void InputHintChecker::SetView(
// j.l.reflect.Method via double-reflection.
TransitionToState(InitState::kInProgress);
view_class_ =
ScopedJavaGlobalRef<jobject>(env, env->GetObjectClass(root_view.obj()));
ScopedJavaGlobalRef<jobject>(env, env->GetObjectClass(root_view));
pthread_t new_thread;
if (pthread_create(&new_thread, nullptr, OffThreadInitInvoker::Run,
nullptr) != 0) {
@@ -305,8 +303,8 @@ InputHintChecker::ScopedOverrideInstance::~ScopedOverrideInstance() {
}
void JNI_InputHintChecker_SetView(_JNIEnv* env,
const jni_zero::JavaParamRef<jobject>& v) {
InputHintChecker::GetInstance().SetView(env, v);
const JavaParamRef<jobject>& v) {
InputHintChecker::GetInstance().SetView(env, v.obj());
}
jboolean JNI_InputHintChecker_IsInitializedForTesting(_JNIEnv* env) {
@@ -41,7 +41,7 @@ class BASE_EXPORT InputHintChecker {
// Obtains a weak reference to |root_view| so that the following calls to
// HasInput() take the input hint for this View. Requirements for the View
// object are described in InputHintChecker.java.
void SetView(JNIEnv* env, const jni_zero::JavaParamRef<jobject>& root_view);
void SetView(JNIEnv* env, jobject root_view);
// Fetches and returns the input hint from the Android Framework.
//
+189 -131
View File
@@ -2,151 +2,179 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "base/android/jni_array.h"
#include "base/android/jni_android.h"
#include "base/android/jni_string.h"
#include "base/check_op.h"
#include "base/containers/extend.h"
#include "base/containers/heap_array.h"
#include "base/numerics/safe_conversions.h"
namespace base::android {
ScopedJavaLocalRef<jbyteArray> ToJavaByteArray(JNIEnv* env,
const uint8_t* bytes,
size_t len) {
const jsize len_jsize = checked_cast<jsize>(len);
jbyteArray byte_array = env->NewByteArray(len_jsize);
CheckException(env);
DCHECK(byte_array);
env->SetByteArrayRegion(byte_array, 0, len_jsize,
reinterpret_cast<const jbyte*>(bytes));
CheckException(env);
return ScopedJavaLocalRef<jbyteArray>(env, byte_array);
UNSAFE_BUFFER_USAGE ScopedJavaLocalRef<jbyteArray>
ToJavaByteArray(JNIEnv* env, const uint8_t* bytes, size_t len) {
return ToJavaByteArray(
env,
// SAFETY: The caller must provide a valid pointer and length.
UNSAFE_BUFFERS(base::span(bytes, len)));
}
ScopedJavaLocalRef<jbyteArray> ToJavaByteArray(
JNIEnv* env,
base::span<const uint8_t> bytes) {
return ToJavaByteArray(env, bytes.data(), bytes.size());
jbyteArray byte_array = env->NewByteArray(checked_cast<jsize>(bytes.size()));
CheckException(env);
DCHECK(byte_array);
static_assert(sizeof(jbyte) == sizeof(uint8_t));
static_assert(alignof(jbyte) <= alignof(uint8_t));
env->SetByteArrayRegion(byte_array, jsize{0},
checked_cast<jsize>(bytes.size()),
reinterpret_cast<const jbyte*>(bytes.data()));
CheckException(env);
return ScopedJavaLocalRef<jbyteArray>(env, byte_array);
}
ScopedJavaLocalRef<jbyteArray> ToJavaByteArray(JNIEnv* env,
const std::string& str) {
std::string_view str) {
return ToJavaByteArray(env, base::as_byte_span(str));
}
ScopedJavaLocalRef<jbooleanArray> ToJavaBooleanArray(
JNIEnv* env,
const std::vector<bool>& bool_vec) {
bool bool_arr[bool_vec.size()];
std::copy(bool_vec.begin(), bool_vec.end(), bool_arr);
return ToJavaBooleanArray(env, bool_arr, bool_vec.size());
}
ScopedJavaLocalRef<jbooleanArray> ToJavaBooleanArray(JNIEnv* env,
const bool* bools,
size_t len) {
const jsize len_jsize = checked_cast<jsize>(len);
jbooleanArray boolean_array = env->NewBooleanArray(len_jsize);
// SAFETY: The caller must provide a valid pointer and length, as enforced
// by UNSAFE_BUFFER_USAGE in the header.
return ToJavaBooleanArray(env, UNSAFE_BUFFERS(base::span(bools, len)));
}
ScopedJavaLocalRef<jbooleanArray> ToJavaBooleanArray(
JNIEnv* env,
const std::vector<bool>& bools) {
// Make an actual array of types equivalent to `bool`.
auto actual_bools = HeapArray<bool>::Uninit(bools.size());
std::ranges::copy(bools, actual_bools.begin());
return ToJavaBooleanArray(env, actual_bools);
}
ScopedJavaLocalRef<jbooleanArray> ToJavaBooleanArray(JNIEnv* env,
span<const bool> bools) {
jbooleanArray boolean_array =
env->NewBooleanArray(checked_cast<jsize>(bools.size()));
CheckException(env);
DCHECK(boolean_array);
env->SetBooleanArrayRegion(boolean_array, 0, len_jsize,
reinterpret_cast<const jboolean*>(bools));
static_assert(sizeof(jboolean) == sizeof(bool));
static_assert(alignof(jboolean) <= alignof(bool));
env->SetBooleanArrayRegion(boolean_array, jsize{0},
checked_cast<jsize>(bools.size()),
reinterpret_cast<const jboolean*>(bools.data()));
CheckException(env);
return ScopedJavaLocalRef<jbooleanArray>(env, boolean_array);
}
// TODO(tsepez): this should be declared UNSAFE_BUFFER_USAGE in the header.
ScopedJavaLocalRef<jintArray> ToJavaIntArray(JNIEnv* env,
const int* ints,
const int32_t* ints,
size_t len) {
const jsize len_jsize = checked_cast<jsize>(len);
jintArray int_array = env->NewIntArray(len_jsize);
// SAFETY: The caller must provide a valid pointer and length.
return ToJavaIntArray(env, UNSAFE_BUFFERS(base::span(ints, len)));
}
ScopedJavaLocalRef<jintArray> ToJavaIntArray(JNIEnv* env,
base::span<const int32_t> ints) {
jintArray int_array = env->NewIntArray(checked_cast<jsize>(ints.size()));
CheckException(env);
DCHECK(int_array);
env->SetIntArrayRegion(int_array, 0, len_jsize,
reinterpret_cast<const jint*>(ints));
static_assert(sizeof(jint) == sizeof(int32_t));
static_assert(alignof(jint) <= alignof(int32_t));
env->SetIntArrayRegion(int_array, jsize{0}, checked_cast<jsize>(ints.size()),
reinterpret_cast<const jint*>(ints.data()));
CheckException(env);
return ScopedJavaLocalRef<jintArray>(env, int_array);
}
ScopedJavaLocalRef<jintArray> ToJavaIntArray(JNIEnv* env,
base::span<const int> ints) {
return ToJavaIntArray(env, ints.data(), ints.size());
}
ScopedJavaLocalRef<jlongArray> ToJavaLongArray(JNIEnv* env,
const int64_t* longs,
size_t len) {
const jsize len_jsize = checked_cast<jsize>(len);
jlongArray long_array = env->NewLongArray(len_jsize);
CheckException(env);
DCHECK(long_array);
env->SetLongArrayRegion(long_array, 0, len_jsize,
reinterpret_cast<const jlong*>(longs));
CheckException(env);
return ScopedJavaLocalRef<jlongArray>(env, long_array);
// SAFETY: The caller must provide a valid pointer and length, as enforced
// by UNSAFE_BUFFER_USAGE in the header.
return ToJavaLongArray(env, UNSAFE_BUFFERS(base::span(longs, len)));
}
// Returns a new Java long array converted from the given int64_t array.
BASE_EXPORT ScopedJavaLocalRef<jlongArray> ToJavaLongArray(
JNIEnv* env,
base::span<const int64_t> longs) {
return ToJavaLongArray(env, longs.data(), longs.size());
jlongArray long_array = env->NewLongArray(checked_cast<jsize>(longs.size()));
CheckException(env);
DCHECK(long_array);
static_assert(sizeof(jlong) == sizeof(int64_t));
static_assert(alignof(jlong) <= alignof(int64_t));
env->SetLongArrayRegion(long_array, jsize{0},
checked_cast<jsize>(longs.size()),
reinterpret_cast<const jlong*>(longs.data()));
CheckException(env);
return ScopedJavaLocalRef<jlongArray>(env, long_array);
}
// Returns a new Java float array converted from the given C++ float array.
BASE_EXPORT ScopedJavaLocalRef<jfloatArray>
ToJavaFloatArray(JNIEnv* env, const float* floats, size_t len) {
const jsize len_jsize = checked_cast<jsize>(len);
jfloatArray float_array = env->NewFloatArray(len_jsize);
CheckException(env);
DCHECK(float_array);
env->SetFloatArrayRegion(float_array, 0, len_jsize,
reinterpret_cast<const jfloat*>(floats));
CheckException(env);
return ScopedJavaLocalRef<jfloatArray>(env, float_array);
// SAFETY: The caller must provide a valid pointer and length, as enforced
// by UNSAFE_BUFFER_USAGE in the header.
return ToJavaFloatArray(env, UNSAFE_BUFFERS(base::span(floats, len)));
}
BASE_EXPORT ScopedJavaLocalRef<jfloatArray> ToJavaFloatArray(
JNIEnv* env,
base::span<const float> floats) {
return ToJavaFloatArray(env, floats.data(), floats.size());
jfloatArray float_array =
env->NewFloatArray(checked_cast<jsize>(floats.size()));
CheckException(env);
DCHECK(float_array);
static_assert(sizeof(jfloat) == sizeof(float));
static_assert(alignof(jfloat) <= alignof(float));
env->SetFloatArrayRegion(float_array, jsize{0},
checked_cast<jsize>(floats.size()),
reinterpret_cast<const jfloat*>(floats.data()));
CheckException(env);
return ScopedJavaLocalRef<jfloatArray>(env, float_array);
}
BASE_EXPORT ScopedJavaLocalRef<jdoubleArray>
ToJavaDoubleArray(JNIEnv* env, const double* doubles, size_t len) {
const jsize len_jsize = checked_cast<jsize>(len);
jdoubleArray double_array = env->NewDoubleArray(len_jsize);
CheckException(env);
DCHECK(double_array);
env->SetDoubleArrayRegion(double_array, 0, len_jsize,
reinterpret_cast<const jdouble*>(doubles));
CheckException(env);
return ScopedJavaLocalRef<jdoubleArray>(env, double_array);
// SAFETY: The caller must provide a valid pointer and length, as enforced
// by UNSAFE_BUFFER_USAGE in the header.
return ToJavaDoubleArray(env, UNSAFE_BUFFERS(base::span(doubles, len)));
}
BASE_EXPORT ScopedJavaLocalRef<jdoubleArray> ToJavaDoubleArray(
JNIEnv* env,
base::span<const double> doubles) {
return ToJavaDoubleArray(env, doubles.data(), doubles.size());
jdoubleArray double_array =
env->NewDoubleArray(checked_cast<jsize>(doubles.size()));
CheckException(env);
DCHECK(double_array);
static_assert(sizeof(jdouble) == sizeof(double));
static_assert(alignof(jdouble) <= alignof(double));
env->SetDoubleArrayRegion(double_array, jsize{0},
checked_cast<jsize>(doubles.size()),
reinterpret_cast<const jdouble*>(doubles.data()));
CheckException(env);
return ScopedJavaLocalRef<jdoubleArray>(env, double_array);
}
BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfObjects(
@@ -158,7 +186,7 @@ BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfObjects(
CheckException(env);
for (size_t i = 0; i < v.size(); ++i) {
env->SetObjectArrayElement(joa, static_cast<jsize>(i), v[i].obj());
env->SetObjectArrayElement(joa, checked_cast<jsize>(i), v[i].obj());
}
return ScopedJavaLocalRef<jobjectArray>(env, joa);
}
@@ -177,7 +205,7 @@ BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfObjects(
CheckException(env);
for (size_t i = 0; i < v.size(); ++i) {
env->SetObjectArrayElement(joa, static_cast<jsize>(i), v[i].obj());
env->SetObjectArrayElement(joa, checked_cast<jsize>(i), v[i].obj());
}
return ScopedJavaLocalRef<jobjectArray>(env, joa);
}
@@ -191,7 +219,7 @@ BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToTypedJavaArrayOfObjects(
CheckException(env);
for (size_t i = 0; i < v.size(); ++i) {
env->SetObjectArrayElement(joa, static_cast<jsize>(i), v[i].obj());
env->SetObjectArrayElement(joa, checked_cast<jsize>(i), v[i].obj());
}
return ScopedJavaLocalRef<jobjectArray>(env, joa);
}
@@ -205,7 +233,7 @@ BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToTypedJavaArrayOfObjects(
CheckException(env);
for (size_t i = 0; i < v.size(); ++i) {
env->SetObjectArrayElement(joa, static_cast<jsize>(i), v[i].obj());
env->SetObjectArrayElement(joa, checked_cast<jsize>(i), v[i].obj());
}
return ScopedJavaLocalRef<jobjectArray>(env, joa);
}
@@ -220,7 +248,7 @@ ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfByteArray(
for (size_t i = 0; i < v.size(); ++i) {
ScopedJavaLocalRef<jbyteArray> byte_array = ToJavaByteArray(env, v[i]);
env->SetObjectArrayElement(joa, static_cast<jsize>(i), byte_array.obj());
env->SetObjectArrayElement(joa, checked_cast<jsize>(i), byte_array.obj());
}
return ScopedJavaLocalRef<jobjectArray>(env, joa);
}
@@ -235,7 +263,7 @@ ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfByteArray(
for (size_t i = 0; i < v.size(); ++i) {
ScopedJavaLocalRef<jbyteArray> byte_array = ToJavaByteArray(env, v[i]);
env->SetObjectArrayElement(joa, static_cast<jsize>(i), byte_array.obj());
env->SetObjectArrayElement(joa, checked_cast<jsize>(i), byte_array.obj());
}
return ScopedJavaLocalRef<jobjectArray>(env, joa);
}
@@ -249,7 +277,7 @@ ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfStrings(
for (size_t i = 0; i < v.size(); ++i) {
ScopedJavaLocalRef<jstring> item = ConvertUTF8ToJavaString(env, v[i]);
env->SetObjectArrayElement(joa, static_cast<jsize>(i), item.obj());
env->SetObjectArrayElement(joa, checked_cast<jsize>(i), item.obj());
}
return ScopedJavaLocalRef<jobjectArray>(env, joa);
}
@@ -267,7 +295,7 @@ ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfStringArray(
for (size_t i = 0; i < vec_outer.size(); ++i) {
ScopedJavaLocalRef<jobjectArray> inner =
ToJavaArrayOfStrings(env, vec_outer[i]);
env->SetObjectArrayElement(joa, static_cast<jsize>(i), inner.obj());
env->SetObjectArrayElement(joa, checked_cast<jsize>(i), inner.obj());
}
return ScopedJavaLocalRef<jobjectArray>(env, joa);
@@ -286,7 +314,7 @@ ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfStringArray(
for (size_t i = 0; i < vec_outer.size(); ++i) {
ScopedJavaLocalRef<jobjectArray> inner =
ToJavaArrayOfStrings(env, vec_outer[i]);
env->SetObjectArrayElement(joa, static_cast<jsize>(i), inner.obj());
env->SetObjectArrayElement(joa, checked_cast<jsize>(i), inner.obj());
}
return ScopedJavaLocalRef<jobjectArray>(env, joa);
@@ -301,7 +329,7 @@ ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfStrings(
for (size_t i = 0; i < v.size(); ++i) {
ScopedJavaLocalRef<jstring> item = ConvertUTF16ToJavaString(env, v[i]);
env->SetObjectArrayElement(joa, static_cast<jsize>(i), item.obj());
env->SetObjectArrayElement(joa, checked_cast<jsize>(i), item.obj());
}
return ScopedJavaLocalRef<jobjectArray>(env, joa);
}
@@ -313,13 +341,16 @@ void AppendJavaStringArrayToStringVector(JNIEnv* env,
if (!array)
return;
size_t len = SafeGetArrayLength(env, array);
size_t back = out->size();
out->resize(back + len);
if (!len) {
return;
}
out->resize(out->size() + len);
span<std::u16string> back = span(*out).last(len);
for (size_t i = 0; i < len; ++i) {
ScopedJavaLocalRef<jstring> str(
env, static_cast<jstring>(env->GetObjectArrayElement(
array.obj(), static_cast<jsize>(i))));
ConvertJavaStringToUTF16(env, str.obj(), out->data() + back + i);
array.obj(), checked_cast<jsize>(i))));
ConvertJavaStringToUTF16(env, str.obj(), &back[i]);
}
}
@@ -330,13 +361,16 @@ void AppendJavaStringArrayToStringVector(JNIEnv* env,
if (!array)
return;
size_t len = SafeGetArrayLength(env, array);
size_t back = out->size();
out->resize(back + len);
if (!len) {
return;
}
out->resize(out->size() + len);
span<std::string> back = span(*out).last(len);
for (size_t i = 0; i < len; ++i) {
ScopedJavaLocalRef<jstring> str(
env, static_cast<jstring>(env->GetObjectArrayElement(
array.obj(), static_cast<jsize>(i))));
ConvertJavaStringToUTF8(env, str.obj(), out->data() + back + i);
array.obj(), checked_cast<jsize>(i))));
ConvertJavaStringToUTF8(env, str.obj(), &back[i]);
}
}
@@ -347,12 +381,17 @@ void AppendJavaByteArrayToByteVector(JNIEnv* env,
if (!byte_array)
return;
size_t len = SafeGetArrayLength(env, byte_array);
if (!len)
if (!len) {
return;
size_t back = out->size();
out->resize(back + len);
env->GetByteArrayRegion(byte_array.obj(), 0, static_cast<jsize>(len),
reinterpret_cast<int8_t*>(out->data() + back));
}
out->resize(out->size() + len);
span<uint8_t> back = span(*out).last(len);
static_assert(sizeof(jbyte) == sizeof(uint8_t));
static_assert(alignof(jbyte) <= alignof(uint8_t));
env->GetByteArrayRegion(byte_array.obj(), jsize{0},
checked_cast<jsize>(back.size()),
reinterpret_cast<jbyte*>(back.data()));
}
void JavaByteArrayToByteVector(JNIEnv* env,
@@ -369,11 +408,13 @@ size_t JavaByteArrayToByteSpan(JNIEnv* env,
base::span<uint8_t> dest) {
CHECK(byte_array);
size_t len = SafeGetArrayLength(env, byte_array);
size_t span_len = dest.size_bytes();
CHECK_GE(span_len, len) << "Target span is too small, java array size: "
<< len << ", span size: " << span_len;
env->GetByteArrayRegion(byte_array.obj(), 0, static_cast<jsize>(len),
reinterpret_cast<int8_t*>(dest.data()));
span<uint8_t> copy_dest = dest.first(len);
static_assert(sizeof(jbyte) == sizeof(uint8_t));
static_assert(alignof(jbyte) <= alignof(uint8_t));
env->GetByteArrayRegion(byte_array.obj(), jsize{0},
checked_cast<jsize>(copy_dest.size()),
reinterpret_cast<jbyte*>(copy_dest.data()));
return len;
}
@@ -395,15 +436,22 @@ void JavaBooleanArrayToBoolVector(JNIEnv* env,
if (!boolean_array)
return;
size_t len = SafeGetArrayLength(env, boolean_array);
if (!len)
return;
out->resize(len);
// It is not possible to get bool* out of vector<bool>.
jboolean* values = env->GetBooleanArrayElements(boolean_array.obj(), nullptr);
for (size_t i = 0; i < len; ++i) {
out->at(i) = static_cast<bool>(values[i]);
if (!len) {
return;
}
env->ReleaseBooleanArrayElements(boolean_array.obj(), values, JNI_ABORT);
// SAFETY: `SafeGetArrayLength()` returns the number of elements in the
// `boolean_array`, though it can return 0 if the array is invalid. So we only
// call `GetBooleanArrayElements()` when it's positive. Then
// GetBooleanArrayElements() returns a buffer of the size returned from
// `SafeGetArrayLength()`.
span<jboolean> values = UNSAFE_BUFFERS(
span(env->GetBooleanArrayElements(boolean_array.obj(), nullptr), len));
for (size_t i = 0; i < values.size(); ++i) {
(*out)[i] = static_cast<bool>(values[i]);
}
env->ReleaseBooleanArrayElements(boolean_array.obj(), values.data(),
JNI_ABORT);
}
void JavaIntArrayToIntVector(JNIEnv* env,
@@ -414,7 +462,7 @@ void JavaIntArrayToIntVector(JNIEnv* env,
out->resize(len);
if (!len)
return;
env->GetIntArrayRegion(int_array.obj(), 0, static_cast<jsize>(len),
env->GetIntArrayRegion(int_array.obj(), jsize{0}, checked_cast<jsize>(len),
out->data());
}
@@ -425,7 +473,7 @@ void JavaLongArrayToInt64Vector(JNIEnv* env,
std::vector<jlong> temp;
JavaLongArrayToLongVector(env, long_array, &temp);
out->resize(0);
out->insert(out->begin(), temp.begin(), temp.end());
Extend(*out, temp);
}
void JavaLongArrayToLongVector(JNIEnv* env,
@@ -436,7 +484,7 @@ void JavaLongArrayToLongVector(JNIEnv* env,
out->resize(len);
if (!len)
return;
env->GetLongArrayRegion(long_array.obj(), 0, static_cast<jsize>(len),
env->GetLongArrayRegion(long_array.obj(), jsize{0}, checked_cast<jsize>(len),
out->data());
}
@@ -448,8 +496,8 @@ void JavaFloatArrayToFloatVector(JNIEnv* env,
out->resize(len);
if (!len)
return;
env->GetFloatArrayRegion(float_array.obj(), 0, static_cast<jsize>(len),
out->data());
env->GetFloatArrayRegion(float_array.obj(), jsize{0},
checked_cast<jsize>(len), out->data());
}
void JavaDoubleArrayToDoubleVector(JNIEnv* env,
@@ -460,8 +508,8 @@ void JavaDoubleArrayToDoubleVector(JNIEnv* env,
out->resize(len);
if (!len)
return;
env->GetDoubleArrayRegion(double_array.obj(), 0, static_cast<jsize>(len),
out->data());
env->GetDoubleArrayRegion(double_array.obj(), jsize{0},
checked_cast<jsize>(len), out->data());
}
void JavaArrayOfByteArrayToStringVector(JNIEnv* env,
@@ -473,11 +521,21 @@ void JavaArrayOfByteArrayToStringVector(JNIEnv* env,
for (size_t i = 0; i < len; ++i) {
ScopedJavaLocalRef<jbyteArray> bytes_array(
env, static_cast<jbyteArray>(env->GetObjectArrayElement(
array.obj(), static_cast<jsize>(i))));
array.obj(), checked_cast<jsize>(i))));
size_t bytes_len = SafeGetArrayLength(env, bytes_array);
jbyte* bytes = env->GetByteArrayElements(bytes_array.obj(), nullptr);
(*out)[i].assign(reinterpret_cast<const char*>(bytes), bytes_len);
env->ReleaseByteArrayElements(bytes_array.obj(), bytes, JNI_ABORT);
// SAFETY: `SafeGetArrayLength()` returns the number of elements in the
// `boobytes_array`, though it can return 0 if the array is invalid. So we
// only call `GetByteArrayElements()` when it's positive. Then
// GetByteArrayElements() returns a buffer of the size returned from
// `SafeGetArrayLength()`.
if (!bytes_len) {
(*out)[i].clear();
continue;
}
span<jbyte> bytes = UNSAFE_BUFFERS(
span(env->GetByteArrayElements(bytes_array.obj(), nullptr), bytes_len));
(*out)[i] = base::as_string_view(base::as_bytes(bytes));
env->ReleaseByteArrayElements(bytes_array.obj(), bytes.data(), JNI_ABORT);
}
}
@@ -490,7 +548,7 @@ void JavaArrayOfByteArrayToBytesVector(JNIEnv* env,
for (size_t i = 0; i < len; ++i) {
ScopedJavaLocalRef<jbyteArray> bytes_array(
env, static_cast<jbyteArray>(env->GetObjectArrayElement(
array.obj(), static_cast<jsize>(i))));
array.obj(), checked_cast<jsize>(i))));
JavaByteArrayToByteVector(env, bytes_array, &(*out)[i]);
}
}
@@ -505,10 +563,10 @@ void Java2dStringArrayTo2dStringVector(
for (size_t i = 0; i < len; ++i) {
ScopedJavaLocalRef<jobjectArray> strings_array(
env, static_cast<jobjectArray>(env->GetObjectArrayElement(
array.obj(), static_cast<jsize>(i))));
array.obj(), checked_cast<jsize>(i))));
out->at(i).clear();
AppendJavaStringArrayToStringVector(env, strings_array, &out->at(i));
(*out)[i].clear();
AppendJavaStringArrayToStringVector(env, strings_array, &(*out)[i]);
}
}
@@ -522,10 +580,10 @@ void Java2dStringArrayTo2dStringVector(
for (size_t i = 0; i < len; ++i) {
ScopedJavaLocalRef<jobjectArray> strings_array(
env, static_cast<jobjectArray>(env->GetObjectArrayElement(
array.obj(), static_cast<jsize>(i))));
array.obj(), checked_cast<jsize>(i))));
out->at(i).clear();
AppendJavaStringArrayToStringVector(env, strings_array, &out->at(i));
(*out)[i].clear();
AppendJavaStringArrayToStringVector(env, strings_array, &(*out)[i]);
}
}
@@ -538,8 +596,8 @@ void JavaArrayOfIntArrayToIntVector(JNIEnv* env,
for (size_t i = 0; i < len; ++i) {
ScopedJavaLocalRef<jintArray> int_array(
env, static_cast<jintArray>(env->GetObjectArrayElement(
array.obj(), static_cast<jsize>(i))));
JavaIntArrayToIntVector(env, int_array, &out->at(i));
array.obj(), checked_cast<jsize>(i))));
JavaIntArrayToIntVector(env, int_array, &(*out)[i]);
}
}
+62 -40
View File
@@ -8,12 +8,14 @@
#include <jni.h>
#include <stddef.h>
#include <stdint.h>
#include <ostream>
#include <string>
#include <vector>
#include "base/android/scoped_java_ref.h"
#include "base/check_op.h"
#include "base/compiler_specific.h"
#include "base/containers/span.h"
namespace base::android {
@@ -32,110 +34,130 @@ BASE_EXPORT size_t SafeGetArrayLength(JNIEnv* env,
}
// Returns a new Java byte array converted from the given bytes array.
BASE_EXPORT ScopedJavaLocalRef<jbyteArray> ToJavaByteArray(JNIEnv* env,
const uint8_t* bytes,
size_t len);
UNSAFE_BUFFER_USAGE BASE_EXPORT ScopedJavaLocalRef<jbyteArray>
ToJavaByteArray(JNIEnv* env, const uint8_t* bytes, size_t len);
BASE_EXPORT ScopedJavaLocalRef<jbyteArray> ToJavaByteArray(
JNIEnv* env,
base::span<const uint8_t> bytes);
span<const uint8_t> bytes);
// Returns a new Java byte array converted from the given string. No UTF-8
// conversion is performed.
BASE_EXPORT ScopedJavaLocalRef<jbyteArray> ToJavaByteArray(
JNIEnv* env,
const std::string& str);
// Returns a new Java boolean array converted from the given bool vector.
BASE_EXPORT ScopedJavaLocalRef<jbooleanArray> ToJavaBooleanArray(
JNIEnv* env,
const std::vector<bool>& bool_vec);
std::string_view str);
// Returns a new Java boolean array converted from the given bool array.
BASE_EXPORT ScopedJavaLocalRef<jbooleanArray>
BASE_EXPORT ScopedJavaLocalRef<jbooleanArray> ToJavaBooleanArray(
JNIEnv* env,
span<const bool> bools);
// Returns a new Java boolean array converted from the given bool vector.
//
// std::vector<bool> does not convert to span, so we have a separate overload.
BASE_EXPORT ScopedJavaLocalRef<jbooleanArray> ToJavaBooleanArray(
JNIEnv* env,
const std::vector<bool>& bools);
// Returns a new Java boolean array converted from the given bool array.
//
// TODO(crbug.com/40284755): Remove this overload, use spans.
UNSAFE_BUFFER_USAGE BASE_EXPORT ScopedJavaLocalRef<jbooleanArray>
ToJavaBooleanArray(JNIEnv* env, const bool* bools, size_t len);
// Returns a new Java int array converted from the given int array.
BASE_EXPORT ScopedJavaLocalRef<jintArray> ToJavaIntArray(
JNIEnv* env, const int* ints, size_t len);
BASE_EXPORT ScopedJavaLocalRef<jintArray> ToJavaIntArray(
JNIEnv* env,
base::span<const int> ints);
span<const int32_t> ints);
// Returns a new Java int array converted from the given int array.
//
// TODO(crbug.com/40284755): Remove this overload, use spans.
BASE_EXPORT ScopedJavaLocalRef<jintArray> ToJavaIntArray(JNIEnv* env,
const int32_t* ints,
size_t len);
// Returns a new Java long array converted from the given int64_t array.
BASE_EXPORT ScopedJavaLocalRef<jlongArray> ToJavaLongArray(JNIEnv* env,
const int64_t* longs,
size_t len);
BASE_EXPORT ScopedJavaLocalRef<jlongArray> ToJavaLongArray(
JNIEnv* env,
base::span<const int64_t> longs);
span<const int64_t> longs);
// Returns a new Java long array converted from the given int64_t array.
//
// TODO(crbug.com/40284755): Remove this overload, use spans.
UNSAFE_BUFFER_USAGE BASE_EXPORT ScopedJavaLocalRef<jlongArray>
ToJavaLongArray(JNIEnv* env, const int64_t* longs, size_t len);
// Returns a new Java float array converted from the given C++ float array.
BASE_EXPORT ScopedJavaLocalRef<jfloatArray> ToJavaFloatArray(
JNIEnv* env, const float* floats, size_t len);
BASE_EXPORT ScopedJavaLocalRef<jfloatArray> ToJavaFloatArray(
JNIEnv* env,
base::span<const float> floats);
span<const float> floats);
// Returns a new Java float array converted from the given C++ float array.
//
// TODO(crbug.com/40284755): Remove this overload, use spans.
UNSAFE_BUFFER_USAGE BASE_EXPORT ScopedJavaLocalRef<jfloatArray>
ToJavaFloatArray(JNIEnv* env, const float* floats, size_t len);
// Returns a new Java double array converted from the given C++ double array.
BASE_EXPORT ScopedJavaLocalRef<jdoubleArray>
ToJavaDoubleArray(JNIEnv* env, const double* doubles, size_t len);
BASE_EXPORT ScopedJavaLocalRef<jdoubleArray> ToJavaDoubleArray(
JNIEnv* env,
base::span<const double> doubles);
span<const double> doubles);
// Returns a new Java double array converted from the given C++ double array.
//
// TODO(crbug.com/40284755): Remove this overload, use spans.
UNSAFE_BUFFER_USAGE BASE_EXPORT ScopedJavaLocalRef<jdoubleArray>
ToJavaDoubleArray(JNIEnv* env, const double* doubles, size_t len);
// Returns a new clazz[] with the content of |v|.
BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfObjects(
JNIEnv* env,
jclass clazz,
base::span<const ScopedJavaLocalRef<jobject>> v);
span<const ScopedJavaLocalRef<jobject>> v);
// Returns a new Object[] with the content of |v|.
BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfObjects(
JNIEnv* env,
base::span<const ScopedJavaLocalRef<jobject>> v);
span<const ScopedJavaLocalRef<jobject>> v);
BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfObjects(
JNIEnv* env,
base::span<const ScopedJavaGlobalRef<jobject>> v);
span<const ScopedJavaGlobalRef<jobject>> v);
// Returns a new Type[] with the content of |v|.
BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToTypedJavaArrayOfObjects(
JNIEnv* env,
base::span<const ScopedJavaLocalRef<jobject>> v,
span<const ScopedJavaLocalRef<jobject>> v,
jclass type);
BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToTypedJavaArrayOfObjects(
JNIEnv* env,
base::span<const ScopedJavaGlobalRef<jobject>> v,
span<const ScopedJavaGlobalRef<jobject>> v,
jclass type);
// Returns a array of Java byte array converted from |v|.
BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfByteArray(
JNIEnv* env,
base::span<const std::string> v);
span<const std::string> v);
BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfByteArray(
JNIEnv* env,
base::span<const std::vector<uint8_t>> v);
span<const std::vector<uint8_t>> v);
BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfStrings(
JNIEnv* env,
base::span<const std::string> v);
span<const std::string> v);
BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfStrings(
JNIEnv* env,
base::span<const std::u16string> v);
span<const std::u16string> v);
BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfStringArray(
JNIEnv* env,
base::span<const std::vector<std::string>> v);
span<const std::vector<std::string>> v);
BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfStringArray(
JNIEnv* env,
base::span<const std::vector<std::u16string>> v);
span<const std::vector<std::u16string>> v);
// Converts a Java string array to a native array.
BASE_EXPORT void AppendJavaStringArrayToStringVector(
@@ -166,7 +188,7 @@ BASE_EXPORT void JavaByteArrayToByteVector(
BASE_EXPORT size_t
JavaByteArrayToByteSpan(JNIEnv* env,
const JavaRef<jbyteArray>& byte_array,
base::span<uint8_t> dest);
span<uint8_t> dest);
// Replaces the content of |out| with the Java bytes in |byte_array|. No UTF-8
// conversion is performed.
@@ -84,10 +84,6 @@ std::string ConvertJavaStringToUTF8(JNIEnv* env, const JavaRef<jstring>& str) {
ScopedJavaLocalRef<jstring> ConvertUTF8ToJavaString(JNIEnv* env,
std::string_view str) {
// ART allocates new empty strings, so use a singleton when applicable.
if (str.empty()) {
return jni_zero::g_empty_string.AsLocalRef(env);
}
// JNI's NewStringUTF expects "modified" UTF8 so instead create the string
// via our own UTF16 conversion utility.
// Further, Dalvik requires the string passed into NewStringUTF() to come from
@@ -154,10 +150,6 @@ std::u16string ConvertJavaStringToUTF16(JNIEnv* env,
ScopedJavaLocalRef<jstring> ConvertUTF16ToJavaString(JNIEnv* env,
std::u16string_view str) {
// ART allocates new empty strings, so use a singleton when applicable.
if (str.empty()) {
return jni_zero::g_empty_string.AsLocalRef(env);
}
return ScopedJavaLocalRef<jstring>(env,
ConvertUTF16ToJavaStringImpl(env, str));
}
@@ -126,7 +126,14 @@ bool FindRegionInOpenFile(int fd, uintptr_t* out_address, size_t* out_size) {
// Loop until no bytes left to scan. On every iteration except the last, fill
// the buffer till the end. On every iteration except the first, the buffer
// begins with kMaxLineLength bytes from the end of the previous fill.
// Silence clang's warning about allocating on the stack because this is a very
// special case.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wvla-extension"
char buf[kReadSize + kMaxLineLength + 1];
#pragma clang diagnostic pop
buf[kReadSize + kMaxLineLength] = '\0'; // Stop strstr().
size_t pos = 0;
size_t bytes_requested = kReadSize + kMaxLineLength;
@@ -133,7 +133,7 @@ __attribute__((always_inline, no_instrument_function)) void RecordAddress(
for_testing ? kStartOfTextForTesting : base::android::kStartOfText;
const size_t end =
for_testing ? kEndOfTextForTesting : base::android::kEndOfText;
if (UNLIKELY(address < start || address > end)) {
if (address < start || address > end) [[unlikely]] {
if (!AreAnchorsSane()) {
// Something is really wrong with the anchors, and this is likely to be
// triggered from within a static constructor, where logging is likely to
@@ -185,7 +185,7 @@ __attribute__((always_inline, no_instrument_function)) void RecordAddress(
auto& ordered_offsets_index = g_data[index].index;
size_t insertion_index =
ordered_offsets_index.fetch_add(1, std::memory_order_relaxed);
if (UNLIKELY(insertion_index >= kMaxElements)) {
if (insertion_index >= kMaxElements) [[unlikely]] {
Disable();
LOG(FATAL) << "Too many reached offsets";
}
@@ -64,6 +64,12 @@
static <fields>;
}
# Causes R8 to more agressively optimize ServiceLoader.load() calls, by
# assuming no exceptions will be thrown.
-assumenosideeffects class java.util.ServiceLoader {
static *** load(...);
}
# Keep the names of exception types, to make it easier to understand stack
# traces in contexts where it's not trivial to deobfuscate them - for example
# when reported to app developers who are using WebView.
@@ -83,15 +83,6 @@ void TaskRunnerAndroid::PostDelayedTask(
Milliseconds(delay));
}
bool TaskRunnerAndroid::BelongsToCurrentThread(JNIEnv* env) {
// TODO(crbug.com/40108370): Move BelongsToCurrentThread from TaskRunnerImpl
// to SequencedTaskRunnerImpl on the Java side too.
if (type_ == TaskRunnerType::BASE)
return false;
return static_cast<SequencedTaskRunner*>(task_runner_.get())
->RunsTasksInCurrentSequence();
}
// static
std::unique_ptr<TaskRunnerAndroid> TaskRunnerAndroid::Create(
jint task_runner_type,
+16 -4
View File
@@ -273,12 +273,24 @@ BASE_EXPORT FilePath NSURLToFilePath(NSURL* url);
#endif // __OBJC__
// Converts a non-null |path| to a CFURLRef. |path| must not be empty.
//
// This function only uses manually-owned resources, so it does not depend on an
// NSAutoreleasePool being set up on the current thread.
// CoreFoundation versions of the above calls. These only uses manually-owned
// resources, so they do not depend on an NSAutoreleasePool being set up on the
// current thread.
// Converts |path| to a CFURLRef. Returns nil if |path| is empty.
BASE_EXPORT ScopedCFTypeRef<CFURLRef> FilePathToCFURL(const FilePath& path);
// Converts |path| to a CFStringRef. Returns nil if |path| is empty.
BASE_EXPORT ScopedCFTypeRef<CFStringRef> FilePathToCFString(
const FilePath& path);
// Converts |str| to a FilePath. Returns an empty path if |str| is nil.
BASE_EXPORT FilePath CFStringToFilePath(CFStringRef str);
// Converts |url| to a FilePath. Returns an empty path if |url| is nil or if
// |url| is not of scheme "file".
BASE_EXPORT FilePath CFURLToFilePath(CFURLRef url);
#if defined(__OBJC__)
// Converts |range| to an NSRange, returning the new range in |range_out|.
// Returns true if conversion was successful, false if the values of |range|
+54 -27
View File
@@ -4,14 +4,18 @@
#include "base/apple/foundation_util.h"
#include <CoreFoundation/CoreFoundation.h>
#import <Foundation/Foundation.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <vector>
#include "base/apple/bridging.h"
#include "base/apple/bundle_locations.h"
#include "base/apple/osstatus_logging.h"
#include "base/apple/scoped_cftyperef.h"
#include "base/containers/adapters.h"
#include "base/files/file_path.h"
#include "base/logging.h"
@@ -55,6 +59,12 @@ bool UncachedAmIBundled() {
#endif
}
bool CFURLIsFileURL(CFURLRef url) {
ScopedCFTypeRef<CFStringRef> scheme(CFURLCopyScheme(url));
return CFStringCompare(scheme.get(), CFSTR("file"),
kCFCompareCaseInsensitive) == kCFCompareEqualTo;
}
} // namespace
bool AmIBundled() {
@@ -365,53 +375,70 @@ std::string GetValueFromDictionaryErrorMessage(CFStringRef key,
}
NSURL* FilePathToNSURL(const FilePath& path) {
if (NSString* path_string = FilePathToNSString(path)) {
return [NSURL fileURLWithPath:path_string];
}
return nil;
return apple::CFToNSOwnershipCast(FilePathToCFURL(path).release());
}
NSString* FilePathToNSString(const FilePath& path) {
if (path.empty()) {
return nil;
}
return @(path.value().c_str()); // @() does UTF8 conversion.
return apple::CFToNSOwnershipCast(FilePathToCFString(path).release());
}
FilePath NSStringToFilePath(NSString* str) {
if (!str.length) {
return FilePath();
}
return FilePath(str.fileSystemRepresentation);
return CFStringToFilePath(apple::NSToCFPtrCast(str));
}
FilePath NSURLToFilePath(NSURL* url) {
if (!url.fileURL) {
return FilePath();
}
return NSStringToFilePath(url.path);
return CFURLToFilePath(apple::NSToCFPtrCast(url));
}
ScopedCFTypeRef<CFURLRef> FilePathToCFURL(const FilePath& path) {
DCHECK(!path.empty());
if (path.empty()) {
return ScopedCFTypeRef<CFURLRef>();
}
// The function's docs promise that it does not require an NSAutoreleasePool.
// A straightforward way to accomplish this is to use *Create* functions,
// combined with ScopedCFTypeRef.
const std::string& path_string = path.value();
ScopedCFTypeRef<CFStringRef> path_cfstring(CFStringCreateWithBytes(
kCFAllocatorDefault, reinterpret_cast<const UInt8*>(path_string.data()),
checked_cast<CFIndex>(path_string.length()), kCFStringEncodingUTF8,
/*isExternalRepresentation=*/FALSE));
if (!path_cfstring) {
ScopedCFTypeRef<CFStringRef> path_string(
CFStringCreateWithFileSystemRepresentation(kCFAllocatorDefault,
path.value().c_str()));
if (!path_string) {
return ScopedCFTypeRef<CFURLRef>();
}
return ScopedCFTypeRef<CFURLRef>(CFURLCreateWithFileSystemPath(
kCFAllocatorDefault, path_cfstring.get(), kCFURLPOSIXPathStyle,
kCFAllocatorDefault, path_string.get(), kCFURLPOSIXPathStyle,
/*isDirectory=*/FALSE));
}
ScopedCFTypeRef<CFStringRef> FilePathToCFString(const FilePath& path) {
if (path.empty()) {
return ScopedCFTypeRef<CFStringRef>();
}
return ScopedCFTypeRef<CFStringRef>(
CFStringCreateWithFileSystemRepresentation(kCFAllocatorDefault,
path.value().c_str()));
}
FilePath CFStringToFilePath(CFStringRef str) {
if (!str || CFStringGetLength(str) == 0) {
return FilePath();
}
return FilePath(FilePath::GetHFSDecomposedForm(str));
}
FilePath CFURLToFilePath(CFURLRef url) {
if (!url || !CFURLIsFileURL(url)) {
return FilePath();
}
ScopedCFTypeRef<CFStringRef> path(
CFURLCopyFileSystemPath(url, kCFURLPOSIXPathStyle));
if (!path) {
return FilePath();
}
return CFStringToFilePath(path.get());
}
bool CFRangeToNSRange(CFRange range, NSRange* range_out) {
NSUInteger end;
if (IsValueInRangeForNumericType<NSUInteger>(range.location) &&
+46 -30
View File
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "base/base64.h"
#include <stddef.h>
@@ -44,13 +39,20 @@ void Base64EncodeAppend(span<const uint8_t> input, std::string* output) {
CHECK_LE(input.size(), MODP_B64_MAX_INPUT_LEN);
size_t encode_data_len = modp_b64_encode_data_len(input.size());
size_t prefix_len = output->size();
output->resize(base::CheckAdd(encode_data_len, prefix_len).ValueOrDie());
const size_t after_size =
base::CheckAdd(encode_data_len, output->size()).ValueOrDie();
output->resize(after_size);
const size_t output_size = modp_b64_encode_data(
output->data() + prefix_len, reinterpret_cast<const char*>(input.data()),
input.size());
CHECK_EQ(output->size(), prefix_len + output_size);
span<const char> read = base::as_chars(input);
span<char> write = base::span(*output).last(encode_data_len);
const size_t written_size = modp_b64_encode_data(
write.data(), // This must point to `encode_data_len` many chars.
read.data(), read.size());
// If this failed it would indicate we wrote OOB or left bytes uninitialized.
// It's possible for this to be elided by the compiler, since writing OOB is
// UB.
CHECK_EQ(written_size, write.size());
}
std::string Base64Encode(std::string_view input) {
@@ -60,13 +62,12 @@ std::string Base64Encode(std::string_view input) {
bool Base64Decode(std::string_view input,
std::string* output,
Base64DecodePolicy policy) {
std::string temp;
temp.resize(modp_b64_decode_len(input.size()));
std::string decode_buf;
decode_buf.resize(modp_b64_decode_len(input.size()));
// does not null terminate result since result is binary data!
size_t input_size = input.size();
size_t output_size = modp_b64_decode(&(temp[0]), input.data(), input_size,
GetModpPolicy(policy));
// Does not NUL-terminate result since result is binary data!
size_t written_size = modp_b64_decode(decode_buf.data(), input.data(),
input.size(), GetModpPolicy(policy));
// Forgiving mode requires whitespace to be stripped prior to decoding.
// We don't do that in the above code to ensure that the "happy path" of
@@ -74,37 +75,52 @@ bool Base64Decode(std::string_view input,
// will always cause `modp_b64_decode` to fail, just handle whitespace
// stripping on failure. This is not much slower than just scanning for
// whitespace first, even for input with whitespace.
if (output_size == MODP_B64_ERROR &&
if (written_size == MODP_B64_ERROR &&
policy == Base64DecodePolicy::kForgiving) {
// We could use `output` here to avoid an allocation when decoding is done
// in-place, but it violates the API contract that `output` is only modified
// on success.
std::string input_without_whitespace;
RemoveChars(input, kInfraAsciiWhitespace, &input_without_whitespace);
output_size =
modp_b64_decode(&(temp[0]), input_without_whitespace.data(),
// This means that the required size to decode is at most what was needed
// above, which means `decode_buf` will fit the decoded bytes at its current
// size and we don't need to call `modp_b64_decode_len()` again.
CHECK_LE(input_without_whitespace.size(), input.size());
written_size =
modp_b64_decode(decode_buf.data(), input_without_whitespace.data(),
input_without_whitespace.size(), GetModpPolicy(policy));
}
if (output_size == MODP_B64_ERROR)
if (written_size == MODP_B64_ERROR) {
return false;
}
temp.resize(output_size);
output->swap(temp);
// If this failed it would indicate we wrote OOB. It's possible for this to be
// elided by the compiler, since writing OOB is UB.
CHECK_LE(written_size, decode_buf.size());
// Shrinks the buffer and makes it NUL-terminated.
decode_buf.resize(written_size);
*output = std::move(decode_buf);
return true;
}
std::optional<std::vector<uint8_t>> Base64Decode(std::string_view input) {
std::vector<uint8_t> ret(modp_b64_decode_len(input.size()));
std::vector<uint8_t> write_buf(modp_b64_decode_len(input.size()));
span<char> write = base::as_writable_chars(base::span(write_buf));
size_t input_size = input.size();
size_t output_size = modp_b64_decode(reinterpret_cast<char*>(ret.data()),
input.data(), input_size);
if (output_size == MODP_B64_ERROR)
size_t written_size =
modp_b64_decode(write.data(), input.data(), input.size());
if (written_size == MODP_B64_ERROR) {
return std::nullopt;
}
ret.resize(output_size);
return ret;
// If this failed it would indicate we wrote OOB. It's possible for this to be
// elided by the compiler, since writing OOB is UB.
CHECK_LE(written_size, write.size());
write_buf.resize(written_size);
return write_buf;
}
} // namespace base
+1 -17
View File
@@ -10,8 +10,6 @@
#include "base/check_version_internal.h"
#include "base/debug/alias.h"
#include "base/debug/dump_without_crashing.h"
#include "base/feature_list.h"
#include "base/features.h"
#include "base/logging.h"
#include "base/thread_annotations.h"
#include "base/types/cxx23_to_underlying.h"
@@ -57,20 +55,6 @@ LogSeverity GetCheckSeverity(base::NotFatalUntil fatal_milestone) {
return GetNotFatalUntilSeverity(fatal_milestone);
}
LogSeverity GetNotReachedSeverity(base::NotFatalUntil fatal_milestone) {
// NOTREACHED severity is controlled by kNotReachedIsFatal unless
// `fatal_milestone` overrides it.
//
// NOTREACHED_IN_MIGRATION() instances may be hit before base::FeatureList is
// enabled.
if (fatal_milestone == base::NotFatalUntil::NoSpecifiedMilestoneInternal &&
base::FeatureList::GetInstance() &&
base::FeatureList::IsEnabled(base::features::kNotReachedIsFatal)) {
return LOGGING_FATAL;
}
return GetNotFatalUntilSeverity(fatal_milestone);
}
base::debug::CrashKeyString* GetNotReachedCrashKey() {
#if BUILDFLAG(IS_NACL)
return nullptr;
@@ -362,7 +346,7 @@ CheckError::CheckError(LogMessage* log_message) : log_message_(log_message) {}
NotReachedError NotReachedError::NotReached(base::NotFatalUntil fatal_milestone,
const base::Location& location) {
auto* const log_message = new NotReachedLogMessage(
location, GetNotReachedSeverity(fatal_milestone), fatal_milestone);
location, GetCheckSeverity(fatal_milestone), fatal_milestone);
// TODO(pbos): Consider a better message for NotReached(), this is here to
// match existing behavior + test expectations.
+18 -10
View File
@@ -187,7 +187,7 @@ class BASE_EXPORT NotReachedNoreturnError : public CheckError {
// CHECK(Foo());
//
// TODO(crbug.com/40244950): Remove the const bool when the blink-gc plugin has
// been updated to accept `if (LIKELY(!field_))` as well as `if (!field_)`.
// been updated to accept `if (!field_) [[likely]]` as well as `if (!field_)`.
#define LOGGING_CHECK_FUNCTION_IMPL(check_stream, condition) \
switch (0) \
case 0: \
@@ -196,8 +196,8 @@ class BASE_EXPORT NotReachedNoreturnError : public CheckError {
/* The optimizer can use this as a hint to place the failure path */ \
/* out-of-line, e.g. at the tail of the function. */ \
if (const bool probably_true = static_cast<bool>(condition); \
LIKELY(ANALYZER_ASSUME_TRUE(probably_true))) \
; \
ANALYZER_ASSUME_TRUE(probably_true)) \
[[likely]]; \
else \
(check_stream)
@@ -212,6 +212,14 @@ class BASE_EXPORT NotReachedNoreturnError : public CheckError {
base::ImmediateCrash();
}
// TODO(crbug.com/357081797): Use `[[unlikely]]` instead when there's a way to
// switch the expression below to a statement without breaking
// -Wthread-safety-analysis.
#if HAS_BUILTIN(__builtin_expect)
#define BASE_INTERNAL_EXPECT_FALSE(cond) __builtin_expect(!(cond), 0)
#else
#define BASE_INTERNAL_EXPECT_FALSE(cond) !(cond)
#endif
// Discard log strings to reduce code bloat when there is no NotFatalUntil
// argument (which temporarily preserves logging both locally and in crash
// reports).
@@ -221,12 +229,12 @@ class BASE_EXPORT NotReachedNoreturnError : public CheckError {
// compiler optimizations. Unlike the other check macros, this one does not use
// LOGGING_CHECK_FUNCTION_IMPL(), since it is incompatible with
// EAT_CHECK_STREAM_PARAMETERS().
#define CHECK(condition, ...) \
BASE_IF(BASE_IS_EMPTY(__VA_ARGS__), \
UNLIKELY(!(condition)) ? logging::CheckFailure() \
: EAT_CHECK_STREAM_PARAMS(), \
LOGGING_CHECK_FUNCTION_IMPL( \
logging::CheckError::Check(#condition, __VA_ARGS__), condition))
#define CHECK(cond, ...) \
BASE_IF(BASE_IS_EMPTY(__VA_ARGS__), \
BASE_INTERNAL_EXPECT_FALSE(cond) ? logging::CheckFailure() \
: EAT_CHECK_STREAM_PARAMS(), \
LOGGING_CHECK_FUNCTION_IMPL( \
logging::CheckError::Check(#cond, __VA_ARGS__), cond))
#define CHECK_WILL_STREAM() false
@@ -296,7 +304,7 @@ class BASE_EXPORT NotReachedNoreturnError : public CheckError {
[[noreturn]] BASE_EXPORT void RawCheckFailure(const char* message);
#define RAW_CHECK(condition) \
do { \
if (UNLIKELY(!(condition))) { \
if (!(condition)) [[unlikely]] { \
::logging::RawCheckFailure("Check failed: " #condition "\n"); \
} \
} while (0)
+1 -1
View File
@@ -39,7 +39,7 @@ template <typename T>
// Note: we can't just call `CHECK_NE(ptr, nullptr)` here, as that would
// cause the error to be reported from this header, and we want the error
// to be reported at the file and line of the caller.
if (UNLIKELY(ptr == nullptr)) {
if (ptr == nullptr) [[unlikely]] {
#if !CHECK_WILL_STREAM()
CheckFailure();
#else
+3 -1
View File
@@ -18,7 +18,9 @@
// See https://crbug.com/672699.
#define BLINK_RELEASE_ASSERT_EQUIVALENT(assertion) \
(UNLIKELY(!(assertion)) ? (base::ImmediateCrash()) : (void)0)
if (!(assertion)) [[unlikely]] { \
base::ImmediateCrash(); \
}
void DoCheck(bool b) {
CHECK(b) << "DoCheck " << b;

Some files were not shown because too many files have changed in this diff Show More