Update On Sun Dec 14 19:39:35 CET 2025

This commit is contained in:
github-action[bot]
2025-12-14 19:39:36 +01:00
parent c8e74f10e5
commit e3fa06eaac
5059 changed files with 192386 additions and 93334 deletions
+1 -1
View File
@@ -1 +1 @@
140.0.7339.123
143.0.7499.109
+1 -7
View File
@@ -67,7 +67,7 @@ Options:
The default proxy is a direct connection without proxying.
The last PROXY-URI is negotiated automatically for Naive padding.
Can be specified multiple times, but they must match the number of specified
If multiple proxies are specified, they must match the number of specified
LISTEN-URIs, and each LISTEN-URI is routed to the PROXY matched by position.
Limitations:
@@ -93,7 +93,6 @@ Options:
--host-resolver-rules="MAP proxy.example.com 1.2.3.4"
Statically resolves a domain name to an IP address.
Multiple rules are comma separated.
--resolver-range=CIDR
@@ -115,8 +114,3 @@ Options:
--no-post-quantum
Overrides the default and disables post-quantum key agreement.
--env=NAME=VALUE
Sets the environment variable NAME to the value VALUE. Can be specified
multiple times.
+24
View File
@@ -102,6 +102,7 @@ Ambareesh Balaji <ambareeshbalaji@gmail.com>
Ambarish Rapte <ambarish.r@samsung.com>
Ameen Basha <ameenbasha111@gmail.com>
Amey Jahagirdar <jahagird@amazon.com>
Amit P <ponnan2112@gmail.com>
Amit Paul <a.paul@samsung.com>
Amit Sarkar <amit.srkr@samsung.com>
Amogh Bihani <amogh.bihani@samsung.com>
@@ -133,6 +134,7 @@ Andrew MacPherson <andrew.macpherson@soundtrap.com>
Andrew Nicols <andrewrn@gmail.com>
Andrew Tulloch <andrew@tullo.ch>
Andriy Rysin <arysin@gmail.com>
Ane Diaz de Tuesta <anediaz@gmail.com>
Anish Patankar <anish.p@samsung.com>
Ankit Kiran <sahuankit453@gmail.com>
Ankit Kumar <ankit2.kumar@samsung.com>
@@ -175,6 +177,7 @@ Arunprasad Rajkumar <ararunprasad@gmail.com>
Arunprasad Rajkumar <arurajku@cisco.com>
Arup Barua <arup.barua@samsung.com>
Aryan Kaushik <aryankaushik2023@gmail.com>
Aryan P Krishnan <aryankrishnan4b@gmail.com>
Asami Doi <d0iasm.pub@gmail.com>
Ashish Kumar Gupta <guptaag@amazon.com>
Ashlin Joseph <ashlin.j@samsung.com>
@@ -301,6 +304,7 @@ Clemens Fruhwirth <clemens@endorphin.org>
Clement Scheelfeldt Skau <clementskau@gmail.com>
Clinton Staley <clintstaley@gmail.com>
Cong Zuo <zckevinzc@gmail.com>
Connor Hewitt <connor.hewitt@gmail.com>
Connor Pearson <cjp822@gmail.com>
Conrad Irwin <conrad.irwin@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
@@ -311,6 +315,7 @@ Daeyoon Choi <yoonda5898@gmail.com>
Dai Chunyang <chunyang.dai@intel.com>
Daiwei Li <daiweili@suitabletech.com>
Damien Marié <damien@dam.io>
Damitha Gunawardena <damitha@canva.com>
Dan McCombs <overridex@gmail.com>
Daniel Adams <msub2official@gmail.com>
Daniel Bertalan <dani@danielbertalan.dev>
@@ -411,6 +416,7 @@ Dushyant Kant Sharma <dush.sharma@samsung.com>
Dustin Doloff <doloffd@amazon.com>
Ebrahim Byagowi <ebrahim@gnu.org>
Ebrahim Byagowi <ebraminio@gmail.com>
Eden Wang <nedenwang@gmail.com>
Eden Wang <nedenwang@tencent.com>
Eduardo Lima (Etrunko) <eblima@gmail.com>
Eduardo Lima (Etrunko) <eduardo.lima@intel.com>
@@ -506,6 +512,7 @@ Greg Visser <gregvis@gmail.com>
Gregory Davis <gpdavis.chromium@gmail.com>
Grzegorz Czajkowski <g.czajkowski@samsung.com>
Guangzhen Li <guangzhen.li@intel.com>
Guohui Xie <vampirelightsss@gmail.com>
Guobin Wu <wuguobin.1229@bytedance.com>
Gurpreet Kaur <k.gurpreet@samsung.com>
Gustav Tiger <gustav.tiger@sonymobile.com>
@@ -537,6 +544,7 @@ Harshit Pal <harshitp12345@gmail.com>
Hassan Salehe Matar <hassansalehe@gmail.com>
Hautio Kari <khautio@gmail.com>
He Qi <heqi899@gmail.com>
He Yang <1160386205@qq.com>
Heejin R. Chung <heejin.r.chung@samsung.com>
Heeyoun Lee <heeyoun.lee@samsung.com>
Helmut Januschka <helmut@januschka.com>
@@ -544,6 +552,7 @@ Henrique de Carvalho <decarv.henrique@gmail.com>
Henrique Limas <henrique.ramos.limas@gmail.com>
Henrique Valcanaia <henriqueindalencio@gmail.com>
Henry Lim <henry@limhenry.xyz>
Hewei Hewro <ihewro@gmail.com>
Hikari Fujimoto <hikari.p.fujimoto@gmail.com>
Himadri Agrawal <h2.agrawal@samsung.com>
Himanshu Joshi <h.joshi@samsung.com>
@@ -917,6 +926,7 @@ Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Lisha Guo <lisha.guo@intel.com>
Lizhi Fan <lizhi.fan@samsung.com>
Lloyd Huang <bzkirto@gmail.com>
Lloyd Torres <torlloyd@amazon.com>
Loay Ghreeb <loayahmed655@gmail.com>
Loo Rong Jie <loorongjie@gmail.com>
Lorenzo Stoakes <lstoakes@gmail.com>
@@ -939,6 +949,7 @@ Lu Yahan <yahan@iscas.ac.cn>
Lyra Rebane <rebane2001@gmail.com>
Ma Aiguo <imaiguo@gmail.com>
Maarten Lankhorst <m.b.lankhorst@gmail.com>
Maciej Czarnecki <mcczarny@gmail.com>
Maciej Pawlowski <m.pawlowski@eyeo.com>
Magnus Danielsson <fuzzac@gmail.com>
Mahesh Kulkarni <mahesh.kk@samsung.com>
@@ -1024,6 +1035,7 @@ Md Sami Uddin <md.sami@samsung.com>
Mego Tan <tannal2409@gmail.com>
Merajul Arefin <merajularefin@gmail.com>
Micha Hanselmann <micha.hanselmann@gmail.com>
Michael Chan <mzchan@dolby.com>
Michael Cirone <mikecirone@gmail.com>
Michael Constant <mconst@gmail.com>
Michael Forney <mforney@mforney.org>
@@ -1055,6 +1067,7 @@ Mingeun Park <mindal99546@gmail.com>
Minggang Wang <minggang.wang@intel.com>
Mingmin Xie <melvinxie@gmail.com>
Mingming Xu <mingming1.xu@intel.com>
Mingtao Zhou <mingtaoxt@gmail.com>
Mingyue Ji <myandyji@gmail.com>
Minjeong Kim <deoxyribonucleicacid150@gmail.com>
Minjeong Lee <apenr1234@gmail.com>
@@ -1081,6 +1094,7 @@ Mohraiel Matta <mohraielmatta@gmail.com>
Moiseanu Rares-Marian <moiseanurares@gmail.com>
Momoka Yamamoto <momoka.my6@gmail.com>
Momoko Hattori <momohatt10@gmail.com>
Mostafa Aboalkasim <mostafa.aboalkasim.offical@gmail.com>
Mostafa Sedaghat joo <mostafa.sedaghat@gmail.com>
Mrunal Kapade <mrunal.kapade@intel.com>
Muhammad Mahad <mahadtxt@gmail.com>
@@ -1493,12 +1507,14 @@ Tanvir Rizvi <tanvir.rizvi@samsung.com>
Tao Wang <tao.wang.2261@gmail.com>
Tao Xiong <taox4@illinois.edu>
Tapu Kumar Ghose <ghose.tapu@gmail.com>
Tau Gärtli <google@tau.garden>
Taylor Price <trprice@gmail.com>
Ted Kim <neot0000@gmail.com>
Ted Vessenes <tedvessenes@gmail.com>
Teodora Novkovic <teodora.petrovic@gmail.com>
Thiago Farina <thiago.farina@gmail.com>
Thiago Marcos P. Santos <thiago.santos@intel.com>
Thibault Gagnaux <tgagnaux@gmail.com>
Thirumurugan <thiruak1024@gmail.com>
Thomas Butter <tbutter@gmail.com>
Thomas Conti <tomc@amazon.com>
@@ -1532,6 +1548,7 @@ Torsten Kurbad <google@tk-webart.de>
Toshihito Kikuchi <leamovret@gmail.com>
Toshiaki Tanaka <zokutyou2@gmail.com>
Travis Leithead <travis.leithead@gmail.com>
Trent Taylor <trent.taylor@ulteig.com>
Trent Willis <trentmwillis@gmail.com>
Trevor Perrin <unsafe@trevp.net>
Tripta Gupta <triptagupta19@gmail.com>
@@ -1540,6 +1557,7 @@ Tristan Fraipont <tristan.fraipont@gmail.com>
Tudor Brindus <me@tbrindus.ca>
Tushar Singh <tusharsinghnx@gmail.com>
Tuukka Toivonen <tuukka.toivonen@intel.com>
Tyler Carson <tyler@keepersecurity.com>
Tyler Jones <tylerjdev@github.com>
U. Artie Eoff <ullysses.a.eoff@intel.com>
Umar Hansa <umar.hansa@gmail.com>
@@ -1561,6 +1579,7 @@ Vernon Tang <vt@foilhead.net>
Viatcheslav Ostapenko <sl.ostapenko@samsung.com>
Victor Costan <costan@gmail.com>
Victor Solonsky <victor.solonsky@gmail.com>
Vidya Balachander <vidyakbalachander@gmail.com>
Viet-Trung Luu <viettrungluu@gmail.com>
Vikas Mundra <vikas.mundra@samsung.com>
Vinay Anantharaman <vinaya@adobe.com>
@@ -1624,7 +1643,9 @@ Xunran Ding <xunran.ding@samsung.com>
Xunran Ding <dingxunran@gmail.com>
Yael Aharon <yael.aharon@intel.com>
Yagiz Nizipli <yagiz@nizipli.com>
Yaksh Bariya <yakshbari4@gmail.com>
Yan Wang <yan0422.wang@samsung.com>
Yaniv Yissachar <yaniv.yscr@gmail.com>
Yang Gu <yang.gu@intel.com>
Yang Liu <jd9668954@gmail.com>
Yang Liu <yangliu.leo@bytedance.com>
@@ -1676,6 +1697,7 @@ Yuma Takai <tara20070827@gmail.com>
Yumikiyo Osanai <yumios.art@gmail.com>
Yumin Su <yuminsu.hi@gmail.com>
Yun Jiyun <tomatoziiilll@gmail.com>
Yun Ye <yeyun.anton@gmail.com>
Yunchao He <yunchao.he@intel.com>
Yupei Lin <yplam@yplam.com>
Yupei Wang <perryuwang@tencent.com>
@@ -1736,6 +1758,7 @@ BlackBerry Limited <*@blackberry.com>
Bocoup <*@bocoup.com>
Brave Software Inc. <*@brave.com>
Canonical Limited <*@canonical.com>
Canva Pty Ltd <*@canva.com>
Cloudflare, Inc. <*@cloudflare.com>
CloudMosa, Inc. <*@cloudmosa.com>
Code Aurora Forum <*@codeaurora.org>
@@ -1760,6 +1783,7 @@ IBM Inc. <*@ibm.com>
Igalia S.L. <*@igalia.com>
Imagination Technologies Limited <*@imagination.corp-partner.google.com>
Impossible Dreams Network <*@impossibledreams.net>
imput LLC <*@imput.net>
Intel Corporation <*@intel.com>
Island Technology, Inc. <*@island.io>
LG Electronics, Inc. <*@lge.com>
+2
View File
@@ -8,7 +8,9 @@
# you add a new build file, there must be some path of dependencies from this
# file to your new one or GN won't know about it.
import("//build/config/c++/modules.gni")
import("//build/config/cast.gni")
import("//build/config/chrome_build.gni")
import("//build/config/compiler/compiler.gni")
import("//build/config/cronet/config.gni")
import("//build/config/dcheck_always_on.gni")
+415 -395
View File
File diff suppressed because it is too large Load Diff
+37 -15
View File
@@ -46,15 +46,14 @@ if (is_ios) {
import("//build/config/ios/ios_sdk.gni")
}
if (is_mac) {
# Used to generate fuzzer corpus :base_mach_port_rendezvous_convert_corpus.
import("//third_party/protobuf/proto_library.gni")
}
if (is_win) {
import("//build/config/win/control_flow_guard.gni")
}
if (is_android) {
import("//build/config/android/config.gni")
}
declare_args() {
# Unsafe developer build. Has developer-friendly features that may weaken or
# disable security measures like sandboxing or ASLR.
@@ -208,7 +207,10 @@ component("base") {
"bit_cast.h",
"bits.h",
"build_time.h",
"byte_count.cc",
"byte_count.h",
"byte_size.cc",
"byte_size.h",
"callback_list.cc",
"callback_list.h",
"cancelable_callback.h",
@@ -256,6 +258,8 @@ component("base") {
"containers/to_value_list.h",
"containers/to_vector.h",
"containers/unique_ptr_adapters.h",
"containers/variant_map.cc",
"containers/variant_map.h",
"containers/vector_buffer.h",
"critical_closure.h",
"dcheck_is_on.h",
@@ -355,8 +359,11 @@ component("base") {
"memory/aligned_memory.h",
"memory/asan_interface.h",
"memory/free_deleter.h",
"memory/memory_pressure_level.h",
"memory/memory_pressure_listener.cc",
"memory/memory_pressure_listener.h",
"memory/memory_pressure_listener_registry.cc",
"memory/memory_pressure_listener_registry.h",
"memory/memory_pressure_monitor.cc",
"memory/memory_pressure_monitor.h",
"memory/page_size.h",
@@ -422,6 +429,7 @@ component("base") {
"memory_coordinator/memory_consumer.h",
"memory_coordinator/memory_consumer_registry.cc",
"memory_coordinator/memory_consumer_registry.h",
"memory_coordinator/memory_consumer_registry_destruction_observer.h",
"memory_coordinator/traits.h",
"message_loop/io_watcher.cc",
"message_loop/io_watcher.h",
@@ -442,6 +450,8 @@ component("base") {
"metrics/dummy_histogram.h",
"metrics/field_trial.cc",
"metrics/field_trial.h",
"metrics/field_trial_entry.cc",
"metrics/field_trial_entry.h",
"metrics/field_trial_list_including_low_anonymity.cc",
"metrics/field_trial_list_including_low_anonymity.h",
"metrics/field_trial_param_associator.cc",
@@ -662,6 +672,8 @@ component("base") {
"synchronization/lock.cc",
"synchronization/lock.h",
"synchronization/lock_impl.h",
"synchronization/lock_metrics_recorder.cc",
"synchronization/lock_metrics_recorder.h",
"synchronization/lock_subtle.h",
"synchronization/waitable_event.cc",
"synchronization/waitable_event.h",
@@ -695,6 +707,8 @@ component("base") {
"task/delay_policy.h",
"task/delayed_task_handle.cc",
"task/delayed_task_handle.h",
"task/execution_fence.cc",
"task/execution_fence.h",
"task/lazy_thread_pool_task_runner.cc",
"task/lazy_thread_pool_task_runner.h",
"task/post_job.cc",
@@ -902,7 +916,6 @@ component("base") {
"trace_event/trace_id_helper.h",
"traits_bag.h",
"tuple.h",
"types/always_false.h",
"types/cxx23_from_range.h",
"types/cxx23_is_scoped_enum.h",
"types/cxx23_to_underlying.h",
@@ -1125,18 +1138,24 @@ component("base") {
# Android.
if (is_android) {
sources += [
"android/android_info.h",
"android/android_info_stub.cc",
"android/apk_info.h",
"android/apk_info_stub.cc",
"android/application_status_listener.h",
"android/application_status_listener_stub.cc",
"android/background_thread_pool_field_trial.cc",
"android/background_thread_pool_field_trial.h",
"android/build_info.h",
"android/build_info_stub.cc",
"android/content_uri_utils.h",
"android/content_uri_utils_stub.cc",
"android/device_info.h",
"android/device_info_stub.cc",
"android/virtual_document_path.cc",
"android/virtual_document_path.h",
"android/yield_to_looper_checker.cc",
"android/yield_to_looper_checker.h",
"android/sys_utils.h",
"android/sys_utils_stub.cc",
"debug/stack_trace_android.cc",
"files/file_android.cc",
"files/file_android.h",
@@ -1164,14 +1183,18 @@ component("base") {
]
deps += [
"//third_party/ashmem",
"//third_party/cpu_features:ndk_compat",
]
if (android_ndk_api_level >= 29) {
deps += [
":android_info_aidl_native",
":build_info_aidl_native",
deps += [ ":build_info_aidl_native" ]
sources += [
# Cronet uses lower NDK API level than is required for these files,
# but it doesn't need AHardwareBuffer anyway.
"android/scoped_hardware_buffer_fence_sync.cc",
"android/scoped_hardware_buffer_fence_sync.h",
"android/scoped_hardware_buffer_handle.cc",
"android/scoped_hardware_buffer_handle.h",
]
}
@@ -1251,6 +1274,7 @@ component("base") {
"android/library_loader/library_prefetcher_hooks.cc",
"android/native_uma_recorder.cc",
"android/scoped_java_ref.h",
"android/statistics_recorder_android.cc",
"android/token_android.cc",
"android/token_android.h",
"android/trace_event_binding.cc",
@@ -1451,7 +1475,6 @@ component("base") {
"memory/discardable_memory_internal.h",
"metrics/persistent_histogram_storage.cc",
"metrics/persistent_histogram_storage.h",
"native_library.cc",
"native_library.h",
"path_service.cc",
"path_service.h",
@@ -1596,8 +1619,6 @@ component("base") {
sources += [
"allocator/dispatcher/memory_tagging.cc",
"allocator/dispatcher/memory_tagging.h",
"allocator/miracle_parameter.cc",
"allocator/miracle_parameter.h",
"allocator/partition_alloc_features.cc",
"allocator/partition_alloc_features.h",
"allocator/partition_alloc_support.cc",
@@ -1708,6 +1729,7 @@ component("base") {
"win/dark_mode_support.h",
"win/default_apps_util.cc",
"win/default_apps_util.h",
"win/delayload_helpers.h",
"win/elevation_util.cc",
"win/elevation_util.h",
"win/embedded_i18n/language_selector.cc",
-5
View File
@@ -1,6 +1,5 @@
include_rules = [
"+aidl/org/chromium/base",
"+third_party/ashmem",
"+third_party/apple_apsl",
"+third_party/boringssl/src/include",
"+third_party/ced",
@@ -35,10 +34,6 @@ include_rules = [
]
specific_include_rules = {
# Special case
"process/current_process(|_test)\.h": [
"+third_party/perfetto/protos/perfetto/trace/track_event/chrome_process_descriptor.pbzero.h",
],
# To evaluate the performance effects of using absl's flat_hash_map.
"supports_user_data\.cc": [
"+third_party/abseil-cpp/absl/container/flat_hash_map.h",
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef BASE_ALLOCATOR_ALLOCATOR_CHECK_H_
#define BASE_ALLOCATOR_ALLOCATOR_CHECK_H_
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_
#define BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#define BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_
#define BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_
@@ -158,6 +158,18 @@ struct DispatcherImpl {
return address;
}
static void* AllocZeroInitializedUncheckedFn(size_t n,
size_t size,
void* context) {
void* const address =
allocator_dispatch_.next->alloc_zero_initialized_unchecked_function(
n, size, context);
DoNotifyAllocationForShim(address, n * size);
return address;
}
static void* AllocAlignedFn(size_t alignment, size_t size, void* context) {
void* const address = allocator_dispatch_.next->alloc_aligned_function(
alignment, size, context);
@@ -347,28 +359,30 @@ std::tuple<ObserverTypes*...> DispatcherImpl<ObserverTypes...>::s_observers;
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
template <typename... ObserverTypes>
AllocatorDispatch DispatcherImpl<ObserverTypes...>::allocator_dispatch_ = {
AllocFn, // alloc_function
AllocUncheckedFn, // alloc_unchecked_function
AllocZeroInitializedFn, // alloc_zero_initialized_function
AllocAlignedFn, // alloc_aligned_function
ReallocFn, // realloc_function
ReallocUncheckedFn, // realloc_unchecked_function
FreeFn, // free_function
FreeWithSizeFn, // free_with_size_function
FreeWithAlignmentFn, // free_with_alignment_function
FreeWithSizeAndAlignmentFn, // free_with_size_and_alignment_function
nullptr, // get_size_estimate_function
nullptr, // good_size_function
nullptr, // claimed_address_function
BatchMallocFn, // batch_malloc_function
BatchFreeFn, // batch_free_function
TryFreeDefaultFn, // try_free_default_function
AlignedMallocFn, // aligned_malloc_function
AlignedMallocUncheckedFn, // aligned_malloc_unchecked_function
AlignedReallocFn, // aligned_realloc_function
AlignedReallocUncheckedFn, // aligned_realloc_unchecked_function
AlignedFreeFn, // aligned_free_function
nullptr // next
.alloc_function = AllocFn,
.alloc_unchecked_function = AllocUncheckedFn,
.alloc_zero_initialized_function = AllocZeroInitializedFn,
.alloc_zero_initialized_unchecked_function =
AllocZeroInitializedUncheckedFn,
.alloc_aligned_function = AllocAlignedFn,
.realloc_function = ReallocFn,
.realloc_unchecked_function = ReallocUncheckedFn,
.free_function = FreeFn,
.free_with_size_function = FreeWithSizeFn,
.free_with_alignment_function = FreeWithAlignmentFn,
.free_with_size_and_alignment_function = FreeWithSizeAndAlignmentFn,
.get_size_estimate_function = nullptr,
.good_size_function = nullptr,
.claimed_address_function = nullptr,
.batch_malloc_function = BatchMallocFn,
.batch_free_function = BatchFreeFn,
.try_free_default_function = TryFreeDefaultFn,
.aligned_malloc_function = AlignedMallocFn,
.aligned_malloc_unchecked_function = AlignedMallocUncheckedFn,
.aligned_realloc_function = AlignedReallocFn,
.aligned_realloc_unchecked_function = AlignedReallocUncheckedFn,
.aligned_free_function = AlignedFreeFn,
.next = nullptr,
};
#endif // PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef BASE_ALLOCATOR_DISPATCHER_INTERNAL_TOOLS_H_
#define BASE_ALLOCATOR_DISPATCHER_INTERNAL_TOOLS_H_
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "base/allocator/dispatcher/memory_tagging.h"
namespace base::allocator::dispatcher {
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef BASE_ALLOCATOR_DISPATCHER_MEMORY_TAGGING_H_
#define BASE_ALLOCATOR_DISPATCHER_MEMORY_TAGGING_H_
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_
#define BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_
#define BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_APPLE_H_
#define BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_APPLE_H_
@@ -1,81 +0,0 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "base/allocator/miracle_parameter.h"
#include "base/command_line.h"
#include "base/strings/strcat.h"
#include "base/system/sys_info.h"
namespace base::miracle_parameter {
std::string GetParamNameWithSuffix(const std::string& param_name) {
// `base::SysInfo::AmountOfPhysicalMemoryMB()` refers to CommandLine
// internally. If the CommandLine is not initialized, we return early to avoid
// a crash.
if (!base::CommandLine::InitializedForCurrentProcess()) {
return param_name;
}
int physical_memory_mb = base::SysInfo::AmountOfPhysicalMemoryMB();
const char* suffix =
physical_memory_mb < kMiracleParameterMemory512MB ? "ForLessThan512MB"
: physical_memory_mb < kMiracleParameterMemory1GB ? "For512MBTo1GB"
: physical_memory_mb < kMiracleParameterMemory2GB ? "For1GBTo2GB"
: physical_memory_mb < kMiracleParameterMemory4GB ? "For2GBTo4GB"
: physical_memory_mb < kMiracleParameterMemory8GB ? "For4GBTo8GB"
: physical_memory_mb < kMiracleParameterMemory16GB ? "For8GBTo16GB"
: "For16GBAndAbove";
return base::StrCat({param_name, suffix});
}
std::string GetMiracleParameterAsString(const base::Feature& feature,
const std::string& param_name,
const std::string& default_value) {
return GetFieldTrialParamByFeatureAsString(
feature, GetParamNameWithSuffix(param_name),
GetFieldTrialParamByFeatureAsString(feature, param_name, default_value));
}
double GetMiracleParameterAsDouble(const base::Feature& feature,
const std::string& param_name,
double default_value) {
return base::GetFieldTrialParamByFeatureAsDouble(
feature, GetParamNameWithSuffix(param_name),
base::GetFieldTrialParamByFeatureAsDouble(feature, param_name,
default_value));
}
int GetMiracleParameterAsInt(const base::Feature& feature,
const std::string& param_name,
int default_value) {
return base::GetFieldTrialParamByFeatureAsInt(
feature, GetParamNameWithSuffix(param_name),
base::GetFieldTrialParamByFeatureAsInt(feature, param_name,
default_value));
}
bool GetMiracleParameterAsBool(const base::Feature& feature,
const std::string& param_name,
bool default_value) {
return base::GetFieldTrialParamByFeatureAsBool(
feature, GetParamNameWithSuffix(param_name),
base::GetFieldTrialParamByFeatureAsBool(feature, param_name,
default_value));
}
base::TimeDelta GetMiracleParameterAsTimeDelta(const base::Feature& feature,
const std::string& param_name,
base::TimeDelta default_value) {
return base::GetFieldTrialParamByFeatureAsTimeDelta(
feature, GetParamNameWithSuffix(param_name),
base::GetFieldTrialParamByFeatureAsTimeDelta(feature, param_name,
default_value));
}
} // namespace base::miracle_parameter
@@ -1,182 +0,0 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef BASE_ALLOCATOR_MIRACLE_PARAMETER_H_
#define BASE_ALLOCATOR_MIRACLE_PARAMETER_H_
#include "base/base_export.h"
#include "base/containers/span.h"
#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"
// This is a mirror copy of the //components/miracle_parameter/ to resolve the
// dependency cycle of (base->miracle_parameter->base).
// Eventually the miracle_parameter component will have a public interface in
// //base/ and this could be removed.
// TODO(crbug.com/40279826): remove miracle_parameter from
// //base/allocator/.
namespace base {
namespace miracle_parameter {
namespace {
template <typename Enum>
Enum GetFieldTrialParamByFeatureAsEnum(
const base::Feature& feature,
const std::string& param_name,
const Enum default_value,
const base::span<const typename base::FeatureParam<Enum>::Option>&
options) {
std::string string_value =
base::GetFieldTrialParamValueByFeature(feature, param_name);
if (string_value.empty()) {
return default_value;
}
for (const auto& option : options) {
if (string_value == option.name) {
return option.value;
}
}
base::LogInvalidEnumValue(feature, param_name, string_value,
static_cast<int>(default_value));
return default_value;
}
} // namespace
constexpr int kMiracleParameterMemory512MB = 512;
constexpr int kMiracleParameterMemory1GB = 1024;
constexpr int kMiracleParameterMemory2GB = 2 * 1024;
constexpr int kMiracleParameterMemory4GB = 4 * 1024;
constexpr int kMiracleParameterMemory8GB = 8 * 1024;
constexpr int kMiracleParameterMemory16GB = 16 * 1024;
// GetParamNameWithSuffix put a parameter name suffix based on
// the amount of physical memory.
//
// - "ForLessThan512MB" for less than 512MB memory devices.
// - "For512MBTo1GB" for 512MB to 1GB memory devices.
// - "For1GBTo2GB" for 1GB to 2GB memory devices.
// - "For2GBTo4GB" for 2GB to 4GB memory devices.
// - "For4GBTo8GB" for 4GB to 8GB memory devices.
// - "For8GBTo16GB" for 8GB to 16GB memory devices.
// - "For16GBAndAbove" for 16GB memory and above devices.
BASE_EXPORT
std::string GetParamNameWithSuffix(const std::string& param_name);
// Provides a similar behavior with FeatureParam<std::string> except the return
// value is determined by the amount of physical memory.
BASE_EXPORT
std::string GetMiracleParameterAsString(const base::Feature& feature,
const std::string& param_name,
const std::string& default_value);
// Provides a similar behavior with FeatureParam<double> except the return value
// is determined by the amount of physical memory.
BASE_EXPORT
double GetMiracleParameterAsDouble(const base::Feature& feature,
const std::string& param_name,
double default_value);
// Provides a similar behavior with FeatureParam<int> except the return value is
// determined by the amount of physical memory.
BASE_EXPORT
int GetMiracleParameterAsInt(const base::Feature& feature,
const std::string& param_name,
int default_value);
// Provides a similar behavior with FeatureParam<bool> except the return value
// is determined by the amount of physical memory.
BASE_EXPORT
bool GetMiracleParameterAsBool(const base::Feature& feature,
const std::string& param_name,
bool default_value);
// Provides a similar behavior with FeatureParam<base::TimeDelta> except the
// return value is determined by the amount of physical memory.
BASE_EXPORT
base::TimeDelta GetMiracleParameterAsTimeDelta(const base::Feature& feature,
const std::string& param_name,
base::TimeDelta default_value);
// Provides a similar behavior with FeatureParam<Enum> except the return value
// is determined by the amount of physical memory.
template <typename Enum>
Enum GetMiracleParameterAsEnum(
const base::Feature& feature,
const std::string& param_name,
const Enum default_value,
const base::span<const typename base::FeatureParam<Enum>::Option> options) {
return GetFieldTrialParamByFeatureAsEnum(
feature, GetParamNameWithSuffix(param_name),
GetFieldTrialParamByFeatureAsEnum(feature, param_name, default_value,
options),
options);
}
#define MIRACLE_PARAMETER_FOR_STRING(function_name, feature, param_name, \
default_value) \
std::string function_name() { \
static const std::string value = \
miracle_parameter::GetMiracleParameterAsString(feature, param_name, \
default_value); \
return value; \
}
#define MIRACLE_PARAMETER_FOR_DOUBLE(function_name, feature, param_name, \
default_value) \
double function_name() { \
static const double value = \
miracle_parameter::GetMiracleParameterAsDouble(feature, param_name, \
default_value); \
return value; \
}
#define MIRACLE_PARAMETER_FOR_INT(function_name, feature, param_name, \
default_value) \
int function_name() { \
static const int value = miracle_parameter::GetMiracleParameterAsInt( \
feature, param_name, default_value); \
return value; \
}
#define MIRACLE_PARAMETER_FOR_BOOL(function_name, feature, param_name, \
default_value) \
bool function_name() { \
static const bool value = miracle_parameter::GetMiracleParameterAsBool( \
feature, param_name, default_value); \
return value; \
}
#define MIRACLE_PARAMETER_FOR_TIME_DELTA(function_name, feature, param_name, \
default_value) \
base::TimeDelta function_name() { \
static const base::TimeDelta value = \
miracle_parameter::GetMiracleParameterAsTimeDelta(feature, param_name, \
default_value); \
return value; \
}
#define MIRACLE_PARAMETER_FOR_ENUM(function_name, feature, param_name, \
default_value, type, options) \
type function_name() { \
static const type value = miracle_parameter::GetMiracleParameterAsEnum( \
feature, param_name, default_value, base::span(options)); \
return value; \
}
} // namespace miracle_parameter
} // namespace base
#endif // BASE_ALLOCATOR_MIRACLE_PARAMETER_H_
@@ -9,20 +9,15 @@
#include "base/allocator/partition_alloc_features.h"
#include "base/allocator/miracle_parameter.h"
#include "base/base_export.h"
#include "base/feature_list.h"
#include "base/features.h"
#include "base/metrics/field_trial_params.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "build/chromecast_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/time/time.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_root.h"
#include "partition_alloc/shim/allocator_shim_dispatch_to_noop_on_free.h"
#include "partition_alloc/thread_cache.h"
namespace base::features {
@@ -36,9 +31,7 @@ static constexpr char kAllProcessesStr[] = "all-processes";
} // namespace
BASE_FEATURE(kPartitionAllocUnretainedDanglingPtr,
"PartitionAllocUnretainedDanglingPtr",
FEATURE_ENABLED_BY_DEFAULT);
BASE_FEATURE(kPartitionAllocUnretainedDanglingPtr, FEATURE_ENABLED_BY_DEFAULT);
constexpr FeatureParam<UnretainedDanglingPtrMode>::Option
kUnretainedDanglingPtrModeOption[] = {
@@ -60,7 +53,6 @@ constinit const FeatureParam<UnretainedDanglingPtrMode>
// presence of DPD, but hypothetically fully launching DPD should prompt
// a rethink of no-op `free()`.
BASE_FEATURE(kPartitionAllocDanglingPtr,
"PartitionAllocDanglingPtr",
#if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG)
FEATURE_ENABLED_BY_DEFAULT
#else
@@ -93,32 +85,16 @@ constinit const FeatureParam<DanglingPtrType> kDanglingPtrTypeParam{
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// Use a larger maximum thread cache cacheable bucket size.
BASE_FEATURE(kPartitionAllocLargeThreadCacheSize,
"PartitionAllocLargeThreadCacheSize",
FEATURE_ENABLED_BY_DEFAULT);
MIRACLE_PARAMETER_FOR_INT(GetPartitionAllocLargeThreadCacheSizeValue,
kPartitionAllocLargeThreadCacheSize,
"PartitionAllocLargeThreadCacheSizeValue",
::partition_alloc::kThreadCacheLargeSizeThreshold)
MIRACLE_PARAMETER_FOR_INT(
GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid,
kPartitionAllocLargeThreadCacheSize,
"PartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid",
::partition_alloc::kThreadCacheDefaultSizeThreshold)
BASE_FEATURE(kPartitionAllocLargeThreadCacheSize, FEATURE_ENABLED_BY_DEFAULT);
BASE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing,
"PartitionAllocLargeEmptySlotSpanRing",
#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
FEATURE_ENABLED_BY_DEFAULT);
#else
FEATURE_DISABLED_BY_DEFAULT);
#endif
BASE_FEATURE(kPartitionAllocWithAdvancedChecks,
"PartitionAllocWithAdvancedChecks",
FEATURE_DISABLED_BY_DEFAULT);
BASE_FEATURE(kPartitionAllocWithAdvancedChecks, FEATURE_DISABLED_BY_DEFAULT);
constexpr FeatureParam<PartitionAllocWithAdvancedChecksEnabledProcesses>::Option
kPartitionAllocWithAdvancedChecksEnabledProcessesOptions[] = {
{PartitionAllocWithAdvancedChecksEnabledProcesses::kBrowserOnly,
@@ -137,7 +113,6 @@ constinit const FeatureParam<PartitionAllocWithAdvancedChecksEnabledProcesses>
&kPartitionAllocWithAdvancedChecksEnabledProcessesOptions};
BASE_FEATURE(kPartitionAllocSchedulerLoopQuarantine,
"PartitionAllocSchedulerLoopQuarantine",
FEATURE_DISABLED_BY_DEFAULT);
// Scheduler Loop Quarantine's config.
// Note: Do not use the prepared macro as of no need for a local cache.
@@ -146,23 +121,40 @@ constinit const FeatureParam<std::string>
&kPartitionAllocSchedulerLoopQuarantine,
"PartitionAllocSchedulerLoopQuarantineConfig", "{}"};
BASE_FEATURE(kPartitionAllocSchedulerLoopQuarantineTaskControlledPurge,
FEATURE_DISABLED_BY_DEFAULT);
constexpr FeatureParam<
PartitionAllocSchedulerLoopQuarantineTaskControlledPurgeEnabledProcesses>::Option
kPartitionAllocSchedulerLoopQuarantineTaskControlledPurgeEnabledProcessesOptions[] =
{{PartitionAllocSchedulerLoopQuarantineTaskControlledPurgeEnabledProcesses::
kBrowserOnly,
kBrowserOnlyStr},
{PartitionAllocSchedulerLoopQuarantineTaskControlledPurgeEnabledProcesses::
kBrowserAndRenderer,
kBrowserAndRendererStr},
{PartitionAllocSchedulerLoopQuarantineTaskControlledPurgeEnabledProcesses::
kNonRenderer,
kNonRendererStr},
{PartitionAllocSchedulerLoopQuarantineTaskControlledPurgeEnabledProcesses::
kAllProcesses,
kAllProcessesStr}};
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<
PartitionAllocSchedulerLoopQuarantineTaskControlledPurgeEnabledProcesses>
kPartitionAllocSchedulerLoopQuarantineTaskControlledPurgeEnabledProcessesParam{
&kPartitionAllocSchedulerLoopQuarantineTaskControlledPurge,
"PartitionAllocSchedulerLoopQuarantineTaskControlledPurgeEnabledProcess"
"es",
PartitionAllocSchedulerLoopQuarantineTaskControlledPurgeEnabledProcesses::
kBrowserOnly,
&kPartitionAllocSchedulerLoopQuarantineTaskControlledPurgeEnabledProcessesOptions};
BASE_FEATURE(kPartitionAllocEventuallyZeroFreedMemory,
"PartitionAllocEventuallyZeroFreedMemory",
FEATURE_DISABLED_BY_DEFAULT);
// Evaluated with positive stability and performance results on Linux-based
// disabled elsewhere (for now). Does not apply to Windows.
BASE_FEATURE(kPartitionAllocFewerMemoryRegions,
"PartitionAllocFewerMemoryRegions",
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
FEATURE_ENABLED_BY_DEFAULT);
#else
FEATURE_DISABLED_BY_DEFAULT);
#endif
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_FEATURE(kPartitionAllocBackupRefPtr,
"PartitionAllocBackupRefPtr",
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_FEATURE_FLAG)
FEATURE_ENABLED_BY_DEFAULT
#else
@@ -208,10 +200,15 @@ constinit const FeatureParam<bool> kBackupRefPtrSuppressDoubleFreeDetectedCrash{
false};
constinit const FeatureParam<bool> kBackupRefPtrSuppressCorruptionDetectedCrash{
&kPartitionAllocBackupRefPtr, "brp-suppress-corruption-detected-crash",
#if PA_BUILDFLAG(IS_IOS)
// TODO(crbug.com/41497028): Continue investigation and remove once
// addressed.
true};
#else
false};
#endif
BASE_FEATURE(kPartitionAllocMemoryTagging,
"PartitionAllocMemoryTagging",
#if PA_BUILDFLAG(USE_FULL_MTE) || BUILDFLAG(IS_ANDROID)
FEATURE_ENABLED_BY_DEFAULT
#else
@@ -223,7 +220,7 @@ constexpr FeatureParam<MemtagMode>::Option kMemtagModeOptions[] = {
{MemtagMode::kSync, "sync"},
{MemtagMode::kAsync, "async"}};
// Note: Do not use the prepared macro as of no need for a local cache.
// Note: Do not use the prepared macro as of no need for a local cache.
constinit const FeatureParam<MemtagMode> kMemtagModeParam{
&kPartitionAllocMemoryTagging, "memtag-mode",
#if PA_BUILDFLAG(USE_FULL_MTE)
@@ -260,13 +257,10 @@ constinit const FeatureParam<MemoryTaggingEnabledProcesses>
#endif
&kMemoryTaggingEnabledProcessesOptions};
BASE_FEATURE(kKillPartitionAllocMemoryTagging,
"KillPartitionAllocMemoryTagging",
FEATURE_DISABLED_BY_DEFAULT);
BASE_FEATURE(kKillPartitionAllocMemoryTagging, FEATURE_DISABLED_BY_DEFAULT);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPermissiveMte);
BASE_FEATURE(kPartitionAllocPermissiveMte,
"PartitionAllocPermissiveMte",
#if PA_BUILDFLAG(USE_FULL_MTE)
// We want to actually crash if USE_FULL_MTE is enabled.
FEATURE_DISABLED_BY_DEFAULT
@@ -275,22 +269,16 @@ BASE_FEATURE(kPartitionAllocPermissiveMte,
#endif
);
BASE_FEATURE(kAsanBrpDereferenceCheck,
"AsanBrpDereferenceCheck",
FEATURE_ENABLED_BY_DEFAULT);
BASE_FEATURE(kAsanBrpExtractionCheck,
"AsanBrpExtractionCheck", // Not much noise at the moment to
BASE_FEATURE(kAsanBrpDereferenceCheck, FEATURE_ENABLED_BY_DEFAULT);
BASE_FEATURE(kAsanBrpExtractionCheck, // Not much noise at the moment to
FEATURE_DISABLED_BY_DEFAULT); // enable by default.
BASE_FEATURE(kAsanBrpInstantiationCheck,
"AsanBrpInstantiationCheck",
FEATURE_ENABLED_BY_DEFAULT);
BASE_FEATURE(kAsanBrpInstantiationCheck, FEATURE_ENABLED_BY_DEFAULT);
// If enabled, switches the bucket distribution to a denser one.
//
// We enable this by default everywhere except for 32-bit Android, since we saw
// regressions there.
BASE_FEATURE(kPartitionAllocUseDenserDistribution,
"PartitionAllocUseDenserDistribution",
#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
FEATURE_DISABLED_BY_DEFAULT
#else
@@ -313,9 +301,7 @@ constinit const FeatureParam<BucketDistributionMode>
#endif // BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
&kPartitionAllocBucketDistributionOption};
BASE_FEATURE(kPartitionAllocMemoryReclaimer,
"PartitionAllocMemoryReclaimer",
FEATURE_ENABLED_BY_DEFAULT);
BASE_FEATURE(kPartitionAllocMemoryReclaimer, FEATURE_ENABLED_BY_DEFAULT);
BASE_FEATURE_PARAM(TimeDelta,
kPartitionAllocMemoryReclaimerInterval,
&kPartitionAllocMemoryReclaimer,
@@ -326,13 +312,11 @@ BASE_FEATURE_PARAM(TimeDelta,
// Configures whether we set a lower limit for renderers that do not have a main
// frame, similar to the limit that is already done for backgrounded renderers.
BASE_FEATURE(kLowerPAMemoryLimitForNonMainRenderers,
"LowerPAMemoryLimitForNonMainRenderers",
FEATURE_DISABLED_BY_DEFAULT);
// Whether to straighten free lists for larger slot spans in PurgeMemory() ->
// ... -> PartitionPurgeSlotSpan().
BASE_FEATURE(kPartitionAllocStraightenLargerSlotSpanFreeLists,
"PartitionAllocStraightenLargerSlotSpanFreeLists",
FEATURE_ENABLED_BY_DEFAULT);
const FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode>::
Option kPartitionAllocStraightenLargerSlotSpanFreeListsModeOption[] = {
@@ -355,19 +339,14 @@ constinit const FeatureParam<
// Whether to sort free lists for smaller slot spans in PurgeMemory().
BASE_FEATURE(kPartitionAllocSortSmallerSlotSpanFreeLists,
"PartitionAllocSortSmallerSlotSpanFreeLists",
FEATURE_ENABLED_BY_DEFAULT);
// Whether to sort the active slot spans in PurgeMemory().
BASE_FEATURE(kPartitionAllocSortActiveSlotSpans,
"PartitionAllocSortActiveSlotSpans",
FEATURE_DISABLED_BY_DEFAULT);
BASE_FEATURE(kPartitionAllocSortActiveSlotSpans, FEATURE_DISABLED_BY_DEFAULT);
#if BUILDFLAG(IS_WIN)
// Whether to retry allocations when commit fails.
BASE_FEATURE(kPageAllocatorRetryOnCommitFailure,
"PageAllocatorRetryOnCommitFailure",
FEATURE_DISABLED_BY_DEFAULT);
BASE_FEATURE(kPageAllocatorRetryOnCommitFailure, FEATURE_DISABLED_BY_DEFAULT);
#endif
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
@@ -384,20 +363,6 @@ BASE_FEATURE_PARAM(bool,
false);
#endif
BASE_FEATURE(kEnableConfigurableThreadCacheMultiplier,
"EnableConfigurableThreadCacheMultiplier",
base::FEATURE_DISABLED_BY_DEFAULT);
MIRACLE_PARAMETER_FOR_DOUBLE(GetThreadCacheMultiplier,
kEnableConfigurableThreadCacheMultiplier,
"ThreadCacheMultiplier",
2.)
MIRACLE_PARAMETER_FOR_DOUBLE(GetThreadCacheMultiplierForAndroid,
kEnableConfigurableThreadCacheMultiplier,
"ThreadCacheMultiplierForAndroid",
1.)
constexpr partition_alloc::internal::base::TimeDelta ToPartitionAllocTimeDelta(
TimeDelta time_delta) {
return partition_alloc::internal::base::Microseconds(
@@ -409,67 +374,7 @@ constexpr TimeDelta FromPartitionAllocTimeDelta(
return Microseconds(time_delta.InMicroseconds());
}
BASE_FEATURE(kEnableConfigurableThreadCachePurgeInterval,
"EnableConfigurableThreadCachePurgeInterval",
FEATURE_DISABLED_BY_DEFAULT);
MIRACLE_PARAMETER_FOR_TIME_DELTA(
GetThreadCacheMinPurgeIntervalValue,
kEnableConfigurableThreadCachePurgeInterval,
"ThreadCacheMinPurgeInterval",
FromPartitionAllocTimeDelta(partition_alloc::kMinPurgeInterval))
MIRACLE_PARAMETER_FOR_TIME_DELTA(
GetThreadCacheMaxPurgeIntervalValue,
kEnableConfigurableThreadCachePurgeInterval,
"ThreadCacheMaxPurgeInterval",
FromPartitionAllocTimeDelta(partition_alloc::kMaxPurgeInterval))
MIRACLE_PARAMETER_FOR_TIME_DELTA(
GetThreadCacheDefaultPurgeIntervalValue,
kEnableConfigurableThreadCachePurgeInterval,
"ThreadCacheDefaultPurgeInterval",
FromPartitionAllocTimeDelta(partition_alloc::kDefaultPurgeInterval))
// Returns the feature-configurable minimum thread-cache purge interval,
// converted from base::TimeDelta into PartitionAlloc's internal TimeDelta
// type via ToPartitionAllocTimeDelta.
const partition_alloc::internal::base::TimeDelta
GetThreadCacheMinPurgeInterval() {
return ToPartitionAllocTimeDelta(GetThreadCacheMinPurgeIntervalValue());
}
// Returns the feature-configurable maximum thread-cache purge interval,
// converted from base::TimeDelta into PartitionAlloc's internal TimeDelta
// type via ToPartitionAllocTimeDelta.
const partition_alloc::internal::base::TimeDelta
GetThreadCacheMaxPurgeInterval() {
return ToPartitionAllocTimeDelta(GetThreadCacheMaxPurgeIntervalValue());
}
// Returns the feature-configurable default thread-cache purge interval,
// converted from base::TimeDelta into PartitionAlloc's internal TimeDelta
// type via ToPartitionAllocTimeDelta.
const partition_alloc::internal::base::TimeDelta
GetThreadCacheDefaultPurgeInterval() {
return ToPartitionAllocTimeDelta(GetThreadCacheDefaultPurgeIntervalValue());
}
BASE_FEATURE(kEnableConfigurableThreadCacheMinCachedMemoryForPurging,
"EnableConfigurableThreadCacheMinCachedMemoryForPurging",
FEATURE_DISABLED_BY_DEFAULT);
MIRACLE_PARAMETER_FOR_INT(
GetThreadCacheMinCachedMemoryForPurgingBytes,
kEnableConfigurableThreadCacheMinCachedMemoryForPurging,
"ThreadCacheMinCachedMemoryForPurgingBytes",
partition_alloc::kMinCachedMemoryForPurgingBytes)
// An apparent quarantine leak in the buffer partition unacceptably
// bloats memory when MiraclePtr is enabled in the renderer process.
// We believe we have found and patched the leak, but out of an
// abundance of caution, we provide this toggle that allows us to
// wholly disable MiraclePtr in the buffer partition, if necessary.
//
// TODO(crbug.com/40064499): this is unneeded once
// MiraclePtr-for-Renderer launches.
BASE_FEATURE(kPartitionAllocDisableBRPInBufferPartition,
"PartitionAllocDisableBRPInBufferPartition",
FEATURE_DISABLED_BY_DEFAULT);
BASE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground,
"PartitionAllocAdjustSizeWhenInForeground",
#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
FEATURE_ENABLED_BY_DEFAULT);
#else
@@ -478,7 +383,6 @@ BASE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground,
#if PA_BUILDFLAG(ENABLE_PARTITION_LOCK_PRIORITY_INHERITANCE)
BASE_FEATURE(kPartitionAllocUsePriorityInheritanceLocks,
"PartitionAllocUsePriorityInheritanceLocks",
FEATURE_DISABLED_BY_DEFAULT);
#endif // PA_BUILDFLAG(ENABLE_PARTITION_LOCK_PRIORITY_INHERITANCE)
@@ -16,10 +16,8 @@
#include "base/compiler_specific.h"
#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/time/time.h"
#include "partition_alloc/partition_root.h"
namespace base::features {
@@ -86,8 +84,6 @@ using PartitionAllocWithAdvancedChecksEnabledProcesses =
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeThreadCacheSize);
BASE_EXPORT int GetPartitionAllocLargeThreadCacheSizeValue();
BASE_EXPORT int GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid();
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing);
@@ -101,15 +97,19 @@ BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
std::string,
kPartitionAllocSchedulerLoopQuarantineConfig);
using PartitionAllocSchedulerLoopQuarantineTaskControlledPurgeEnabledProcesses =
internal::PAFeatureEnabledProcesses;
BASE_EXPORT BASE_DECLARE_FEATURE(
kPartitionAllocSchedulerLoopQuarantineTaskControlledPurge);
BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
PartitionAllocSchedulerLoopQuarantineTaskControlledPurgeEnabledProcesses,
kPartitionAllocSchedulerLoopQuarantineTaskControlledPurgeEnabledProcessesParam);
// Eventually zero out most PartitionAlloc memory. This is not meant as a
// security guarantee, but to increase the compression ratio of PartitionAlloc's
// fragmented super pages.
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocEventuallyZeroFreedMemory);
// Whether to make PartitionAlloc use fewer memory regions. This matters on
// Linux-based systems, where there is a per-process limit that we hit in some
// cases. See the comment in PartitionBucket::SlotSpanCommittedSize() for detail.
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocFewerMemoryRegions);
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
using BackupRefPtrEnabledProcesses = internal::PAFeatureEnabledProcesses;
@@ -198,24 +198,6 @@ BASE_EXPORT BASE_DECLARE_FEATURE_PARAM(
kPartialLowEndModeExcludePartitionAllocSupport);
#endif
BASE_EXPORT BASE_DECLARE_FEATURE(kEnableConfigurableThreadCacheMultiplier);
BASE_EXPORT double GetThreadCacheMultiplier();
BASE_EXPORT double GetThreadCacheMultiplierForAndroid();
BASE_EXPORT BASE_DECLARE_FEATURE(kEnableConfigurableThreadCachePurgeInterval);
extern const partition_alloc::internal::base::TimeDelta
GetThreadCacheMinPurgeInterval();
extern const partition_alloc::internal::base::TimeDelta
GetThreadCacheMaxPurgeInterval();
extern const partition_alloc::internal::base::TimeDelta
GetThreadCacheDefaultPurgeInterval();
BASE_EXPORT BASE_DECLARE_FEATURE(
kEnableConfigurableThreadCacheMinCachedMemoryForPurging);
BASE_EXPORT int GetThreadCacheMinCachedMemoryForPurgingBytes();
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDisableBRPInBufferPartition);
// When set, partitions use a larger ring buffer and free memory less
// aggressively when in the foreground.
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground);
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/rand_util.h"
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "base/allocator/partition_alloc_support.h"
#include <algorithm>
#include <array>
#include <cinttypes>
@@ -19,6 +18,7 @@
#include <string_view>
#include "base/allocator/partition_alloc_features.h"
#include "base/allocator/partition_alloc_support.h"
#include "base/allocator/scheduler_loop_quarantine_config.h"
#include "base/at_exit.h"
#include "base/check.h"
@@ -42,7 +42,9 @@
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock_impl.h"
#include "base/synchronization/lock_metrics_recorder.h"
#include "base/system/sys_info.h"
#include "base/task/common/task_annotator.h"
#include "base/task/single_thread_task_runner.h"
#include "base/thread_annotations.h"
#include "base/threading/platform_thread.h"
@@ -67,9 +69,11 @@
#include "partition_alloc/partition_root.h"
#include "partition_alloc/pointers/instance_tracer.h"
#include "partition_alloc/pointers/raw_ptr.h"
#include "partition_alloc/scheduler_loop_quarantine.h"
#include "partition_alloc/shim/allocator_shim.h"
#include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
#include "partition_alloc/shim/allocator_shim_dispatch_to_noop_on_free.h"
#include "partition_alloc/spinning_mutex.h"
#include "partition_alloc/stack/stack.h"
#include "partition_alloc/thread_cache.h"
@@ -130,13 +134,40 @@ namespace switches {
constexpr char kZygoteProcess[] = "zygote";
} // namespace switches
// Adapter that plugs base::LockMetricsRecorder into PartitionAlloc's
// LockMetricsRecorderInterface hook, so lock-acquisition timings measured
// inside PartitionAlloc can be reported through //base metrics machinery.
class LockMetricsRecorderSupport
: public partition_alloc::internal::LockMetricsRecorderInterface {
public:
// Caches the process-wide recorder obtained from
// base::LockMetricsRecorder::Get().
LockMetricsRecorderSupport() : recorder_(base::LockMetricsRecorder::Get()) {}
// Meyers-singleton accessor; the instance lives for the remainder of the
// process once first requested.
static LockMetricsRecorderSupport* Instance() {
static LockMetricsRecorderSupport instance;
return &instance;
}
// Forwards the sampling decision to the underlying base recorder.
bool ShouldRecordLockAcquisitionTime() const override {
return recorder_->ShouldRecordLockAcquisitionTime();
}
// Converts PartitionAlloc's internal TimeDelta to a base::TimeDelta (via
// microseconds) and records it tagged as a PartitionAlloc lock sample.
void RecordLockAcquisitionTime(
partition_alloc::internal::base::TimeDelta sample) override {
recorder_->RecordLockAcquisitionTime(
Microseconds(sample.InMicroseconds()),
base::LockMetricsRecorder::LockType::kPartitionAllocLock);
}
private:
// Not owned; assumed to outlive this adapter (it is process-global) —
// TODO(review): confirm base::LockMetricsRecorder::Get() returns a
// long-lived singleton.
base::LockMetricsRecorder* recorder_;
};
} // namespace
namespace {
void RunThreadCachePeriodicPurge() {
// Micros, since periodic purge should typically take at most a few ms.
SCOPED_UMA_HISTOGRAM_TIMER_MICROS("Memory.PartitionAlloc.PeriodicPurge");
SCOPED_UMA_HISTOGRAM_TIMER_MICROS_SUBSAMPLED(
"Memory.PartitionAlloc.PeriodicPurge.Subsampled",
base::ShouldRecordSubsampledMetric(0.01));
TRACE_EVENT0("memory", "PeriodicPurge");
auto& instance = ::partition_alloc::ThreadCacheRegistry::Instance();
instance.RunPeriodicPurge();
@@ -785,6 +816,11 @@ void ReconfigurePartitionForKnownProcess(const std::string& process_type) {
// experiments.
}
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_FEATURE(kPartitionAllocMakeSchedulerLoopQuarantinePurgeNoOp,
FEATURE_ENABLED_BY_DEFAULT);
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
void MakeFreeNoOp() {
// Ignoring `free()` during Shutdown would allow developers to introduce new
// dangling pointers. So we want to avoid ignoring free when it is enabled.
@@ -796,9 +832,18 @@ void MakeFreeNoOp() {
return;
}
#endif // PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
allocator_shim::InsertNoOpOnFreeAllocatorShimOnShutDown();
#endif // PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
if (base::FeatureList::IsEnabled(
kPartitionAllocMakeSchedulerLoopQuarantinePurgeNoOp)) {
partition_alloc::internal::ThreadBoundSchedulerLoopQuarantineBranch::
DangerouslyDisablePurge();
}
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}
PartitionAllocSupport* PartitionAllocSupport::Get() {
@@ -931,6 +976,9 @@ void PartitionAllocSupport::ReconfigureEarlyish(
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
allocator_shim::EnablePartitionAllocMemoryReclaimer();
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
partition_alloc::internal::SpinningMutex::SetLockMetricsRecorder(
LockMetricsRecorderSupport::Instance());
}
void PartitionAllocSupport::ReconfigureAfterZygoteFork(
@@ -976,6 +1024,7 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
base::allocator::InstallDanglingRawPtrChecks();
}
base::allocator::InstallUnretainedDanglingRawPtrChecks();
{
base::AutoLock scoped_lock(lock_);
// Avoid initializing more than once.
@@ -1017,10 +1066,9 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
// Configure ASAN hooks to report the `MiraclePtr status`. This is enabled
// only if BackupRefPtr is normally enabled in the current process for the
// current platform. Note that CastOS and iOS aren't protected by BackupRefPtr
// current platform. Note that CastOS is not protected by BackupRefPtr
// at the moment, so they are excluded.
#if PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && !PA_BUILDFLAG(IS_CASTOS) && \
!PA_BUILDFLAG(IS_IOS)
#if PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && !PA_BUILDFLAG(IS_CASTOS)
if (ShouldEnableFeatureOnProcess(
base::features::kBackupRefPtrEnabledProcessesParam.Get(),
process_type)) {
@@ -1036,7 +1084,7 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
EnableExtractionCheck(false),
EnableInstantiationCheck(false));
}
#endif // PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
#endif // PA_BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && !PA_BUILDFLAG(IS_CASTOS)
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
auto bucket_distribution = allocator_shim::BucketDistribution::kNeutral;
@@ -1057,11 +1105,25 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
const auto scheduler_loop_quarantine_thread_local_config =
GetSchedulerLoopQuarantineConfiguration(
process_type, SchedulerLoopQuarantineBranchType::kThreadLocalDefault);
const auto
scheduler_loop_quarantine_for_advanced_memory_safety_checks_config =
GetSchedulerLoopQuarantineConfiguration(
process_type,
SchedulerLoopQuarantineBranchType::kAdvancedMemorySafetyChecks);
if (base::FeatureList::IsEnabled(
base::features::
kPartitionAllocSchedulerLoopQuarantineTaskControlledPurge) &&
ShouldEnableFeatureOnProcess(
base::features::
kPartitionAllocSchedulerLoopQuarantineTaskControlledPurgeEnabledProcessesParam
.Get(),
process_type)) {
base::EnableSchedulerLoopQuarantineTaskControlledPurge();
}
const bool eventually_zero_freed_memory = base::FeatureList::IsEnabled(
base::features::kPartitionAllocEventuallyZeroFreedMemory);
const bool fewer_memory_regions = base::FeatureList::IsEnabled(
base::features::kPartitionAllocFewerMemoryRegions);
bool enable_memory_tagging = false;
partition_alloc::TagViolationReportingMode memory_tagging_reporting_mode =
@@ -1158,8 +1220,8 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
memory_tagging_reporting_mode, bucket_distribution,
scheduler_loop_quarantine_global_config,
scheduler_loop_quarantine_thread_local_config,
allocator_shim::EventuallyZeroFreedMemory(eventually_zero_freed_memory),
allocator_shim::FewerMemoryRegions(fewer_memory_regions));
scheduler_loop_quarantine_for_advanced_memory_safety_checks_config,
allocator_shim::EventuallyZeroFreedMemory(eventually_zero_freed_memory));
const uint32_t extras_size = allocator_shim::GetMainPartitionRootExtrasSize();
// As per description, extras are optional and are expected not to
@@ -1237,56 +1299,31 @@ void PartitionAllocSupport::ReconfigureAfterTaskRunnerInit(
// initialized later.
DCHECK(process_type != switches::kZygoteProcess);
partition_alloc::ThreadCacheRegistry::Instance().SetPurgingConfiguration(
base::features::GetThreadCacheMinPurgeInterval(),
base::features::GetThreadCacheMaxPurgeInterval(),
base::features::GetThreadCacheDefaultPurgeInterval(),
size_t(base::features::GetThreadCacheMinCachedMemoryForPurgingBytes()));
base::allocator::StartThreadCachePeriodicPurge();
if (base::FeatureList::IsEnabled(
base::features::kEnableConfigurableThreadCacheMultiplier)) {
// If kEnableConfigurableThreadCacheMultiplier is enabled, override the
// multiplier value with the corresponding feature param.
#if BUILDFLAG(IS_ANDROID)
::partition_alloc::ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
base::features::GetThreadCacheMultiplierForAndroid());
#else // BUILDFLAG(IS_ANDROID)
::partition_alloc::ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
base::features::GetThreadCacheMultiplier());
#endif // BUILDFLAG(IS_ANDROID)
} else {
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
// If kEnableConfigurableThreadCacheMultiplier is not enabled, lower
// thread cache limits on Android low end device to avoid stranding too much
// memory in the caches.
if (SysInfo::IsLowEndDeviceOrPartialLowEndModeEnabled(
features::kPartialLowEndModeExcludePartitionAllocSupport)) {
::partition_alloc::ThreadCacheRegistry::Instance()
.SetThreadCacheMultiplier(
::partition_alloc::ThreadCache::kDefaultMultiplier / 2.);
}
#endif // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
// Lower thread cache limits to avoid stranding too much memory in the caches.
if (SysInfo::IsLowEndDeviceOrPartialLowEndModeEnabled(
features::kPartialLowEndModeExcludePartitionAllocSupport)) {
::partition_alloc::ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
::partition_alloc::ThreadCache::kDefaultMultiplier / 2.);
}
#endif // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
// Renderer processes are more performance-sensitive, increase thread cache
// limits.
if (process_type == switches::kRendererProcess &&
base::FeatureList::IsEnabled(
base::features::kPartitionAllocLargeThreadCacheSize)) {
largest_cached_size_ =
size_t(base::features::GetPartitionAllocLargeThreadCacheSizeValue());
largest_cached_size_ = ::partition_alloc::kThreadCacheLargeSizeThreshold;
#if BUILDFLAG(IS_ANDROID)
// Use appropriately lower amount for Android devices with 3GB or less.
// Devices almost always report less physical memory than what they actually
// have, so use 3.2GB (a threshold commonly used throughout code) to avoid
// accidentally catching devices advertised as 4GB.
if (base::SysInfo::AmountOfPhysicalMemoryMB() < 3.2 * 1024) {
largest_cached_size_ = size_t(
base::features::
GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid());
if (base::SysInfo::AmountOfPhysicalMemory().InGiBF() < 3.2) {
largest_cached_size_ = ::partition_alloc::kThreadCacheDefaultSizeThreshold;
}
#endif // BUILDFLAG(IS_ANDROID)
@@ -179,6 +179,7 @@ BASE_EXPORT void CheckHeapIntegrity(const void* ptr);
BASE_EXPORT void SetDoubleFreeOrCorruptionDetectedFn(void (*fn)(uintptr_t));
using partition_alloc::SchedulerLoopQuarantineScanPolicyUpdater;
using partition_alloc::ScopedSchedulerLoopQuarantineDisallowScanlessPurge;
using partition_alloc::ScopedSchedulerLoopQuarantineExclusion;
} // namespace base::allocator
@@ -16,7 +16,6 @@ shim_supports_sized_dealloc_default = false
enable_backup_ref_ptr_support_default = false
enable_backup_ref_ptr_slow_checks_default = false
enable_dangling_raw_ptr_checks_default = false
enable_ios_corruption_hardening_default = false
# This is the default build configuration for pointers/raw_ptr*.
raw_ptr_zero_on_construct_default = true
@@ -54,6 +54,11 @@ if (!defined(partition_alloc_dcheck_always_on_default)) {
}
}
# Allows embedders to optionally disable PartitionAlloc.
if (!defined(use_partition_alloc_default)) {
use_partition_alloc_default = true
}
# PartitionAlloc have limited support for MSVC's cl.exe compiler. It can only
# access the generate "buildflags" and the "raw_ptr" definitions implemented
# with RawPtrNoOpImpl. Everything else is considered not supported.
@@ -124,7 +129,7 @@ declare_args() {
# and doesn't wish to incur the library size increase (crbug.com/674570).
# 2. On NaCl (through this declaration), where PartitionAlloc doesn't
# build at all.
use_partition_alloc = is_clang_or_gcc
use_partition_alloc = use_partition_alloc_default && is_clang_or_gcc
}
if (!is_clang_or_gcc) {
@@ -152,15 +157,6 @@ declare_args() {
use_partition_alloc_as_malloc
}
declare_args() {
# This is a flag for binary experiment on iOS. When BRP for iOS is enabled,
# we see some un-actionable `DoubleFreeOrCorruptionDetected` crashes.
# This flag enables some extra `CHECK`s to get actionable crash reports.
# TODO(crbug.com/371135823): Remove upon completion of investigation.
enable_ios_corruption_hardening = use_partition_alloc_as_malloc && is_ios &&
enable_ios_corruption_hardening_default
}
assert(
!enable_allocator_shim_partition_alloc_dispatch_with_advanced_checks_support || use_partition_alloc_as_malloc,
"PartitionAlloc with advanced checks requires PartitionAlloc itself.")
@@ -214,20 +210,19 @@ declare_args() {
# Enable reentrancy checks at `partition_alloc::internal::Lock`.
# TODO(crbug.com/371135823): Remove upon completion of investigation.
enable_partition_lock_reentrancy_check = enable_ios_corruption_hardening
enable_partition_lock_reentrancy_check = false
# This will write a fixed cookie pattern at the end of each allocation, and
# later verify the pattern remain unchanged to ensure there is no OOB write.
# It comes with performance and memory cost, hence enabled only in debug.
use_partition_cookie =
partition_alloc_is_debug || partition_alloc_dcheck_always_on ||
enable_ios_corruption_hardening
partition_alloc_is_debug || partition_alloc_dcheck_always_on
# This will change partition cookie size to 4B or 8B, whichever equivalent to
# size of InSlotMetadata. This option is useful for InSlotMetadata corruption
# investigation.
# TODO(crbug.com/371135823): Remove upon completion of investigation.
smaller_partition_cookie = enable_ios_corruption_hardening
smaller_partition_cookie = false
}
declare_args() {
@@ -270,7 +265,7 @@ declare_args() {
# outside of Chromium.
use_asan_backup_ref_ptr =
build_with_chromium && is_asan &&
(is_win || is_android || is_linux || is_mac || is_chromeos)
(is_win || is_android || is_linux || is_apple || is_chromeos)
# Use probe-on-destruct unowned ptr detection with ASAN.
use_raw_ptr_asan_unowned_impl = false
@@ -305,13 +300,12 @@ declare_args() {
enable_backup_ref_ptr_feature_flag =
enable_backup_ref_ptr_support && use_raw_ptr_backup_ref_impl &&
# Platforms where BackupRefPtr hasn't shipped yet:
!is_castos && !is_ios
!is_castos
# While keeping BRP support, override a feature flag to make it disabled
# state. This will overwrite `enable_backup_ref_ptr_feature_flag`.
# TODO(https://crbug.com/372183586): Fix the bug and remove this arg.
force_disable_backup_ref_ptr_feature =
enable_backup_ref_ptr_support && enable_ios_corruption_hardening
force_disable_backup_ref_ptr_feature = enable_backup_ref_ptr_support && false
# Build support for Dangling Ptr Detection (DPD) via BackupRefPtr (BRP),
# making the raw_ptr<T> implementation to RawPtrBackupRefImpl if active.
@@ -345,12 +339,11 @@ declare_args() {
declare_args() {
# Because of address space issue, this feature cannot be enabled on
# 32bit OS and iOS, i.e. has_64_bit_pointers && !is_ios
move_metadata_outside_gigacage = false && has_64_bit_pointers && !is_ios
move_metadata_outside_gigacage = has_64_bit_pointers && !is_ios
}
declare_args() {
enable_move_metadata_outside_gigacage_trial =
false && move_metadata_outside_gigacage
enable_move_metadata_outside_gigacage_trial = move_metadata_outside_gigacage
}
assert(!enable_move_metadata_outside_gigacage_trial ||
@@ -280,7 +280,7 @@ if (is_clang_or_gcc) {
}
config("memory_tagging") {
if (current_cpu == "arm64" &&
if (current_cpu == "arm64" && is_clang &&
(is_linux || is_chromeos || is_android || is_fuchsia)) {
# base/ has access to the MTE intrinsics because it needs to use them,
# but they're not backwards compatible. Use base::CPU::has_mte()
@@ -648,6 +648,7 @@ if (is_clang_or_gcc) {
"partition_alloc_base/memory/ref_counted.h",
"partition_alloc_base/memory/scoped_policy.h",
"partition_alloc_base/memory/scoped_refptr.h",
"partition_alloc_base/memory/stack_allocated.h",
"partition_alloc_base/no_destructor.h",
"partition_alloc_base/notreached.h",
"partition_alloc_base/numerics/checked_math.h",
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_ADDRESS_POOL_MANAGER_TYPES_H_
#define PARTITION_ALLOC_ADDRESS_POOL_MANAGER_TYPES_H_
@@ -15,9 +15,13 @@ namespace partition_alloc {
namespace {
enum ExternalMetadataTrialGroupPercentage {
kEnabled = 10, // 10%
kDisabled = 10, // 10%
kEnabled = 25, // 25% enabled
kDisabled = 25, // 25% control
// Otherwise, disabled (default behavior)
};
// Rather than doing percentage group based assignment, set all clients to
// enabled when true.
constexpr bool kDefaultEnableExternalMetadataTrial = true;
ExternalMetadataTrialGroup s_externalMetadataJoinedGroup =
ExternalMetadataTrialGroup::kUndefined;
@@ -31,6 +35,11 @@ void SetExternalMetadataTrialGroup(ExternalMetadataTrialGroup group) {
namespace internal {
ExternalMetadataTrialGroup SelectExternalMetadataTrialGroup() {
if constexpr (kDefaultEnableExternalMetadataTrial) {
auto group = ExternalMetadataTrialGroup::kEnabled;
SetExternalMetadataTrialGroup(group);
return group;
}
uint32_t random = internal::RandomValue() /
static_cast<double>(std::numeric_limits<uint32_t>::max()) *
100.0;
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "partition_alloc/dangling_raw_ptr_checks.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "partition_alloc/internal_allocator.h"
namespace partition_alloc::internal {
@@ -46,7 +46,7 @@ class InternalAllocator {
}
template <typename U>
bool operator==(const InternalAllocator<U>&) {
bool operator==(const InternalAllocator<U>&) const {
// InternalAllocator<T> can free allocations made by InternalAllocator<U>.
return true;
}
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "partition_alloc/oom_callback.h"
#include "partition_alloc/partition_alloc_check.h"
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_OOM_CALLBACK_H_
#define PARTITION_ALLOC_OOM_CALLBACK_H_
@@ -475,11 +475,7 @@ void TerminateAnotherProcessOnCommitFailure() {
return;
}
// LINT.IfChange(CHROME_RESULT_CODE_TERMINATED_BY_OTHER_PROCESS_ON_COMMIT_FAILURE)
static constexpr UINT kExitCode = 39;
// LINT.ThenChange(/chrome/common/chrome_result_codes.h:CHROME_RESULT_CODE_TERMINATED_BY_OTHER_PROCESS_ON_COMMIT_FAILURE)
::TerminateProcess(process_to_terminate, kExitCode);
::TerminateProcess(process_to_terminate, kTerminateOnCommitFailureExitCode);
::CloseHandle(process_to_terminate);
}
#endif
@@ -26,6 +26,12 @@
namespace partition_alloc {
// LINT.IfChange(CHROME_RESULT_CODE_TERMINATED_BY_OTHER_PROCESS_ON_COMMIT_FAILURE)
// Exit code to use when another process is terminated on commit failure.
// This is defined here to avoid a dependency on Chrome.
static constexpr unsigned int kTerminateOnCommitFailureExitCode = 39;
// LINT.ThenChange(/chrome/common/chrome_result_codes.h:CHROME_RESULT_CODE_TERMINATED_BY_OTHER_PROCESS_ON_COMMIT_FAILURE)
struct PageAccessibilityConfiguration {
enum Permissions {
kInaccessible,
@@ -32,7 +32,8 @@
#elif (PA_BUILDFLAG(IS_ANDROID) && PA_BUILDFLAG(PA_ARCH_CPU_64_BITS)) || \
(PA_BUILDFLAG(IS_LINUX) && PA_BUILDFLAG(PA_ARCH_CPU_ARM64)) || \
(PA_BUILDFLAG(IS_LINUX) && PA_BUILDFLAG(PA_ARCH_CPU_PPC64))
(PA_BUILDFLAG(IS_LINUX) && PA_BUILDFLAG(PA_ARCH_CPU_PPC64)) || \
(PA_BUILDFLAG(IS_LINUX) && PA_BUILDFLAG(PA_ARCH_CPU_LOONGARCH64))
// This should work for all POSIX (if needed), but currently all other
// supported OS/architecture combinations use either hard-coded values
// (such as x86) or have means to determine these values without needing
@@ -130,7 +131,7 @@ PageAllocationGranularityShift() {
// compiled for 64kB are likely to work on 4kB systems, 64kB is a good choice
// here.
return 16; // 64kB
#elif defined(_MIPS_ARCH_LOONGSON) || PA_BUILDFLAG(PA_ARCH_CPU_LOONGARCH64)
#elif defined(_MIPS_ARCH_LOONGSON)
return 14; // 16kB
#elif PA_BUILDFLAG(IS_APPLE) && PA_BUILDFLAG(PA_ARCH_CPU_64_BITS)
return static_cast<size_t>(vm_page_shift);
@@ -140,7 +141,7 @@ PageAllocationGranularityShift() {
// compiled for 64kB are likely to work on 4kB systems, 64kB is a good choice
// here.
return 16; // 64kB
#elif defined(_MIPS_ARCH_LOONGSON) || PA_BUILDFLAG(PA_ARCH_CPU_LOONGARCH64)
#elif defined(_MIPS_ARCH_LOONGSON)
return 14; // 16kB
#elif PA_BUILDFLAG(IS_APPLE) && PA_BUILDFLAG(PA_ARCH_CPU_64_BITS)
return static_cast<size_t>(vm_page_shift);
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_PAGE_ALLOCATOR_INTERNAL_H_
#define PARTITION_ALLOC_PAGE_ALLOCATOR_INTERNAL_H_
@@ -320,9 +320,14 @@ void PartitionAddressSpace::InitThreadIsolatedPool(
pool_size));
#if PA_CONFIG(MOVE_METADATA_OUT_OF_GIGACAGE)
offsets_to_metadata_[kThreadIsolatedPoolHandle] =
metadata_region_start_ - setup_.thread_isolated_pool_base_address_ +
MetadataInnerOffset(kThreadIsolatedPoolHandle);
if (metadata_region_start_ != kUninitializedPoolBaseAddress) {
offsets_to_metadata_[kThreadIsolatedPoolHandle] =
metadata_region_start_ - setup_.thread_isolated_pool_base_address_ +
MetadataInnerOffset(kThreadIsolatedPoolHandle);
} else {
// If no metadata region is available, use `SystemPageSize()`.
offsets_to_metadata_[kThreadIsolatedPoolHandle] = SystemPageSize();
}
#endif // PA_CONFIG(MOVE_METADATA_OUT_OF_GIGACAGE)
}
#endif // PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
@@ -394,6 +399,9 @@ void PartitionAddressSpace::UninitThreadIsolatedPoolForTesting() {
setup_.thread_isolated_pool_base_address_ = kUninitializedPoolBaseAddress;
setup_.thread_isolation_.enabled = false;
}
#if PA_CONFIG(MOVE_METADATA_OUT_OF_GIGACAGE)
offsets_to_metadata_[kThreadIsolatedPoolHandle] = SystemPageSize();
#endif // PA_CONFIG(MOVE_METADATA_OUT_OF_GIGACAGE)
}
#endif
@@ -444,6 +452,9 @@ void PartitionAddressSpace::InitMetadataRegionAndOffsets() {
// ConfigurablePool has not been initialized yet at this time.
offsets_to_metadata_[kConfigurablePoolHandle] = SystemPageSize();
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
offsets_to_metadata_[kThreadIsolatedPoolHandle] = SystemPageSize();
#endif // PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
}
#endif // PA_CONFIG(MOVE_METADATA_OUT_OF_GIGACAGE)
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_PARTITION_ALLOC_BASE_COMPILER_SPECIFIC_H_
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_COMPILER_SPECIFIC_H_
@@ -495,7 +490,7 @@ inline constexpr bool AnalyzerAssumeTrue(bool arg) {
// Annotates code indicating that it should be permanently exempted from
// `-Wunsafe-buffer-usage`. For temporary cases such as migrating callers to
// safer patterns, use `UNSAFE_TODO()` instead;
// safer patterns, use `PA_UNSAFE_TODO()` instead;
#if defined(__clang__)
// Disabling `clang-format` allows each `_Pragma` to be on its own line, as
// recommended by https://gcc.gnu.org/onlinedocs/cpp/Pragmas.html.
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_PARTITION_ALLOC_BASE_COMPONENT_EXPORT_H_
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_COMPONENT_EXPORT_H_
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_PARTITION_ALLOC_BASE_CXX20_IS_CONSTANT_EVALUATED_H_
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_CXX20_IS_CONSTANT_EVALUATED_H_
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "partition_alloc/partition_alloc_base/debug/alias.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
@@ -2,13 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "partition_alloc/partition_alloc_base/files/file_util.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/posix/eintr_wrapper.h"
namespace partition_alloc::internal::base {
@@ -16,8 +12,8 @@ namespace partition_alloc::internal::base {
bool ReadFromFD(int fd, char* buffer, size_t bytes) {
size_t total_read = 0;
while (total_read < bytes) {
ssize_t bytes_read =
WrapEINTR(read)(fd, buffer + total_read, bytes - total_read);
ssize_t bytes_read = WrapEINTR(read)(
fd, PA_UNSAFE_TODO(buffer + total_read), bytes - total_read);
if (bytes_read <= 0) {
break;
}
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_PARTITION_ALLOC_BASE_IMMEDIATE_CRASH_H_
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_IMMEDIATE_CRASH_H_
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_PARTITION_ALLOC_BASE_IOS_IOS_UTIL_H_
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_IOS_IOS_UTIL_H_
@@ -101,8 +101,7 @@ int MacOSMajorVersion() {
// Darwin major version 25 corresponds to macOS version 26. Assume a
// correspondence between Darwin's major version numbers and macOS major
// version numbers. TODO(https://crbug.com/424162749): Verify this before
// release.
// version numbers.
return darwin_major_version + 1;
}();
return macos_major_version;
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_PARTITION_ALLOC_BASE_MEMORY_PAGE_SIZE_H_
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_MEMORY_PAGE_SIZE_H_
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_PARTITION_ALLOC_BASE_MEMORY_SCOPED_POLICY_H_
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_MEMORY_SCOPED_POLICY_H_
@@ -0,0 +1,61 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef PARTITION_ALLOC_PARTITION_ALLOC_BASE_MEMORY_STACK_ALLOCATED_H_
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_MEMORY_STACK_ALLOCATED_H_
#include <stddef.h>
#if defined(__clang__)
#define PA_STACK_ALLOCATED_IGNORE(reason) \
__attribute__((annotate("stack_allocated_ignore")))
#else // !defined(__clang__)
#define PA_STACK_ALLOCATED_IGNORE(reason)
#endif // !defined(__clang__)
// If a class or one of its ancestor classes is annotated with
// PA_STACK_ALLOCATED() in its class definition, then instances of the class may
// not be allocated on the heap or as a member variable of a non-stack-allocated
// class.
#define PA_STACK_ALLOCATED() \
public: \
using IsStackAllocatedTypeMarker [[maybe_unused]] = int; \
\
private: \
void* operator new(size_t) = delete; \
void* operator new(size_t, ::partition_alloc::internal::base::NotNullTag, \
void*) = delete; \
void* operator new(size_t, void*) = delete
namespace partition_alloc::internal::base {
// NotNullTag was originally added to WebKit here:
// https://trac.webkit.org/changeset/103243/webkit
// ...with the stated goal of improving the performance of the placement new
// operator and potentially enabling the -fomit-frame-pointer compiler flag.
//
// TODO(szager): The placement new operator which uses this tag is currently
// defined in third_party/blink/renderer/platform/wtf/allocator/allocator.h,
// in the global namespace. It should probably move to /base.
//
// It's unknown at the time of writing whether it still provides any benefit
// (or if it ever did). It is used by placing the kNotNull tag before the
// address of the object when calling placement new.
//
// If the kNotNull tag is specified to placement new for a null pointer,
// Undefined Behaviour can result.
//
// Example:
//
// union { int i; } u;
//
// // Typically placement new looks like this.
// new (&u.i) int(3);
// // But we can promise `&u.i` is not null like this.
// new (base::NotNullTag::kNotNull, &u.i) int(3);
enum class NotNullTag { kNotNull };
} // namespace partition_alloc::internal::base
#endif // PARTITION_ALLOC_PARTITION_ALLOC_BASE_MEMORY_STACK_ALLOCATED_H_
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_PARTITION_ALLOC_BASE_POSIX_SAFE_STRERROR_H_
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_POSIX_SAFE_STRERROR_H_
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_PARTITION_ALLOC_BASE_PROCESS_PROCESS_HANDLE_H_
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_PROCESS_PROCESS_HANDLE_H_
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "partition_alloc/partition_alloc_base/process/process_handle.h"
#include <unistd.h>
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "partition_alloc/partition_alloc_base/rand_util.h"
#include <climits>
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_PARTITION_ALLOC_BASE_SCOPED_CLEAR_LAST_ERROR_H_
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_SCOPED_CLEAR_LAST_ERROR_H_
@@ -2,37 +2,34 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "partition_alloc/partition_alloc_base/strings/string_util.h"
#include <cstring>
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
namespace partition_alloc::internal::base::strings {
const char* FindLastOf(const char* text, const char* characters) {
size_t length = strlen(text);
const char* ptr = text + length - 1;
const char* ptr = PA_UNSAFE_TODO(text + length - 1);
while (ptr >= text) {
if (strchr(characters, *ptr)) {
if (PA_UNSAFE_TODO(strchr(characters, *ptr))) {
return ptr;
}
--ptr;
PA_UNSAFE_TODO(--ptr);
}
return nullptr;
}
const char* FindLastNotOf(const char* text, const char* characters) {
size_t length = strlen(text);
const char* ptr = text + length - 1;
const char* ptr = PA_UNSAFE_TODO(text + length - 1);
while (ptr >= text) {
if (!strchr(characters, *ptr)) {
if (!PA_UNSAFE_TODO(strchr(characters, *ptr))) {
return ptr;
}
--ptr;
PA_UNSAFE_TODO(--ptr);
}
return nullptr;
}
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "partition_alloc/partition_alloc_base/threading/platform_thread.h"
namespace partition_alloc::internal::base {
@@ -58,7 +58,7 @@ void PlatformThreadForTesting::YieldCurrentThread() {
size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
#if PA_BUILDFLAG(IS_IOS)
return 0;
return 1024 * 1024;
#else
// The macOS default for a pthread stack size is 512kB.
// Libc-594.1.4/pthreads/pthread.c's pthread_attr_init uses
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include <sys/time.h>
#include <cstdint>
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "partition_alloc/partition_alloc_base/time/time_override.h"
#include "partition_alloc/partition_alloc_base/check.h"
@@ -2,10 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_PARTITION_ALLOC_BUILDFLAGS_H_
#define PARTITION_ALLOC_PARTITION_ALLOC_BUILDFLAGS_H_
@@ -165,6 +165,18 @@ constexpr bool kUseLazyCommit = true;
constexpr bool kUseLazyCommit = false;
#endif
// See the comment in PartitionBucket::SlotSpanCommittedSize(). This should not
// be enabled on Windows (because it increases committed memory, which is a
// limited system-wide resource on this platform). It has been evaluated on
// macOS, where it yielded no beenefit (nor any real downside).
constexpr bool kUseFewerMemoryRegions =
#if PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_ANDROID) || \
PA_BUILDFLAG(IS_CHROMEOS)
true;
#else
false;
#endif
// On these platforms, lock all the partitions before fork(), and unlock after.
// This may be required on more platforms in the future.
#define PA_CONFIG_HAS_ATFORK_HANDLER() \
@@ -69,7 +69,9 @@ enum class FreeFlags {
kNoHooks = 1 << 1, // Internal.
// Quarantine for a while to ensure no UaF from on-stack pointers.
kSchedulerLoopQuarantine = 1 << 2,
kMaxValue = kSchedulerLoopQuarantine,
// Quarantine for a while to ensure no UaF from on-stack pointers.
kSchedulerLoopQuarantineForAdvancedMemorySafetyChecks = 1 << 3,
kMaxValue = kSchedulerLoopQuarantineForAdvancedMemorySafetyChecks,
};
PA_DEFINE_OPERATORS_FOR_FLAGS(FreeFlags);
} // namespace internal
@@ -110,7 +112,7 @@ PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageShift() {
return PageAllocationGranularityShift() + 2;
}
#elif defined(_MIPS_ARCH_LOONGSON) || PA_BUILDFLAG(PA_ARCH_CPU_LOONGARCH64)
#elif defined(_MIPS_ARCH_LOONGSON)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageShift() {
return 16; // 64 KiB
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_PARTITION_ALLOC_FOR_TESTING_H_
#define PARTITION_ALLOC_PARTITION_ALLOC_FOR_TESTING_H_
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "partition_alloc/partition_alloc_hooks.h"
#include <ostream>
@@ -239,7 +239,7 @@ SlotSpanMetadata* PartitionDirectMap(PartitionRoot* root,
// Note that this only affects allocations that are not served out of the
// thread cache, but as a simple example the buffer partition in blink is
// frequently used for large allocations (e.g. ArrayBuffer), and frequent,
// small ones (e.g. WTF::String), and does not have a thread cache.
// small ones (e.g. blink::String), and does not have a thread cache.
ScopedUnlockGuard scoped_unlock{PartitionRootLock(root)};
const size_t slot_size = PartitionRoot::GetDirectMapSlotSize(raw_size);
@@ -1449,7 +1449,7 @@ void PartitionBucket::InitializeSlotSpanForGwpAsan(
size_t PartitionBucket::SlotSpanCommittedSize(PartitionRoot* root) const {
// With lazy commit, we certainly don't want to commit more than
// necessary. This is not reached, but keep the CHECK() as documentation.
PA_CHECK(!kUseLazyCommit);
static_assert(!(kUseLazyCommit && kUseFewerMemoryRegions));
// Memory is reserved in units of PartitionPage, but a given slot span may be
// smaller than the reserved area. For instance (assuming 4k pages), for a
@@ -1473,7 +1473,9 @@ size_t PartitionBucket::SlotSpanCommittedSize(PartitionRoot* root) const {
// less than 2^16, and Chromium sometimes hits the limit (see
// /proc/sys/vm/max_map_count for the current limit), largely because of
// PartitionAlloc contributing thousands of regions. Locally, on a Linux
// system, this reduces the number of PartitionAlloc regions by up to ~4x.
// system, this reduces the number of PartitionAlloc regions by up to
// ~4x. This has been shown to meaningfully reduce crash rate on Linux-based
// platforms.
//
// Why is it safe?
// The extra memory is not used by anything, so committing it doesn't make a
@@ -1488,19 +1490,13 @@ size_t PartitionBucket::SlotSpanCommittedSize(PartitionRoot* root) const {
// the size of the VMA red-black tree in the kernel), it might increase
// slightly the cases where we bump into the sandbox memory limit.
//
// Is it safe to do while running?
// Since this is decided through root settings, the value changes at runtime,
// so we may decommit memory that was never committed. This is safe onLinux,
// since decommitting is just changing permissions back to PROT_NONE, which
// the tail end would already have.
//
// Can we do better?
// For simplicity, we do not "fix" the regions that were committed before the
// settings are changed (after feature list initialization). This means that
// we end up with more regions that we could. The intent is to run a field
// experiment, then change the default value, at which point we get the full
// impact, so this is only temporary.
return root->settings.fewer_memory_regions
return kUseFewerMemoryRegions
? (get_pages_per_slot_span() << PartitionPageShift())
: get_bytes_per_span();
}
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "partition_alloc/partition_freelist_entry.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h"
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
// Holds functions for generating OOM errors from PartitionAlloc. This is
// distinct from oom.h in that it is meant only for use in PartitionAlloc.
@@ -110,7 +110,25 @@ struct SlotSpanMetadata {
public:
// Checks if it is feasible to store raw_size.
PA_ALWAYS_INLINE bool CanStoreRawSize() const { return can_store_raw_size_; }
PA_ALWAYS_INLINE bool CanStoreRawSize() const {
#if defined(THREAD_SANITIZER)
// `can_store_raw_size_` is a cache of the value in the bucket, stored there
// to avoid touching `bucket`. It causes issues with TSAN, since it is part
// of a bitfield along with non-constant values.
//
// The warning is correct, though it is extremely unlikely to cause issues
// in practice, as the element in the bitfield is constant, so the only case
// where it would cause issue is when a non-atomic read to a variable can
// give garbage results, where the bits are neither from the old nor the new
// value.
//
// TODO(crbug.com/437026570): Fix that properly, rather than relying on
// effectively a suppression.
return bucket->CanStoreRawSize();
#else
return can_store_raw_size_;
#endif
}
// Returns the total size of the slots that are currently provisioned.
PA_ALWAYS_INLINE size_t GetProvisionedSize() const {
@@ -1062,12 +1062,13 @@ void PartitionRoot::Init(PartitionOptions opts) {
#endif // PA_BUILDFLAG(HAS_64_BIT_POINTERS)
settings.eventually_zero_freed_memory =
opts.eventually_zero_freed_memory == PartitionOptions::kEnabled;
settings.fewer_memory_regions =
opts.fewer_memory_regions == PartitionOptions::kEnabled;
scheduler_loop_quarantine.Configure(
scheduler_loop_quarantine_root,
opts.scheduler_loop_quarantine_global_config);
scheduler_loop_quarantine_for_advanced_memory_safety_checks.Configure(
scheduler_loop_quarantine_root,
opts.scheduler_loop_quarantine_for_advanced_memory_safety_checks_config);
settings.scheduler_loop_quarantine_thread_local_config =
opts.scheduler_loop_quarantine_thread_local_config;
@@ -1190,10 +1191,14 @@ void PartitionRoot::Init(PartitionOptions opts) {
PartitionRoot::Settings::Settings() = default;
PartitionRoot::PartitionRoot()
: scheduler_loop_quarantine_root(*this), scheduler_loop_quarantine(this) {}
: scheduler_loop_quarantine_root(*this),
scheduler_loop_quarantine(this),
scheduler_loop_quarantine_for_advanced_memory_safety_checks(this) {}
PartitionRoot::PartitionRoot(PartitionOptions opts)
: scheduler_loop_quarantine_root(*this), scheduler_loop_quarantine(this) {
: scheduler_loop_quarantine_root(*this),
scheduler_loop_quarantine(this),
scheduler_loop_quarantine_for_advanced_memory_safety_checks(this) {
Init(opts);
}
@@ -1845,11 +1850,7 @@ PA_NOINLINE void PartitionRoot::QuarantineForBrp(
if (hook) [[unlikely]] {
hook(object, usable_size);
} else {
// TODO(https://crbug.com/371135823): Enable zapping again once finished
// investigation.
#if !PA_BUILDFLAG(IS_IOS)
internal::SecureMemset(object, internal::kQuarantinedByte, usable_size);
#endif // !PA_BUILDFLAG(IS_IOS)
}
}
#endif // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
@@ -184,6 +184,11 @@ struct PartitionOptions {
// each `ThreadCache` instance.
internal::SchedulerLoopQuarantineConfig
scheduler_loop_quarantine_thread_local_config;
// Configuration for the AMSC quarantine branch. Used when
// `FreeFlags::kSchedulerLoopQuarantineForAdvancedMemorySafetyChecks` is
// specified.
internal::SchedulerLoopQuarantineConfig
scheduler_loop_quarantine_for_advanced_memory_safety_checks_config;
// As the name implies, this is not a security measure, as there is no
// guarantee that memorys has been zeroed out when handed back to the
@@ -191,17 +196,6 @@ struct PartitionOptions {
// compression ratio of freed memory inside partially allocated pages (due to
// fragmentation).
EnableToggle eventually_zero_freed_memory = kDisabled;
// Linux-based systems have a limited per-process VMA limit, be more
// conservative there. This matches the feature setting in
// partition_alloc_features.cc, but not all clients use Chromium's feature
// system to configure PartitionAlloc.
EnableToggle fewer_memory_regions =
#if PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_ANDROID) || \
PA_BUILDFLAG(IS_CHROMEOS)
kEnabled;
#else
kDisabled;
#endif
struct {
EnableToggle enabled = kDisabled;
@@ -274,7 +268,6 @@ struct alignas(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
bool eventually_zero_freed_memory = false;
internal::SchedulerLoopQuarantineConfig
scheduler_loop_quarantine_thread_local_config;
bool fewer_memory_regions = false;
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
bool memory_tagging_enabled_ = false;
bool use_random_memory_tagging_ = false;
@@ -394,6 +387,8 @@ struct alignas(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
size_t scheduler_loop_quarantine_branch_capacity_in_bytes = 0;
internal::SchedulerLoopQuarantineRoot scheduler_loop_quarantine_root;
internal::GlobalSchedulerLoopQuarantineBranch scheduler_loop_quarantine;
internal::GlobalSchedulerLoopQuarantineBranch
scheduler_loop_quarantine_for_advanced_memory_safety_checks;
static constexpr internal::base::TimeDelta kMaxPurgeDuration =
internal::base::Milliseconds(2);
@@ -1544,6 +1539,13 @@ PA_ALWAYS_INLINE void PartitionRoot::FreeNoHooksImmediate(
scheduler_loop_quarantine.Quarantine(object, slot_span, slot_start);
}
return;
} else if constexpr (
ContainsFlags(
flags,
FreeFlags::kSchedulerLoopQuarantineForAdvancedMemorySafetyChecks)) {
scheduler_loop_quarantine_for_advanced_memory_safety_checks.Quarantine(
object, slot_span, slot_start);
return;
}
// TODO(keishi): Create function to convert |object| to |slot_start_ptr|.
@@ -1566,7 +1568,6 @@ PA_ALWAYS_INLINE void PartitionRoot::FreeAfterBRPQuarantine(
// Iterating over the entire slot can be really expensive.
#if PA_BUILDFLAG(EXPENSIVE_DCHECKS_ARE_ON)
#if !PA_BUILDFLAG(IS_IOS)
auto hook = PartitionAllocHooks::GetQuarantineOverrideHook();
// If we have a hook the object segment is not necessarily filled
// with |kQuarantinedByte|.
@@ -1577,7 +1578,6 @@ PA_ALWAYS_INLINE void PartitionRoot::FreeAfterBRPQuarantine(
PA_DCHECK(object[i] == internal::kQuarantinedByte);
}
}
#endif // !PA_BUILDFLAG(IS_IOS)
internal::DebugMemset(internal::SlotStartAddr2Ptr(slot_start),
internal::kFreedByte, slot_span->GetUtilizedSlotSize());
#endif // PA_BUILDFLAG(EXPENSIVE_DCHECKS_ARE_ON)
@@ -2424,7 +2424,7 @@ void* PartitionRoot::ReallocInline(void* ptr,
// factor to the new size to avoid this issue. This workaround is only
// intended to be used for Skia bots, and is not intended to be a general
// solution.
if (new_size > old_usable_size && new_size > 12 << 20) {
if (new_size > old_usable_size) {
// 1.5x growth factor.
// Note that in case of integer overflow, the std::max ensures that the
// new_size is at least as large as the old_usable_size.
@@ -2,19 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "partition_alloc/partition_stats.h"
#include <cstring>
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
namespace partition_alloc {
SimplePartitionStatsDumper::SimplePartitionStatsDumper() {
memset(&stats_, 0, sizeof(stats_));
PA_UNSAFE_TODO(memset(&stats_, 0, sizeof(stats_)));
}
void SimplePartitionStatsDumper::PartitionDumpTotals(
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
// IWYU pragma: private, include "base/memory/raw_ptr.h"
#ifndef PARTITION_ALLOC_POINTERS_RAW_PTR_H_
@@ -680,21 +675,25 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
return static_cast<U*>(GetForExtraction());
}
PA_ALWAYS_INLINE constexpr raw_ptr& operator++() {
// PRECONDITIONS: `this` must not be at the end of the range.
PA_UNSAFE_BUFFER_USAGE PA_ALWAYS_INLINE constexpr raw_ptr& operator++() {
static_assert(
raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
"cannot increment raw_ptr unless AllowPtrArithmetic trait is present.");
wrapped_ptr_ = Impl::Advance(wrapped_ptr_, 1, true);
wrapped_ptr_ = PA_UNSAFE_TODO(Impl::Advance(wrapped_ptr_, 1, true));
return *this;
}
PA_ALWAYS_INLINE constexpr raw_ptr& operator--() {
// PRECONDITIONS: `this` must not be at the start of the range.
PA_UNSAFE_BUFFER_USAGE PA_ALWAYS_INLINE constexpr raw_ptr& operator--() {
static_assert(
raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
"cannot decrement raw_ptr unless AllowPtrArithmetic trait is present.");
wrapped_ptr_ = Impl::Retreat(wrapped_ptr_, 1, true);
wrapped_ptr_ = PA_UNSAFE_TODO(Impl::Retreat(wrapped_ptr_, 1, true));
return *this;
}
PA_ALWAYS_INLINE constexpr raw_ptr operator++(int /* post_increment */) {
// PRECONDITIONS: `this` must not be at the end of the range.
PA_UNSAFE_BUFFER_USAGE PA_ALWAYS_INLINE constexpr raw_ptr operator++(
int /* post_increment */) {
static_assert(
raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
"cannot increment raw_ptr unless AllowPtrArithmetic trait is present.");
@@ -702,7 +701,9 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
++(*this);
return result;
}
PA_ALWAYS_INLINE constexpr raw_ptr operator--(int /* post_decrement */) {
// PRECONDITIONS: `this` must not be at the start of the range.
PA_UNSAFE_BUFFER_USAGE PA_ALWAYS_INLINE constexpr raw_ptr operator--(
int /* post_decrement */) {
static_assert(
raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
"cannot decrement raw_ptr unless AllowPtrArithmetic trait is present.");
@@ -710,40 +711,48 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
--(*this);
return result;
}
// PRECONDITIONS: `this` must be at least `delta_elems` before range end.
template <
typename Z,
typename = std::enable_if_t<partition_alloc::internal::is_offset_type<Z>>>
PA_ALWAYS_INLINE constexpr raw_ptr& operator+=(Z delta_elems) {
PA_UNSAFE_BUFFER_USAGE PA_ALWAYS_INLINE constexpr raw_ptr& operator+=(
Z delta_elems) {
static_assert(
raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
"cannot increment raw_ptr unless AllowPtrArithmetic trait is present.");
wrapped_ptr_ = Impl::Advance(wrapped_ptr_, delta_elems, true);
wrapped_ptr_ =
PA_UNSAFE_TODO(Impl::Advance(wrapped_ptr_, delta_elems, true));
return *this;
}
// PRECONDITIONS: `this` must be at least `delta_elems` after range start.
template <
typename Z,
typename = std::enable_if_t<partition_alloc::internal::is_offset_type<Z>>>
PA_ALWAYS_INLINE constexpr raw_ptr& operator-=(Z delta_elems) {
PA_UNSAFE_BUFFER_USAGE PA_ALWAYS_INLINE constexpr raw_ptr& operator-=(
Z delta_elems) {
static_assert(
raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
"cannot decrement raw_ptr unless AllowPtrArithmetic trait is present.");
wrapped_ptr_ = Impl::Retreat(wrapped_ptr_, delta_elems, true);
wrapped_ptr_ =
PA_UNSAFE_TODO(Impl::Retreat(wrapped_ptr_, delta_elems, true));
return *this;
}
// PRECONDITIONS: `delta_elems` must be an index inside the range.
template <typename Z,
typename U = T,
typename = std::enable_if_t<
!std::is_void_v<typename std::remove_cv<U>::type> &&
partition_alloc::internal::is_offset_type<Z>>>
PA_ALWAYS_INLINE constexpr U& operator[](Z delta_elems) const {
PA_UNSAFE_BUFFER_USAGE PA_ALWAYS_INLINE constexpr U& operator[](
Z delta_elems) const {
static_assert(
raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
"cannot index raw_ptr unless AllowPtrArithmetic trait is present.");
// Call SafelyUnwrapPtrForDereference() to simulate what GetForDereference()
// does, but without creating a temporary.
return *Impl::SafelyUnwrapPtrForDereference(
Impl::Advance(wrapped_ptr_, delta_elems, false));
PA_UNSAFE_TODO(Impl::Advance(wrapped_ptr_, delta_elems, false)));
}
// Do not disable operator+() and operator-().
@@ -759,31 +768,40 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
// operators for Z=uint64_t on 32-bit systems. The compiler instead would
// generate code that converts `raw_ptr<T>` to `T*` and adds uint64_t to that,
// bypassing the OOB protection entirely.
//
// PRECONDITIONS: `this` must be at least `delta_elems` before range end.
template <typename Z>
PA_ALWAYS_INLINE friend constexpr raw_ptr operator+(const raw_ptr& p,
Z delta_elems) {
PA_UNSAFE_BUFFER_USAGE PA_ALWAYS_INLINE friend constexpr raw_ptr operator+(
const raw_ptr& p,
Z delta_elems) {
// Don't check `is_offset_type<Z>` here, as existence of `Advance` is
// already gated on that, and we'd get double errors.
static_assert(
raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
"cannot add to raw_ptr unless AllowPtrArithmetic trait is present.");
raw_ptr result = Impl::Advance(p.wrapped_ptr_, delta_elems, false);
raw_ptr result =
PA_UNSAFE_TODO(Impl::Advance(p.wrapped_ptr_, delta_elems, false));
return result;
}
// PRECONDITIONS: `this` must be at least `delta_elems` before range end.
template <typename Z>
PA_ALWAYS_INLINE friend constexpr raw_ptr operator+(Z delta_elems,
const raw_ptr& p) {
PA_UNSAFE_BUFFER_USAGE PA_ALWAYS_INLINE friend constexpr raw_ptr operator+(
Z delta_elems,
const raw_ptr& p) {
return p + delta_elems;
}
// PRECONDITIONS: `this` must be at least `delta_elems` after range start.
template <typename Z>
PA_ALWAYS_INLINE friend constexpr raw_ptr operator-(const raw_ptr& p,
Z delta_elems) {
PA_UNSAFE_BUFFER_USAGE PA_ALWAYS_INLINE friend constexpr raw_ptr operator-(
const raw_ptr& p,
Z delta_elems) {
// Don't check `is_offset_type<Z>` here, as existence of `Retreat` is
// already gated on that, and we'd get double errors.
static_assert(raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
"cannot subtract from raw_ptr unless AllowPtrArithmetic "
"trait is present.");
raw_ptr result = Impl::Retreat(p.wrapped_ptr_, delta_elems, false);
raw_ptr result =
PA_UNSAFE_TODO(Impl::Retreat(p.wrapped_ptr_, delta_elems, false));
return result;
}
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_POINTERS_RAW_PTR_BACKUP_REF_IMPL_H_
#define PARTITION_ALLOC_POINTERS_RAW_PTR_BACKUP_REF_IMPL_H_
@@ -340,15 +335,18 @@ struct RawPtrBackupRefImpl {
// Advance the wrapped pointer by `delta_elems`.
// `is_in_pointer_modification` means that the result is intended to modify
// the pointer (as opposed to creating a new one).
// PRECONDITIONS: `wrapped_ptr` must be at least `delta_elems` before the
// end of the range.
template <
typename T,
typename Z,
typename =
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
PA_ALWAYS_INLINE static constexpr T*
PA_UNSAFE_BUFFER_USAGE PA_ALWAYS_INLINE static constexpr T*
Advance(T* wrapped_ptr, Z delta_elems, bool is_in_pointer_modification) {
// SAFETY: Preconditions enforced by PA_UNSAFE_BUFFER_USAGE.
if (partition_alloc::internal::base::is_constant_evaluated()) {
return wrapped_ptr + delta_elems;
return PA_UNSAFE_BUFFERS(wrapped_ptr + delta_elems);
}
T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
// When modifying the pointer, we have to make sure it doesn't migrate to a
@@ -357,24 +355,29 @@ struct RawPtrBackupRefImpl {
// properly. Do it anyway if extra OOB checks are enabled.
if (PA_BUILDFLAG(BACKUP_REF_PTR_EXTRA_OOB_CHECKS) ||
is_in_pointer_modification) {
// SAFETY: Preconditions enforced by PA_UNSAFE_BUFFER_USAGE.
return VerifyAndPoisonPointerAfterAdvanceOrRetreat(
unpoisoned_ptr, unpoisoned_ptr + delta_elems);
unpoisoned_ptr, PA_UNSAFE_BUFFERS(unpoisoned_ptr + delta_elems));
}
return unpoisoned_ptr + delta_elems;
// SAFETY: Preconditions enforced by PA_UNSAFE_BUFFER_USAGE.
return PA_UNSAFE_BUFFERS(unpoisoned_ptr + delta_elems);
}
// Retreat the wrapped pointer by `delta_elems`.
// `is_in_pointer_modification` means that the result is intended to modify
// the pointer (as opposed to creating a new one).
// PRECONDITIONS: `wrapped_ptr` must be at least `delta_elems` after
// the start of the range.
template <
typename T,
typename Z,
typename =
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
PA_ALWAYS_INLINE static constexpr T*
PA_UNSAFE_BUFFER_USAGE PA_ALWAYS_INLINE static constexpr T*
Retreat(T* wrapped_ptr, Z delta_elems, bool is_in_pointer_modification) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return wrapped_ptr - delta_elems;
// SAFETY: Preconditions enforced by PA_UNSAFE_BUFFER_USAGE.
return PA_UNSAFE_BUFFERS(wrapped_ptr - delta_elems);
}
T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
// When modifying the pointer, we have to make sure it doesn't migrate to a
@@ -383,10 +386,12 @@ struct RawPtrBackupRefImpl {
// properly. Do it anyway if extra OOB checks are enabled.
if (PA_BUILDFLAG(BACKUP_REF_PTR_EXTRA_OOB_CHECKS) ||
is_in_pointer_modification) {
// SAFETY: Preconditions enforced by PA_UNSAFE_BUFFER_USAGE.
return VerifyAndPoisonPointerAfterAdvanceOrRetreat(
unpoisoned_ptr, unpoisoned_ptr - delta_elems);
unpoisoned_ptr, PA_UNSAFE_BUFFERS(unpoisoned_ptr - delta_elems));
}
return unpoisoned_ptr - delta_elems;
// SAFETY: Preconditions enforced by PA_UNSAFE_BUFFER_USAGE.
return PA_UNSAFE_BUFFERS(unpoisoned_ptr - delta_elems);
}
template <typename T>
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
// IWYU pragma: private, include "base/memory/raw_ptr_exclusion.h"
#ifndef PARTITION_ALLOC_POINTERS_RAW_PTR_EXCLUSION_H_
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_POINTERS_RAW_PTR_HOOKABLE_IMPL_H_
#define PARTITION_ALLOC_POINTERS_RAW_PTR_HOOKABLE_IMPL_H_
@@ -132,39 +127,45 @@ struct RawPtrHookableImpl {
}
// Advance the wrapped pointer by `delta_elems`.
// PRECONDITIONS: `wrapped_ptr` must be at least `delta_elems` before the
// end of the range.
template <
typename T,
typename Z,
typename =
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
PA_ALWAYS_INLINE static constexpr T*
PA_UNSAFE_BUFFER_USAGE PA_ALWAYS_INLINE static constexpr T*
Advance(T* wrapped_ptr, Z delta_elems, bool /*is_in_pointer_modification*/) {
// SAFETY: required from caller, enforced by PA_UNSAFE_BUFFER_USAGE.
if (!partition_alloc::internal::base::is_constant_evaluated()) {
if (EnableHooks) {
GetRawPtrHooks()->advance(
reinterpret_cast<uintptr_t>(wrapped_ptr),
reinterpret_cast<uintptr_t>(wrapped_ptr + delta_elems));
GetRawPtrHooks()->advance(reinterpret_cast<uintptr_t>(wrapped_ptr),
reinterpret_cast<uintptr_t>(PA_UNSAFE_BUFFERS(
wrapped_ptr + delta_elems)));
}
}
return wrapped_ptr + delta_elems;
return PA_UNSAFE_BUFFERS(wrapped_ptr + delta_elems);
}
// Retreat the wrapped pointer by `delta_elems`.
// PRECONDITIONS: `wrapped_ptr` must be at least `delta_elems` after
// the start of the range.
template <
typename T,
typename Z,
typename =
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
PA_ALWAYS_INLINE static constexpr T*
PA_UNSAFE_BUFFER_USAGE PA_ALWAYS_INLINE static constexpr T*
Retreat(T* wrapped_ptr, Z delta_elems, bool /*is_in_pointer_modification*/) {
// SAFETY: required from caller, enforced by PA_UNSAFE_BUFFER_USAGE.
if (!partition_alloc::internal::base::is_constant_evaluated()) {
if (EnableHooks) {
GetRawPtrHooks()->advance(
reinterpret_cast<uintptr_t>(wrapped_ptr),
reinterpret_cast<uintptr_t>(wrapped_ptr - delta_elems));
GetRawPtrHooks()->advance(reinterpret_cast<uintptr_t>(wrapped_ptr),
reinterpret_cast<uintptr_t>(PA_UNSAFE_BUFFERS(
wrapped_ptr - delta_elems)));
}
}
return wrapped_ptr - delta_elems;
return PA_UNSAFE_BUFFERS(wrapped_ptr - delta_elems);
}
template <typename T>
@@ -121,10 +121,11 @@ void CrossKindConversionFromMayDangle() {
// Conversions may add the `kMayDangle` trait, but not remove it.
DanglingPtrA ptr_a1 = new TypeA();
DanglingPtrB ptr_b1 = new TypeB();
// TODO(https://crbug.com/437910658): remove regex check for both diagnostics once we roll clang
raw_ptr<TypeA> ptr_a2 = ptr_a1; // expected-error {{no viable conversion from 'raw_ptr<[...], base::RawPtrTraits::kMayDangle aka 1>' to 'raw_ptr<[...], (default) RawPtrTraits::kEmpty aka 0>'}}
raw_ptr<TypeA> ptr_a3(ptr_a1); // expected-error@*:* {{static assertion failed due to requirement 'Traits == (raw_ptr<(anonymous namespace)::TypeA, partition_alloc::internal::RawPtrTraits::kMayDangle>::Traits | RawPtrTraits::kMayDangle)'}}
raw_ptr<TypeA> ptr_a3(ptr_a1); // expected-error-re@*:* {{static assertion failed due to requirement 'Traits == ({{(base::)?}}raw_ptr<(anonymous namespace)::TypeA, partition_alloc::internal::RawPtrTraits::kMayDangle>::Traits | {{(partition_alloc::internal::)?}}RawPtrTraits::kMayDangle)'}}
raw_ptr<TypeA> ptr_a4 = std::move(ptr_a1); // expected-error-re {{no viable conversion from '__libcpp_remove_reference_t<{{(base::)?}}raw_ptr<{{(\(anonymous namespace\)::)?}}TypeA, partition_alloc::internal::RawPtrTraits::kMayDangle> &>' (aka 'base::raw_ptr<(anonymous namespace)::TypeA, partition_alloc::internal::RawPtrTraits::kMayDangle>') to 'raw_ptr<TypeA>'}}
raw_ptr<TypeB> ptr_b2(std::move(ptr_b1)); // expected-error@*:* {{static assertion failed due to requirement 'Traits == (raw_ptr<(anonymous namespace)::TypeB, partition_alloc::internal::RawPtrTraits::kMayDangle>::Traits | RawPtrTraits::kMayDangle)'}}
raw_ptr<TypeB> ptr_b2(std::move(ptr_b1)); // expected-error-re@*:* {{static assertion failed due to requirement 'Traits == ({{(base::)?}}raw_ptr<(anonymous namespace)::TypeB, partition_alloc::internal::RawPtrTraits::kMayDangle>::Traits | {{(partition_alloc::internal::)?}}RawPtrTraits::kMayDangle)'}}
}
void CrossKindConversionFromDummy() {
@@ -132,9 +133,9 @@ void CrossKindConversionFromDummy() {
raw_ptr<TypeA, base::RawPtrTraits::kDummyForTest> ptr_a1 = new TypeA();
raw_ptr<TypeB, base::RawPtrTraits::kDummyForTest> ptr_b1 = new TypeB();
DanglingPtrA ptr_a2 = ptr_a1; // expected-error {{no viable conversion from 'raw_ptr<[...], base::RawPtrTraits::kDummyForTest aka 2048>' to 'raw_ptr<[...], base::RawPtrTraits::kMayDangle aka 1>'}}
DanglingPtrA ptr_a3(ptr_a1); // expected-error@*:* {{static assertion failed due to requirement 'Traits == (raw_ptr<(anonymous namespace)::TypeA, partition_alloc::internal::RawPtrTraits::kDummyForTest>::Traits | RawPtrTraits::kMayDangle)'}}
DanglingPtrA ptr_a3(ptr_a1); // expected-error-re@*:* {{static assertion failed due to requirement 'Traits == ({{(base::)?}}raw_ptr<(anonymous namespace)::TypeA, partition_alloc::internal::RawPtrTraits::kDummyForTest>::Traits | {{(partition_alloc::internal::)?}}RawPtrTraits::kMayDangle)'}}
DanglingPtrA ptr_a4 = std::move(ptr_a1); // expected-error-re {{no viable conversion from '__libcpp_remove_reference_t<{{(base::)?}}raw_ptr<{{(\(anonymous namespace\)::)?}}TypeA, partition_alloc::internal::RawPtrTraits::kDummyForTest> &>' (aka 'base::raw_ptr<(anonymous namespace)::TypeA, partition_alloc::internal::RawPtrTraits::kDummyForTest>') to 'DanglingPtrA' (aka 'raw_ptr<TypeA, base::RawPtrTraits::kMayDangle>')}}
DanglingPtrB ptr_b2(std::move(ptr_b1)); // expected-error@*:* {{static assertion failed due to requirement 'Traits == (raw_ptr<(anonymous namespace)::TypeB, partition_alloc::internal::RawPtrTraits::kDummyForTest>::Traits | RawPtrTraits::kMayDangle)'}}
DanglingPtrB ptr_b2(std::move(ptr_b1)); // expected-error-re@*:* {{static assertion failed due to requirement 'Traits == ({{(base::)?}}raw_ptr<(anonymous namespace)::TypeB, partition_alloc::internal::RawPtrTraits::kDummyForTest>::Traits | {{(partition_alloc::internal::)?}}RawPtrTraits::kMayDangle)'}}
}
void CantStorePointerObtainedFromEphemeralRawAddr() {
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_POINTERS_RAW_PTR_NOOP_IMPL_H_
#define PARTITION_ALLOC_POINTERS_RAW_PTR_NOOP_IMPL_H_
@@ -68,25 +63,31 @@ struct RawPtrNoOpImpl {
}
// Advance the wrapped pointer by `delta_elems`.
// PRECONDITIONS: `wrapped_ptr` must be at least `delta_elems` before the
// end of the range.
template <
typename T,
typename Z,
typename =
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
PA_ALWAYS_INLINE static constexpr T*
PA_UNSAFE_BUFFER_USAGE PA_ALWAYS_INLINE static constexpr T*
Advance(T* wrapped_ptr, Z delta_elems, bool /*is_in_pointer_modification*/) {
return wrapped_ptr + delta_elems;
// SAFETY: required from caller, enforced by PA_UNSAFE_BUFFER_USAGE.
return PA_UNSAFE_BUFFERS(wrapped_ptr + delta_elems);
}
// Retreat the wrapped pointer by `delta_elems`.
// PRECONDITIONS: `wrapped_ptr` must be at least `delta_elems` after
// the start of the range.
template <
typename T,
typename Z,
typename =
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
PA_ALWAYS_INLINE static constexpr T*
PA_UNSAFE_BUFFER_USAGE PA_ALWAYS_INLINE static constexpr T*
Retreat(T* wrapped_ptr, Z delta_elems, bool /*is_in_pointer_modification*/) {
return wrapped_ptr - delta_elems;
// SAFETY: required from caller, enforced by PA_UNSAFE_BUFFER_USAGE.
return PA_UNSAFE_BUFFERS(wrapped_ptr - delta_elems);
}
template <typename T>
@@ -36,10 +36,11 @@ void CrossKindConversionFromMayDangle() {
DanglingRefB ref_b1(b);
DanglingRefC ref_c1(c);
DanglingRefD ref_d1(d);
raw_ref<TypeA> ref_a2 = ref_a1; // expected-error@*:* {{static assertion failed due to requirement 'Traits == (raw_ptr<(anonymous namespace)::TypeA, 5>::Traits | RawPtrTraits::kMayDangle)'}}
raw_ref<TypeB> ref_b2(ref_b1); // expected-error@*:* {{static assertion failed due to requirement 'Traits == (raw_ptr<(anonymous namespace)::TypeB, 5>::Traits | RawPtrTraits::kMayDangle)'}}
raw_ref<TypeC> ref_c2 = std::move(ref_c1); // expected-error@*:* {{static assertion failed due to requirement 'Traits == (raw_ptr<(anonymous namespace)::TypeC, 5>::Traits | RawPtrTraits::kMayDangle)'}}
raw_ref<TypeD> ref_d2(std::move(ref_d1)); // expected-error@*:* {{static assertion failed due to requirement 'Traits == (raw_ptr<(anonymous namespace)::TypeD, 5>::Traits | RawPtrTraits::kMayDangle)'}}
// TODO(https://crbug.com/437910658): remove regex check for both diagnostics once we roll clang
raw_ref<TypeA> ref_a2 = ref_a1; // expected-error-re@*:* {{static assertion failed due to requirement 'Traits == ({{(base::)?}}raw_ptr<(anonymous namespace)::TypeA, 5>::Traits | {{(partition_alloc::internal::)?}}RawPtrTraits::kMayDangle)'}}
raw_ref<TypeB> ref_b2(ref_b1); // expected-error-re@*:* {{static assertion failed due to requirement 'Traits == ({{(base::)?}}raw_ptr<(anonymous namespace)::TypeB, 5>::Traits | {{(partition_alloc::internal::)?}}RawPtrTraits::kMayDangle)'}}
raw_ref<TypeC> ref_c2 = std::move(ref_c1); // expected-error-re@*:* {{static assertion failed due to requirement 'Traits == ({{(base::)?}}raw_ptr<(anonymous namespace)::TypeC, 5>::Traits | {{(partition_alloc::internal::)?}}RawPtrTraits::kMayDangle)'}}
raw_ref<TypeD> ref_d2(std::move(ref_d1)); // expected-error-re@*:* {{static assertion failed due to requirement 'Traits == ({{(base::)?}}raw_ptr<(anonymous namespace)::TypeD, 5>::Traits | {{(partition_alloc::internal::)?}}RawPtrTraits::kMayDangle)'}}
}
void CrossKindConversionFromDummy() {
@@ -52,10 +53,10 @@ void CrossKindConversionFromDummy() {
raw_ref<TypeB, base::RawPtrTraits::kDummyForTest> ref_b1(b);
raw_ref<TypeC, base::RawPtrTraits::kDummyForTest> ref_c1(c);
raw_ref<TypeD, base::RawPtrTraits::kDummyForTest> ref_d1(d);
DanglingRefA ref_a2 = ref_a1; // expected-error@*:* {{static assertion failed due to requirement 'Traits == (raw_ptr<(anonymous namespace)::TypeA, 2052>::Traits | RawPtrTraits::kMayDangle)'}}
DanglingRefB ref_b3(ref_b1); // expected-error@*:* {{static assertion failed due to requirement 'Traits == (raw_ptr<(anonymous namespace)::TypeB, 2052>::Traits | RawPtrTraits::kMayDangle)'}}
DanglingRefC ref_c2 = std::move(ref_c1); // expected-error@*:* {{static assertion failed due to requirement 'Traits == (raw_ptr<(anonymous namespace)::TypeC, 2052>::Traits | RawPtrTraits::kMayDangle)'}}
DanglingRefD ref_d2(std::move(ref_d1)); // expected-error@*:* {{static assertion failed due to requirement 'Traits == (raw_ptr<(anonymous namespace)::TypeD, 2052>::Traits | RawPtrTraits::kMayDangle)'}}
DanglingRefA ref_a2 = ref_a1; // expected-error-re@*:* {{static assertion failed due to requirement 'Traits == ({{(base::)?}}raw_ptr<(anonymous namespace)::TypeA, 2052>::Traits | {{(partition_alloc::internal::)?}}RawPtrTraits::kMayDangle)'}}
DanglingRefB ref_b3(ref_b1); // expected-error-re@*:* {{static assertion failed due to requirement 'Traits == ({{(base::)?}}raw_ptr<(anonymous namespace)::TypeB, 2052>::Traits | {{(partition_alloc::internal::)?}}RawPtrTraits::kMayDangle)'}}
DanglingRefC ref_c2 = std::move(ref_c1); // expected-error-re@*:* {{static assertion failed due to requirement 'Traits == ({{(base::)?}}raw_ptr<(anonymous namespace)::TypeC, 2052>::Traits | {{(partition_alloc::internal::)?}}RawPtrTraits::kMayDangle)'}}
DanglingRefD ref_d2(std::move(ref_d1)); // expected-error-re@*:* {{static assertion failed due to requirement 'Traits == ({{(base::)?}}raw_ptr<(anonymous namespace)::TypeD, 2052>::Traits | {{(partition_alloc::internal::)?}}RawPtrTraits::kMayDangle)'}}
}
} // namespace
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "partition_alloc/random.h"
#include <type_traits>
@@ -9,6 +9,8 @@
#include "partition_alloc/scheduler_loop_quarantine.h"
#include <atomic>
#include "partition_alloc/internal_allocator.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_page.h"
@@ -34,6 +36,11 @@ class PA_SCOPED_LOCKABLE FakeScopedGuard {
template <bool thread_bound>
using ScopedGuardIfNeeded =
std::conditional_t<thread_bound, FakeScopedGuard, ScopedGuard>;
// When set to `true`, all the branches stop purging. It helps to reduce
// shutdown hangs.
std::atomic_bool g_no_purge = false;
} // namespace
template <bool thread_bound>
@@ -79,6 +86,14 @@ void SchedulerLoopQuarantineBranch<thread_bound>::Configure(
enable_zapping_ = config.enable_zapping;
leak_on_destruction_ = config.leak_on_destruction;
branch_capacity_in_bytes_ = config.branch_capacity_in_bytes;
// This bucket index can be invalid if "Neutral" distribution is in use,
// but value here is only for comparison and should be safe.
largest_bucket_index_ =
BucketIndexLookup::GetIndexForDenserBuckets(config.max_quarantine_size);
PA_CHECK(largest_bucket_index_ < BucketIndexLookup::kNumBuckets);
PA_CHECK(&allocator_root_->buckets[largest_bucket_index_] <=
&allocator_root_->sentinel_bucket);
}
template <bool thread_bound>
@@ -126,15 +141,21 @@ void SchedulerLoopQuarantineBranch<thread_bound>::Quarantine(
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_DCHECK(!being_destructed_);
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
if (!enable_quarantine_ || pause_quarantine_ ||
allocator_root_->IsDirectMappedBucket(slot_span->bucket)) [[unlikely]] {
if (!enable_quarantine_ || pause_quarantine_) [[unlikely]] {
return allocator_root_->RawFreeWithThreadCache(slot_start, object,
slot_span);
}
if (slot_span->bucket < &allocator_root_->buckets[0] ||
&allocator_root_->buckets[largest_bucket_index_] < slot_span->bucket)
[[unlikely]] {
// The allocation is direct-mapped or larger than `largest_bucket_index_`.
return allocator_root_->RawFreeWithThreadCache(slot_start, object,
slot_span);
}
PA_DCHECK(!allocator_root_->IsDirectMapped(slot_span));
const size_t slot_size = slot_span->bucket->slot_size;
const size_t bucket_index =
static_cast<size_t>(slot_span->bucket - allocator_root_->buckets);
const size_t capacity_in_bytes =
branch_capacity_in_bytes_.load(std::memory_order_relaxed);
if (capacity_in_bytes < slot_size) [[unlikely]] {
@@ -154,7 +175,8 @@ void SchedulerLoopQuarantineBranch<thread_bound>::Quarantine(
branch_size_in_bytes_ += slot_size;
slots_.push_back({
.slot_start = slot_start,
.bucket_index = bucket_index,
.bucket_index =
static_cast<size_t>(slot_span->bucket - allocator_root_->buckets),
});
// Swap randomly so that the quarantine list remain shuffled.
@@ -179,6 +201,10 @@ PA_ALWAYS_INLINE void
SchedulerLoopQuarantineBranch<thread_bound>::PurgeInternal(
size_t target_size_in_bytes,
[[maybe_unused]] bool for_destruction) {
if (g_no_purge.load(std::memory_order_relaxed)) {
return;
}
int64_t freed_count = 0;
int64_t freed_size_in_bytes = 0;
@@ -279,6 +305,12 @@ void SchedulerLoopQuarantineBranch<thread_bound>::DisallowScanlessPurge() {
PA_CHECK(disallow_scanless_purge_ > 0); // Overflow check.
}
// static
template <bool thread_bound>
void SchedulerLoopQuarantineBranch<thread_bound>::DangerouslyDisablePurge() {
g_no_purge.store(true, std::memory_order_relaxed);
}
template <bool thread_bound>
const SchedulerLoopQuarantineConfig&
SchedulerLoopQuarantineBranch<thread_bound>::GetConfigurationForTesting() {
@@ -78,6 +78,9 @@ struct SchedulerLoopQuarantineConfig {
bool leak_on_destruction = false;
bool enable_quarantine = false;
bool enable_zapping = false;
// Accepts allocations up to this bucket size. If the given number does not
// match bucket size, it is rounded up to next bucket size.
size_t max_quarantine_size = BucketIndexLookup::kMaxBucketSize;
// For informational purposes only.
char branch_name[32] = "";
};
@@ -161,6 +164,11 @@ class SchedulerLoopQuarantineBranch {
void AllowScanlessPurge();
void DisallowScanlessPurge();
// Once called, all the branches stop purging. This means every branch grows
// unbounded, potentially resulting in OOM. However, if we know the program
// is being terminated, this can help reduce hangs.
static void DangerouslyDisablePurge();
const SchedulerLoopQuarantineConfig& GetConfigurationForTesting();
class ScopedQuarantineExclusion {
@@ -211,6 +219,8 @@ class SchedulerLoopQuarantineBranch {
bool enable_zapping_ = false;
bool leak_on_destruction_ = false;
uint16_t largest_bucket_index_ = BucketIndexLookup::kNumBuckets - 1;
// When non-zero, this branch temporarily stops accepting incoming quarantine
// requests.
int pause_quarantine_ = 0;
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "partition_alloc/scheduler_loop_quarantine_support.h"
namespace partition_alloc {
@@ -10,13 +10,13 @@
#ifndef PARTITION_ALLOC_SCHEDULER_LOOP_QUARANTINE_SUPPORT_H_
#define PARTITION_ALLOC_SCHEDULER_LOOP_QUARANTINE_SUPPORT_H_
#include <map>
#include <optional>
#include <variant>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/memory/stack_allocated.h"
#include "partition_alloc/partition_root.h"
#include "partition_alloc/scheduler_loop_quarantine.h"
#include "partition_alloc/thread_cache.h"
@@ -84,8 +84,32 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
uintptr_t tcache_address_ = 0;
};
namespace internal {
// This is a lightweight version of `SchedulerLoopQuarantineScanPolicyUpdater`.
// It calls `DisallowScanlessPurge` in the constructor and `AllowScanlessPurge`
// in the destructor.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
ScopedSchedulerLoopQuarantineDisallowScanlessPurge {
// This is `PA_STACK_ALLOCATED()` to ensure that those two calls are made on
// the same thread, allowing us to omit thread-safety analysis.
PA_STACK_ALLOCATED();
public:
PA_ALWAYS_INLINE ScopedSchedulerLoopQuarantineDisallowScanlessPurge() {
ThreadCache* tcache = ThreadCache::EnsureAndGet();
PA_CHECK(ThreadCache::IsValid(tcache));
tcache->GetSchedulerLoopQuarantineBranch().DisallowScanlessPurge();
}
PA_ALWAYS_INLINE ~ScopedSchedulerLoopQuarantineDisallowScanlessPurge() {
ThreadCache* tcache = ThreadCache::EnsureAndGet();
PA_CHECK(ThreadCache::IsValid(tcache));
tcache->GetSchedulerLoopQuarantineBranch().AllowScanlessPurge();
}
};
namespace internal {
class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
ScopedSchedulerLoopQuarantineBranchAccessorForTesting {
public:
@@ -20,6 +20,9 @@ struct AllocatorDispatch {
using AllocFn = void*(size_t size, void* context);
using AllocUncheckedFn = void*(size_t size, void* context);
using AllocZeroInitializedFn = void*(size_t n, size_t size, void* context);
using AllocZeroInitializedUncheckedFn = void*(size_t n,
size_t size,
void* context);
using AllocAlignedFn = void*(size_t alignment, size_t size, void* context);
using ReallocFn = void*(void* address, size_t size, void* context);
using ReallocUncheckedFn = void*(void* ptr, size_t size, void* context);
@@ -60,6 +63,7 @@ struct AllocatorDispatch {
AllocFn* alloc_function;
AllocUncheckedFn* alloc_unchecked_function;
AllocZeroInitializedFn* alloc_zero_initialized_function;
AllocZeroInitializedUncheckedFn* alloc_zero_initialized_unchecked_function;
AllocAlignedFn* alloc_aligned_function;
ReallocFn* realloc_function;
ReallocUncheckedFn* realloc_unchecked_function;
@@ -133,6 +137,7 @@ struct AllocatorDispatch {
COPY_IF_NULLPTR(alloc_function);
COPY_IF_NULLPTR(alloc_unchecked_function);
COPY_IF_NULLPTR(alloc_zero_initialized_function);
COPY_IF_NULLPTR(alloc_zero_initialized_unchecked_function);
COPY_IF_NULLPTR(alloc_aligned_function);
COPY_IF_NULLPTR(realloc_function);
COPY_IF_NULLPTR(realloc_unchecked_function);
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_SHIM_ALLOCATOR_INTERCEPTION_APPLE_H_
#define PARTITION_ALLOC_SHIM_ALLOCATOR_INTERCEPTION_APPLE_H_
@@ -66,6 +66,11 @@ void SetCallNewHandlerOnMallocFailure(bool value);
// regardless of SetCallNewHandlerOnMallocFailure().
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM) void* UncheckedAlloc(size_t size);
// Allocates |n| zeroed elements of size |size| or returns nullptr. It does NOT
// call the new_handler, regardless of SetCallNewHandlerOnMallocFailure().
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM)
void* UncheckedCalloc(size_t n, size_t size);
// Reallocates |ptr| to point at |size| bytes with the same alignment as |ptr|,
// or returns nullptr while leaving the |ptr| unchanged. It does NOT call the
// new_handler, regardless of SetCallNewHandlerOnMallocFailure().
@@ -151,9 +156,6 @@ using EnableMemoryTagging =
enum class BucketDistribution : uint8_t { kNeutral, kDenser };
using EventuallyZeroFreedMemory = partition_alloc::internal::base::
StrongAlias<class EventuallyZeroFreedMemoryTag, bool>;
using FewerMemoryRegions =
partition_alloc::internal::base::StrongAlias<class FewerMemoryRegionsTag,
bool>;
// If |thread_cache_on_non_quarantinable_partition| is specified, the
// thread-cache will be enabled on the non-quarantinable partition. The
// thread-cache on the main (malloc) partition will be disabled.
@@ -168,8 +170,9 @@ void ConfigurePartitions(
scheduler_loop_quarantine_global_config,
partition_alloc::internal::SchedulerLoopQuarantineConfig
scheduler_loop_quarantine_thread_local_config,
EventuallyZeroFreedMemory eventually_zero_freed_memory,
FewerMemoryRegions fewer_memory_regions);
partition_alloc::internal::SchedulerLoopQuarantineConfig
scheduler_loop_quarantine_for_advanced_memory_safety_checks_config,
EventuallyZeroFreedMemory eventually_zero_freed_memory);
PA_COMPONENT_EXPORT(ALLOCATOR_SHIM) uint32_t GetMainPartitionRootExtrasSize();
@@ -32,6 +32,10 @@
namespace allocator_shim {
void TryFreeDefaultFallbackToFindZoneAndFree(void* ptr) {
if (!ptr) [[unlikely]] {
return;
}
unsigned int zone_count = 0;
vm_address_t* zones = nullptr;
kern_return_t result =
@@ -57,7 +61,8 @@ void TryFreeDefaultFallbackToFindZoneAndFree(void* ptr) {
}
// There must be an owner zone.
PA_CHECK(false);
PA_CHECK(false) << "Oops! No zone found for "
<< reinterpret_cast<uintptr_t>(ptr);
}
} // namespace allocator_shim
@@ -121,15 +121,16 @@ void TryFreeDefaultImpl(void* ptr, void* context) {
} // namespace
const AllocatorDispatch AllocatorDispatch::default_dispatch = {
&MallocImpl, /* alloc_function */
&MallocImpl, /* alloc_unchecked_function */
&CallocImpl, /* alloc_zero_initialized_function */
&MemalignImpl, /* alloc_aligned_function */
&ReallocImpl, /* realloc_function */
&ReallocImpl, /* realloc_unchecked_function */
&FreeImpl, /* free_function */
&FreeWithSizeImpl, /* free_with_size_function */
    &FreeWithAlignmentImpl,       /* free_with_alignment_function */
&MallocImpl, /* alloc_function */
&MallocImpl, /* alloc_unchecked_function */
&CallocImpl, /* alloc_zero_initialized_function */
&CallocImpl, /* alloc_zero_initialized_unchecked_function */
&MemalignImpl, /* alloc_aligned_function */
&ReallocImpl, /* realloc_function */
&ReallocImpl, /* realloc_unchecked_function */
&FreeImpl, /* free_function */
&FreeWithSizeImpl, /* free_with_size_function */
    &FreeWithAlignmentImpl,        /* free_with_alignment_function */
    &FreeWithSizeAndAlignmentImpl, /* free_with_size_and_alignment_function */
&GetSizeEstimateImpl, /* get_size_estimate_function */
&GoodSizeImpl, /* good_size_function */
@@ -65,6 +65,15 @@ void* GlibcCalloc(size_t n, size_t size, void* context) {
return __libc_calloc(n, size);
}
// Zero-initialized array allocation that must not terminate the process:
// on n * size overflow, or when the request exceeds kMaxAllowedSize, it
// reports failure by returning nullptr instead of invoking OOM handling.
void* GlibcUncheckedCalloc(size_t n, size_t size, void* context) {
  const auto checked_bytes =
      partition_alloc::internal::base::CheckMul(n, size);
  if (checked_bytes.IsValid() &&
      checked_bytes.ValueOrDie() < kMaxAllowedSize) [[likely]] {
    return __libc_calloc(n, size);
  }
  return nullptr;
}
void* GlibcRealloc(void* address, size_t size, void* context) {
if (size >= kMaxAllowedSize) [[unlikely]] {
partition_alloc::TerminateBecauseOutOfMemory(size);
@@ -128,6 +137,7 @@ const AllocatorDispatch AllocatorDispatch::default_dispatch = {
&GlibcMalloc, /* alloc_function */
&GlibcUncheckedMalloc, /* alloc_unchecked_function */
&GlibcCalloc, /* alloc_zero_initialized_function */
&GlibcUncheckedCalloc, /* alloc_zero_initialized_unchecked_function */
&GlibcMemalign, /* alloc_aligned_function */
&GlibcRealloc, /* realloc_function */
&GlibcUncheckedRealloc, /* realloc_unchecked_function */
@@ -76,15 +76,16 @@ size_t RealSizeEstimate(void* address, void* context) {
} // namespace
const AllocatorDispatch AllocatorDispatch::default_dispatch = {
&RealMalloc, /* alloc_function */
&RealMalloc, /* alloc_unchecked_function */
&RealCalloc, /* alloc_zero_initialized_function */
&RealMemalign, /* alloc_aligned_function */
&RealRealloc, /* realloc_function */
&RealRealloc, /* realloc_unchecked_function */
&RealFree, /* free_function */
&RealFreeWithSize, /* free_with_size_function */
&RealFreeWithAlignment, /* free_with_alignment_function */
&RealMalloc, /* alloc_function */
&RealMalloc, /* alloc_unchecked_function */
&RealCalloc, /* alloc_zero_initialized_function */
&RealCalloc, /* alloc_zero_initialized_unchecked_function */
&RealMemalign, /* alloc_aligned_function */
&RealRealloc, /* realloc_function */
&RealRealloc, /* realloc_unchecked_function */
&RealFree, /* free_function */
&RealFreeWithSize, /* free_with_size_function */
&RealFreeWithAlignment, /* free_with_alignment_function */
&RealFreeWithSizeAndAlignment, /* free_with_size_and_alignment_function */
&RealSizeEstimate, /* get_size_estimate_function */
nullptr, /* good_size_function */
@@ -241,6 +241,20 @@ void* PartitionAllocFunctionsInternal<base_alloc_flags,
total);
}
// static
// Zero-initialized array allocation for the "unchecked" path: failures are
// reported as nullptr (kReturnNull) rather than via the new_handler or a
// crash.
template <partition_alloc::AllocFlags base_alloc_flags,
          partition_alloc::FreeFlags base_free_flags>
void* PartitionAllocFunctionsInternal<base_alloc_flags, base_free_flags>::
    CallocUnchecked(size_t n, size_t size, void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  // An unchecked calloc must fail gracefully on size_t overflow of
  // n * size. ValueOrDie() on an invalid CheckedNumeric would CHECK-crash,
  // so validate first and return nullptr — consistent with
  // GlibcUncheckedCalloc, which also returns nullptr on overflow.
  const auto total = partition_alloc::internal::base::CheckMul(n, size);
  if (!total.IsValid()) [[unlikely]] {
    return nullptr;
  }
  return Allocator()
      ->AllocInline<base_alloc_flags |
                    partition_alloc::AllocFlags::kReturnNull |
                    partition_alloc::AllocFlags::kZeroFill>(
          total.ValueOrDie());
}
// static
template <partition_alloc::AllocFlags base_alloc_flags,
partition_alloc::FreeFlags base_free_flags>
@@ -643,8 +657,9 @@ void ConfigurePartitions(
scheduler_loop_quarantine_global_config,
partition_alloc::internal::SchedulerLoopQuarantineConfig
scheduler_loop_quarantine_thread_local_config,
EventuallyZeroFreedMemory eventually_zero_freed_memory,
FewerMemoryRegions fewer_memory_regions) {
partition_alloc::internal::SchedulerLoopQuarantineConfig
scheduler_loop_quarantine_for_advanced_memory_safety_checks_config,
EventuallyZeroFreedMemory eventually_zero_freed_memory) {
// Calling Get() is actually important, even if the return value isn't
// used, because it has a side effect of initializing the variable, if it
// wasn't already.
@@ -672,13 +687,12 @@ void ConfigurePartitions(
eventually_zero_freed_memory
? partition_alloc::PartitionOptions::kEnabled
: partition_alloc::PartitionOptions::kDisabled;
opts.fewer_memory_regions =
fewer_memory_regions ? partition_alloc::PartitionOptions::kEnabled
: partition_alloc::PartitionOptions::kDisabled;
opts.scheduler_loop_quarantine_global_config =
scheduler_loop_quarantine_global_config;
opts.scheduler_loop_quarantine_thread_local_config =
scheduler_loop_quarantine_thread_local_config;
opts.scheduler_loop_quarantine_for_advanced_memory_safety_checks_config =
scheduler_loop_quarantine_for_advanced_memory_safety_checks_config;
opts.memory_tagging = {
.enabled = enable_memory_tagging
? partition_alloc::PartitionOptions::kEnabled
@@ -43,6 +43,8 @@ class PartitionAllocFunctionsInternal {
static void* Calloc(size_t n, size_t size, void* context);
static void* CallocUnchecked(size_t n, size_t size, void* context);
static void* Memalign(size_t alignment, size_t size, void* context);
static void* AlignedAlloc(size_t size, size_t alignment, void* context);
@@ -102,6 +104,7 @@ class PartitionAllocFunctionsInternal {
&Malloc, // alloc_function
&MallocUnchecked, // alloc_unchecked_function
&Calloc, // alloc_zero_initialized_function
&CallocUnchecked, // alloc_zero_initialized_unchecked_function
&Memalign, // alloc_aligned_function
&Realloc, // realloc_function
&ReallocUnchecked, // realloc_unchecked_function
@@ -191,15 +194,18 @@ PA_ALWAYS_INLINE void ConfigurePartitionsForTesting() {
partition_alloc::internal::SchedulerLoopQuarantineConfig();
auto scheduler_loop_quarantine_thread_local_config =
partition_alloc::internal::SchedulerLoopQuarantineConfig();
auto scheduler_loop_quarantine_for_advanced_memory_safety_checks_config =
partition_alloc::internal::SchedulerLoopQuarantineConfig();
auto eventually_zero_freed_memory = EventuallyZeroFreedMemory(false);
auto fewer_memory_regions = FewerMemoryRegions(false);
ConfigurePartitions(enable_brp, brp_extra_extras_size, enable_memory_tagging,
memory_tagging_reporting_mode, distribution,
scheduler_loop_quarantine_global_config,
scheduler_loop_quarantine_thread_local_config,
eventually_zero_freed_memory, fewer_memory_regions);
ConfigurePartitions(
enable_brp, brp_extra_extras_size, enable_memory_tagging,
memory_tagging_reporting_mode, distribution,
scheduler_loop_quarantine_global_config,
scheduler_loop_quarantine_thread_local_config,
scheduler_loop_quarantine_for_advanced_memory_safety_checks_config,
eventually_zero_freed_memory);
}
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_INTERNAL_H_
#define PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_INTERNAL_H_
@@ -42,6 +42,14 @@ void* DelegatedAllocZeroInitializedFn(size_t n, size_t size, void* context) {
context);
}
// Forwards a zero-initialized unchecked allocation (calloc-like; nullptr on
// failure) to the installed delegate. PA_MUSTTAIL keeps this a tail call so
// the trampoline adds no stack frame.
void* DelegatedAllocZeroInitializedUncheckedFn(size_t n,
                                               size_t size,
                                               void* context) {
  PA_MUSTTAIL return GetDelegate()->alloc_zero_initialized_unchecked_function(
      n, size, context);
}
void* DelegatedAllocAlignedFn(size_t alignment, size_t size, void* context) {
const AllocatorDispatch* delegate = GetDelegate();
PA_MUSTTAIL return delegate->alloc_aligned_function(alignment, size, context);
@@ -227,6 +235,8 @@ const AllocatorDispatch AllocatorDispatch::default_dispatch = {
.alloc_function = &DelegatedAllocFn,
.alloc_unchecked_function = &DelegatedAllocUncheckedFn,
.alloc_zero_initialized_function = &DelegatedAllocZeroInitializedFn,
.alloc_zero_initialized_unchecked_function =
&DelegatedAllocZeroInitializedUncheckedFn,
.alloc_aligned_function = &DelegatedAllocAlignedFn,
.realloc_function = &DelegatedReallocFn,
.realloc_unchecked_function = &DelegatedReallocUncheckedFn,
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_WITH_ADVANCED_CHECKS_H_
#define PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_WITH_ADVANCED_CHECKS_H_
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "partition_alloc/shim/allocator_shim_dispatch_to_noop_on_free.h"
#include <cstddef>
@@ -39,6 +34,7 @@ AllocatorDispatch allocator_dispatch = {
nullptr, // alloc_function
nullptr, // alloc_unchecked_function
nullptr, // alloc_zero_initialized_function
nullptr, // alloc_zero_initialized_unchecked_function
nullptr, // alloc_aligned_function
nullptr, // realloc_function
nullptr, // realloc_unchecked_function
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_DISPATCH_TO_NOOP_ON_FREE_H_
#define PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_DISPATCH_TO_NOOP_ON_FREE_H_
@@ -72,6 +72,12 @@ void* UncheckedAlloc(size_t size) {
return chain_head->alloc_unchecked_function(size, nullptr);
}
void* UncheckedCalloc(size_t n, size_t size) {
const AllocatorDispatch* const chain_head = internal::GetChainHead();
return chain_head->alloc_zero_initialized_unchecked_function(n, size,
nullptr);
}
void* UncheckedRealloc(void* ptr, size_t size) {
const AllocatorDispatch* const chain_head = internal::GetChainHead();
return chain_head->realloc_unchecked_function(ptr, size, nullptr);
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#ifndef PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_INTERNALS_H_
#define PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_INTERNALS_H_

Some files were not shown because too many files have changed in this diff Show More