Update On Sun Aug 18 20:30:13 CEST 2024

This commit is contained in:
github-action[bot]
2024-08-18 20:30:13 +02:00
parent 34f260cfe3
commit 77df2e5cd4
2279 changed files with 67255 additions and 51193 deletions
+1 -1
View File
@@ -1 +1 @@
127.0.6533.64
128.0.6613.40
+10 -7
View File
@@ -61,7 +61,6 @@ Aldo Culquicondor <alculquicondor@gmail.com>
Alec Petridis <alecthechop@gmail.com>
Aleksandar Stojiljkovic <aleksandar.stojiljkovic@intel.com>
Aleksei Gurianov <gurianov@gmail.com>
Aleksey Khoroshilov <akhoroshilov@brave.com>
Alesandro Ortiz <alesandro@alesandroortiz.com>
Alessandro Astone <ales.astone@gmail.com>
Alex Chronopoulos <achronop@gmail.com>
@@ -209,7 +208,6 @@ Brendan Kirby <brendan.kirby@imgtec.com>
Brendan Long <self@brendanlong.com>
Brendon Tiszka <btiszka@gmail.com>
Brett Lewis <brettlewis@brettlewis.us>
Brian Clifton <clifton@brave.com>
Brian Dunn <brian@theophil.us>
Brian G. Merrell <bgmerrell@gmail.com>
Brian Konzman, SJ <b.g.konzman@gmail.com>
@@ -297,6 +295,7 @@ Daiwei Li <daiweili@suitabletech.com>
Damien Marié <damien@dam.io>
Dan McCombs <overridex@gmail.com>
Daniel Adams <msub2official@gmail.com>
Daniel Bertalan <dani@danielbertalan.dev>
Daniel Bevenius <daniel.bevenius@gmail.com>
Daniel Bomar <dbdaniel42@gmail.com>
Daniel Carvalho Liedke <dliedke@gmail.com>
@@ -311,6 +310,7 @@ Daniel Playfair Cal <daniel.playfair.cal@gmail.com>
Daniel Shaulov <dshaulov@ptc.com>
Daniel Trebbien <dtrebbien@gmail.com>
Daniel Waxweiler <daniel.waxweiler@gmail.com>
Daniel Zhao <zhaodani@amazon.com>
Dániel Bátyai <dbatyai@inf.u-szeged.hu>
Dániel Vince <vinced@inf.u-szeged.hu>
Daniil Suvorov <severecloud@gmail.com>
@@ -409,6 +409,7 @@ Erik Kurzinger <ekurzinger@gmail.com>
Erik Sjölund <erik.sjolund@gmail.com>
Eriq Augustine <eriq.augustine@gmail.com>
Ernesto Mudu <ernesto.mudu@gmail.com>
Ethan Chen <randomgamingdev@gmail.com>
Ethan Wong <bunnnywong@gmail.com>
Etienne Laurin <etienne@atnnn.com>
Eugene Kim <eugene70kim@gmail.com>
@@ -435,7 +436,6 @@ Finbar Crago <finbar.crago@gmail.com>
François Beaufort <beaufort.francois@gmail.com>
François Devatine <devatine@verizonmedia.com>
Francois Kritzinger <francoisk777@gmail.com>
Francois Marier <francois@brave.com>
Francois Rauch <leopardb@gmail.com>
Frankie Dintino <fdintino@theatlantic.com>
Franklin Ta <fta2012@gmail.com>
@@ -478,6 +478,7 @@ Greg Visser <gregvis@gmail.com>
Gregory Davis <gpdavis.chromium@gmail.com>
Grzegorz Czajkowski <g.czajkowski@samsung.com>
Guangzhen Li <guangzhen.li@intel.com>
Guobin Wu <wuguobin.1229@bytedance.com>
Gurpreet Kaur <k.gurpreet@samsung.com>
Gustav Tiger <gustav.tiger@sonymobile.com>
Gyuyoung Kim <gyuyoung.kim@navercorp.com>
@@ -631,6 +632,7 @@ Jesper Storm Bache <jsbache@gmail.com>
Jesper van den Ende <jespertheend@gmail.com>
Jesse Miller <jesse@jmiller.biz>
Jesus Sanchez-Palencia <jesus.sanchez-palencia.fernandez.fil@intel.com>
Jia Yu <yujia.1019@bytedance.com>
Jiadong Chen <chenjiadong@huawei.com>
Jiadong Zhu <jiadong.zhu@linaro.org>
Jiahao Lu <lujjjh@gmail.com>
@@ -686,6 +688,7 @@ John Kleinschmidt <kleinschmidtorama@gmail.com>
John Yani <vanuan@gmail.com>
John Yoo <nearbyh13@gmail.com>
Johnson Lin <johnson.lin@intel.com>
Jojo R <rjiejie@gmail.com>
Jon Jensen <jonj@netflix.com>
Jonathan Frazer <listedegarde@gmail.com>
Jonathan Garbee <jonathan@garbee.me>
@@ -734,6 +737,7 @@ Junmin Zhu <junmin.zhu@intel.com>
Junsang Mo <mojunsang26@gmail.com>
Junsong Li <ljs.darkfish@gmail.com>
Jun Wang <wangjuna@uniontech.com>
Jun Xu <jun1.xu@intel.com>
Jun Zeng <hjunzeng6@gmail.com>
Justin Okamoto <justmoto@amazon.com>
Justin Ribeiro <justin@justinribeiro.com>
@@ -933,7 +937,6 @@ Matthieu Rigolot <matthieu.rigolot@gmail.com>
Matthieu Vlad Hauglustaine <matt.hauglustaine@gmail.com>
Mattias Buelens <mattias.buelens@gmail.com>
Max Coplan <mchcopl@gmail.com>
Max Karolinskiy <max@brave.com>
Max Perepelitsyn <pph34r@gmail.com>
Max Schmitt <max@schmitt.mx>
Max Vujovic <mvujovic@adobe.com>
@@ -969,7 +972,6 @@ Mihai Tica <mitica@adobe.com>
Mike Pennisi <mike@mikepennisi.com>
Mike Tilburg <mtilburg@adobe.com>
Mikhail Pozdnyakov <mikhail.pozdnyakov@intel.com>
Mikhail Atuchin <matuchin@brave.com>
Milko Leporis <milko.leporis@imgtec.com>
Milton Chiang <milton.chiang@mediatek.com>
Milutin Smiljanic <msmiljanic.gm@gmail.com>
@@ -1110,7 +1112,6 @@ Po-Chun Chang <pochang0403@gmail.com>
Prakhar Shrivastav <p.shri@samsung.com>
Pramod Begur Srinath <pramod.bs@samsung.com>
Pranay Kumar <pranay.kumar@samsung.com>
Pranjal Jumde <pranjal@brave.com>
Prashant Hiremath <prashhir@cisco.com>
Prashant Nevase <prashant.n@samsung.com>
Prashant Patil <prashant.patil@imgtec.com>
@@ -1137,7 +1138,6 @@ Rahul Gupta <rahul.g@samsung.com>
Rahul Yadav <rahul.yadav@samsung.com>
Rajesh Mahindra <rmahindra@uber.com>
Rajneesh Rana <rajneesh.r@samsung.com>
Ralph Giles <rgiles@brave.com>
Raman Tenneti <raman.tenneti@gmail.com>
Ramkumar Gokarnesan <ramkumar.gokarnesan@gmail.com>
Ramkumar Ramachandra <artagnon@gmail.com>
@@ -1203,6 +1203,7 @@ Ryan Manuel <rfmanuel@gmail.com>
Ryan Norton <rnorton10@gmail.com>
Ryan Sleevi <ryan-chromium-dev@sleevi.com>
Ryan Yoakum <ryoakum@skobalt.com>
Ryan Huen <ryanhuenprivate@gmail.com>
Rye Zhang <ryezhang@tencent.com>
Ryo Ogawa <negibokken@gmail.com>
Ryuan Choi <ryuan.choi@samsung.com>
@@ -1386,6 +1387,7 @@ Takuya Kurimoto <takuya004869@gmail.com>
Tanay Chowdhury <tanay.c@samsung.com>
Tanvir Rizvi <tanvir.rizvi@samsung.com>
Tao Wang <tao.wang.2261@gmail.com>
Tao Xiong <taox4@illinois.edu>
Tapu Kumar Ghose <ghose.tapu@gmail.com>
Taylor Price <trprice@gmail.com>
Ted Kim <neot0000@gmail.com>
@@ -1611,6 +1613,7 @@ Akamai Inc. <*@akamai.com>
ARM Holdings <*@arm.com>
BlackBerry Limited <*@blackberry.com>
Bocoup <*@bocoup.com>
Brave Software Inc. <*@brave.com>
Canonical Limited <*@canonical.com>
Cloudflare, Inc. <*@cloudflare.com>
CloudMosa, Inc. <*@cloudmosa.com>
+635 -608
View File
File diff suppressed because it is too large Load Diff
+23 -11
View File
@@ -168,12 +168,6 @@ if (is_fuchsia) {
}
}
if (enable_pkeys && is_debug) {
config("no_stack_protector") {
cflags = [ "-fno-stack-protector" ]
}
}
buildflag_header("ios_cronet_buildflags") {
header = "ios_cronet_buildflags.h"
header_dir = "base/message_loop"
@@ -276,6 +270,7 @@ component("base") {
"containers/map_util.h",
"containers/small_map.h",
"containers/span.h",
"containers/span_or_size.h",
"containers/span_reader.h",
"containers/span_writer.h",
"containers/stack.h",
@@ -576,6 +571,8 @@ component("base") {
"profiler/stack_sampler.h",
"profiler/stack_sampling_profiler.cc",
"profiler/stack_sampling_profiler.h",
"profiler/stack_unwind_data.cc",
"profiler/stack_unwind_data.h",
"profiler/suspendable_thread_delegate.h",
"profiler/thread_delegate.h",
"profiler/unwinder.cc",
@@ -583,6 +580,7 @@ component("base") {
"rand_util.cc",
"rand_util.h",
"ranges/algorithm.h",
"ranges/from_range.h",
"ranges/functional.h",
"ranges/ranges.h",
"run_loop.cc",
@@ -629,7 +627,6 @@ component("base") {
"strings/string_number_conversions.cc",
"strings/string_number_conversions.h",
"strings/string_number_conversions_internal.h",
"strings/string_piece.h",
"strings/string_split.cc",
"strings/string_split.h",
"strings/string_split_internal.h",
@@ -825,7 +822,6 @@ component("base") {
"task/thread_pool/worker_thread_waitable_event.cc",
"task/thread_pool/worker_thread_waitable_event.h",
"task/updateable_sequenced_task_runner.h",
"template_util.h",
"test/scoped_logging_settings.h",
"test/spin_wait.h",
"third_party/cityhash/city.cc",
@@ -1063,6 +1059,10 @@ component("base") {
"//build/config/compiler:compiler_buildflags",
"//third_party/modp_b64",
]
if (!is_nacl) {
# Used by metrics/crc32, except on NaCl builds.
deps += [ "//third_party/zlib" ]
}
# `raw_ptr` cannot be made a component due to CRT symbol issues.
# Its gateway to being a component is through `//base`, so we have
@@ -1110,7 +1110,7 @@ component("base") {
]
# Base provides conversions between CXX types and base types (e.g.
# StringPiece).
# std::string_view).
public_deps += [ "//build/rust:cxx_cppdeps" ]
}
@@ -1265,7 +1265,6 @@ component("base") {
"android/jni_string.h",
"android/jni_utils.cc",
"android/jni_utils.h",
"android/jni_weak_ref.cc",
"android/jni_weak_ref.h",
"android/library_loader/library_loader_hooks.cc",
"android/library_loader/library_loader_hooks.h",
@@ -1282,6 +1281,17 @@ component("base") {
public_deps += [ "//third_party/jni_zero:jni_zero" ]
} # is_android || is_robolectric
if (is_android && !is_robolectric) {
# The robolectic toolchain doesn't provide the NDK headers required for data
# and function type definitions used by BinderApi et al.
sources += [
"android/binder.cc",
"android/binder.h",
"android/binder_box.cc",
"android/binder_box.h",
]
}
# Chromeos.
if (is_chromeos) {
sources += [
@@ -2517,7 +2527,7 @@ buildflag_header("protected_memory_buildflags") {
header_dir = "base/memory"
# Currently Protected Memory is only supported on Windows.
protected_memory_enabled = is_win
protected_memory_enabled = !is_component_build && is_win
flags = [ "PROTECTED_MEMORY_ENABLED=$protected_memory_enabled" ]
}
@@ -2560,6 +2570,8 @@ buildflag_header("sanitizer_buildflags") {
flags = [
"IS_HWASAN=$is_hwasan",
"IS_UBSAN=$is_ubsan",
"IS_UBSAN_SECURITY=$is_ubsan_security",
"USING_SANITIZER=$using_sanitizer",
]
}
+1
View File
@@ -25,6 +25,7 @@ include_rules = [
"+third_party/test_fonts",
# JSON Deserialization.
"+third_party/rust/serde_json_lenient/v0_2/wrapper",
"+third_party/zlib",
# These are implicitly brought in from the root, and we don't want them.
"-ipc",
-1
View File
@@ -1,5 +1,4 @@
lizeb@chromium.org
primiano@chromium.org
wfh@chromium.org
per-file allocator.gni=file://base/allocator/partition_allocator/OWNERS
@@ -5,7 +5,7 @@
#include "base/allocator/allocator_check.h"
#include "build/build_config.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#if BUILDFLAG(IS_WIN)
#include "partition_alloc/shim/winheap_stubs_win.h"
@@ -8,7 +8,7 @@
#include "base/check.h"
#include "base/dcheck_is_on.h"
#include "base/no_destructor.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/shim/allocator_shim.h"
#if DCHECK_IS_ON()
@@ -3,7 +3,8 @@
// found in the LICENSE file.
#include "base/allocator/dispatcher/internal/dispatch_data.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
namespace base::allocator::dispatcher::internal {
@@ -7,7 +7,7 @@
#include "base/base_export.h"
#include "build/build_config.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#if PA_BUILDFLAG(USE_PARTITION_ALLOC)
#include "partition_alloc/partition_alloc_hooks.h"
@@ -13,7 +13,7 @@
#include "base/allocator/dispatcher/subsystem.h"
#include "base/check.h"
#include "base/compiler_specific.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#if PA_BUILDFLAG(USE_PARTITION_ALLOC)
#include "partition_alloc/partition_alloc_allocation_data.h"
@@ -184,6 +184,20 @@ struct DispatcherImpl {
return reallocated_address;
}
static void* ReallocUncheckedFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
// Note: size == 0 actually performs free.
DoNotifyFreeForShim(address);
void* const reallocated_address = self->next->realloc_unchecked_function(
self->next, address, size, context);
DoNotifyAllocationForShim(reallocated_address, size);
return reallocated_address;
}
static void FreeFn(const AllocatorDispatch* self,
void* address,
void* context) {
@@ -193,7 +207,7 @@ struct DispatcherImpl {
// the address becomes available and can be allocated by another thread.
// That would be racy otherwise.
DoNotifyFreeForShim(address);
self->next->free_function(self->next, address, context);
MUSTTAIL return self->next->free_function(self->next, address, context);
}
static unsigned BatchMallocFn(const AllocatorDispatch* self,
@@ -217,8 +231,8 @@ struct DispatcherImpl {
DoNotifyFreeForShim(to_be_freed[i]);
}
self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
context);
MUSTTAIL return self->next->batch_free_function(self->next, to_be_freed,
num_to_be_freed, context);
}
static void FreeDefiniteSizeFn(const AllocatorDispatch* self,
@@ -226,14 +240,16 @@ struct DispatcherImpl {
size_t size,
void* context) {
DoNotifyFreeForShim(address);
self->next->free_definite_size_function(self->next, address, size, context);
MUSTTAIL return self->next->free_definite_size_function(self->next, address,
size, context);
}
static void TryFreeDefaultFn(const AllocatorDispatch* self,
void* address,
void* context) {
DoNotifyFreeForShim(address);
self->next->try_free_default_function(self->next, address, context);
MUSTTAIL return self->next->try_free_default_function(self->next, address,
context);
}
static void* AlignedMallocFn(const AllocatorDispatch* self,
@@ -248,6 +264,18 @@ struct DispatcherImpl {
return address;
}
static void* AlignedMallocUncheckedFn(const AllocatorDispatch* self,
size_t size,
size_t alignment,
void* context) {
void* const address = self->next->aligned_malloc_unchecked_function(
self->next, size, alignment, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void* AlignedReallocFn(const AllocatorDispatch* self,
void* address,
size_t size,
@@ -263,11 +291,27 @@ struct DispatcherImpl {
return address;
}
static void* AlignedReallocUncheckedFn(const AllocatorDispatch* self,
void* address,
size_t size,
size_t alignment,
void* context) {
// Note: size == 0 actually performs free.
DoNotifyFreeForShim(address);
address = self->next->aligned_realloc_unchecked_function(
self->next, address, size, alignment, context);
DoNotifyAllocationForShim(address, size);
return address;
}
static void AlignedFreeFn(const AllocatorDispatch* self,
void* address,
void* context) {
DoNotifyFreeForShim(address);
self->next->aligned_free_function(self->next, address, context);
MUSTTAIL return self->next->aligned_free_function(self->next, address,
context);
}
ALWAYS_INLINE static void DoNotifyAllocationForShim(void* address,
@@ -308,23 +352,26 @@ std::tuple<ObserverTypes*...> DispatcherImpl<ObserverTypes...>::s_observers;
#if PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
template <typename... ObserverTypes>
AllocatorDispatch DispatcherImpl<ObserverTypes...>::allocator_dispatch_ = {
AllocFn, // alloc_function
AllocUncheckedFn, // alloc_unchecked_function
AllocZeroInitializedFn, // alloc_zero_initialized_function
AllocAlignedFn, // alloc_aligned_function
ReallocFn, // realloc_function
FreeFn, // free_function
nullptr, // get_size_estimate_function
nullptr, // good_size_function
nullptr, // claimed_address_function
BatchMallocFn, // batch_malloc_function
BatchFreeFn, // batch_free_function
FreeDefiniteSizeFn, // free_definite_size_function
TryFreeDefaultFn, // try_free_default_function
AlignedMallocFn, // aligned_malloc_function
AlignedReallocFn, // aligned_realloc_function
AlignedFreeFn, // aligned_free_function
nullptr // next
AllocFn, // alloc_function
AllocUncheckedFn, // alloc_unchecked_function
AllocZeroInitializedFn, // alloc_zero_initialized_function
AllocAlignedFn, // alloc_aligned_function
ReallocFn, // realloc_function
ReallocUncheckedFn, // realloc_unchecked_function
FreeFn, // free_function
nullptr, // get_size_estimate_function
nullptr, // good_size_function
nullptr, // claimed_address_function
BatchMallocFn, // batch_malloc_function
BatchFreeFn, // batch_free_function
FreeDefiniteSizeFn, // free_definite_size_function
TryFreeDefaultFn, // try_free_default_function
AlignedMallocFn, // aligned_malloc_function
AlignedMallocUncheckedFn, // aligned_malloc_unchecked_function
AlignedReallocFn, // aligned_realloc_function
AlignedReallocUncheckedFn, // aligned_realloc_unchecked_function
AlignedFreeFn, // aligned_free_function
nullptr // next
};
#endif // PA_BUILDFLAG(USE_ALLOCATOR_SHIM)
@@ -10,7 +10,7 @@
#include "base/allocator/dispatcher/memory_tagging.h"
#include "base/allocator/dispatcher/subsystem.h"
#include "base/base_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
namespace base::allocator::dispatcher {
@@ -7,7 +7,7 @@
#include <mach/mach.h>
#include <malloc/malloc.h>
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/shim/early_zone_registration_constants.h"
// BASE_EXPORT tends to be defined as soon as anything from //base is included.
@@ -13,8 +13,8 @@
#include "build/build_config.h"
#include "build/chromecast_buildflags.h"
#include "build/chromeos_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/time/time.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_root.h"
#include "partition_alloc/shim/allocator_shim_dispatch_to_noop_on_free.h"
@@ -71,25 +71,7 @@ const base::FeatureParam<DanglingPtrType> kDanglingPtrTypeParam{
&kDanglingPtrTypeOption,
};
#if PA_BUILDFLAG(USE_STARSCAN)
// If enabled, PCScan is turned on by default for all partitions that don't
// disable it explicitly.
BASE_FEATURE(kPartitionAllocPCScan,
"PartitionAllocPCScan",
FEATURE_DISABLED_BY_DEFAULT);
#endif // PA_BUILDFLAG(USE_STARSCAN)
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// If enabled, PCScan is turned on only for the browser's malloc partition.
BASE_FEATURE(kPartitionAllocPCScanBrowserOnly,
"PartitionAllocPCScanBrowserOnly",
FEATURE_DISABLED_BY_DEFAULT);
// If enabled, PCScan is turned on only for the renderer's malloc partition.
BASE_FEATURE(kPartitionAllocPCScanRendererOnly,
"PartitionAllocPCScanRendererOnly",
FEATURE_DISABLED_BY_DEFAULT);
// Use a larger maximum thread cache cacheable bucket size.
BASE_FEATURE(kPartitionAllocLargeThreadCacheSize,
"PartitionAllocLargeThreadCacheSize",
@@ -266,37 +248,6 @@ BASE_FEATURE(kLowerPAMemoryLimitForNonMainRenderers,
"LowerPAMemoryLimitForNonMainRenderers",
FEATURE_DISABLED_BY_DEFAULT);
// If enabled, switches PCScan scheduling to a mutator-aware scheduler. Does not
// affect whether PCScan is enabled itself.
BASE_FEATURE(kPartitionAllocPCScanMUAwareScheduler,
"PartitionAllocPCScanMUAwareScheduler",
FEATURE_ENABLED_BY_DEFAULT);
// If enabled, PCScan frees unconditionally all quarantined objects.
// This is a performance testing feature.
BASE_FEATURE(kPartitionAllocPCScanImmediateFreeing,
"PartitionAllocPCScanImmediateFreeing",
FEATURE_DISABLED_BY_DEFAULT);
// If enabled, PCScan clears eagerly (synchronously) on free().
BASE_FEATURE(kPartitionAllocPCScanEagerClearing,
"PartitionAllocPCScanEagerClearing",
FEATURE_DISABLED_BY_DEFAULT);
// In addition to heap, scan also the stack of the current mutator.
BASE_FEATURE(kPartitionAllocPCScanStackScanning,
"PartitionAllocPCScanStackScanning",
#if PA_BUILDFLAG(STACK_SCAN_SUPPORTED)
FEATURE_ENABLED_BY_DEFAULT
#else
FEATURE_DISABLED_BY_DEFAULT
#endif // PA_BUILDFLAG(STACK_SCAN_SUPPORTED)
);
BASE_FEATURE(kPartitionAllocDCScan,
"PartitionAllocDCScan",
FEATURE_DISABLED_BY_DEFAULT);
// Whether to straighten free lists for larger slot spans in PurgeMemory() ->
// ... -> PartitionPurgeSlotSpan().
BASE_FEATURE(kPartitionAllocStraightenLargerSlotSpanFreeLists,
@@ -9,11 +9,10 @@
#include "base/compiler_specific.h"
#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"
#include "base/strings/string_piece.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/time/time.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_root.h"
namespace base {
@@ -59,13 +58,7 @@ enum class DanglingPtrType {
extern const BASE_EXPORT base::FeatureParam<DanglingPtrType>
kDanglingPtrTypeParam;
#if PA_BUILDFLAG(USE_STARSCAN)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScan);
#endif
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanBrowserOnly);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanRendererOnly);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeThreadCacheSize);
BASE_EXPORT int GetPartitionAllocLargeThreadCacheSizeValue();
BASE_EXPORT int GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid();
@@ -162,11 +155,6 @@ extern const BASE_EXPORT base::FeatureParam<BucketDistributionMode>
kPartitionAllocBucketDistributionParam;
BASE_EXPORT BASE_DECLARE_FEATURE(kLowerPAMemoryLimitForNonMainRenderers);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanMUAwareScheduler);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanStackScanning);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDCScan);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanImmediateFreeing);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanEagerClearing);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUseDenserDistribution);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMemoryReclaimer);
@@ -42,12 +42,12 @@
#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
#include "partition_alloc/allocation_guard.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/dangling_raw_ptr_checks.h"
#include "partition_alloc/memory_reclaimer.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/partition_alloc_base/debug/alias.h"
#include "partition_alloc/partition_alloc_base/threading/platform_thread.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
@@ -60,14 +60,6 @@
#include "partition_alloc/stack/stack.h"
#include "partition_alloc/thread_cache.h"
#if PA_BUILDFLAG(USE_STARSCAN)
#include "partition_alloc/shim/nonscannable_allocator.h"
#include "partition_alloc/starscan/pcscan.h"
#include "partition_alloc/starscan/pcscan_scheduling.h"
#include "partition_alloc/starscan/stats_collector.h"
#include "partition_alloc/starscan/stats_reporter.h"
#endif // PA_BUILDFLAG(USE_STARSCAN)
#if BUILDFLAG(IS_ANDROID)
#include "base/system/sys_info.h"
#endif
@@ -120,129 +112,10 @@ constexpr base::TimeDelta kFirstPAPurgeOrReclaimDelay = base::Minutes(1);
namespace switches {
[[maybe_unused]] constexpr char kRendererProcess[] = "renderer";
constexpr char kZygoteProcess[] = "zygote";
#if PA_BUILDFLAG(USE_STARSCAN)
constexpr char kGpuProcess[] = "gpu-process";
constexpr char kUtilityProcess[] = "utility";
#endif
} // namespace switches
#if PA_BUILDFLAG(USE_STARSCAN)
#if BUILDFLAG(ENABLE_BASE_TRACING)
constexpr const char* ScannerIdToTracingString(
partition_alloc::internal::StatsCollector::ScannerId id) {
switch (id) {
case partition_alloc::internal::StatsCollector::ScannerId::kClear:
return "PCScan.Scanner.Clear";
case partition_alloc::internal::StatsCollector::ScannerId::kScan:
return "PCScan.Scanner.Scan";
case partition_alloc::internal::StatsCollector::ScannerId::kSweep:
return "PCScan.Scanner.Sweep";
case partition_alloc::internal::StatsCollector::ScannerId::kOverall:
return "PCScan.Scanner";
case partition_alloc::internal::StatsCollector::ScannerId::kNumIds:
__builtin_unreachable();
}
}
constexpr const char* MutatorIdToTracingString(
partition_alloc::internal::StatsCollector::MutatorId id) {
switch (id) {
case partition_alloc::internal::StatsCollector::MutatorId::kClear:
return "PCScan.Mutator.Clear";
case partition_alloc::internal::StatsCollector::MutatorId::kScanStack:
return "PCScan.Mutator.ScanStack";
case partition_alloc::internal::StatsCollector::MutatorId::kScan:
return "PCScan.Mutator.Scan";
case partition_alloc::internal::StatsCollector::MutatorId::kOverall:
return "PCScan.Mutator";
case partition_alloc::internal::StatsCollector::MutatorId::kNumIds:
__builtin_unreachable();
}
}
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
// Inject TRACE_EVENT_BEGIN/END, TRACE_COUNTER1, and UmaHistogramTimes.
class StatsReporterImpl final : public partition_alloc::StatsReporter {
public:
void ReportTraceEvent(
partition_alloc::internal::StatsCollector::ScannerId id,
[[maybe_unused]] partition_alloc::internal::base::PlatformThreadId tid,
int64_t start_time_ticks_internal_value,
int64_t end_time_ticks_internal_value) override {
#if BUILDFLAG(ENABLE_BASE_TRACING)
// TRACE_EVENT_* macros below drop most parameters when tracing is
// disabled at compile time.
const char* tracing_id = ScannerIdToTracingString(id);
const TimeTicks start_time =
TimeTicks::FromInternalValue(start_time_ticks_internal_value);
const TimeTicks end_time =
TimeTicks::FromInternalValue(end_time_ticks_internal_value);
TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id),
perfetto::ThreadTrack::ForThread(tid), start_time);
TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid),
end_time);
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
void ReportTraceEvent(
partition_alloc::internal::StatsCollector::MutatorId id,
[[maybe_unused]] partition_alloc::internal::base::PlatformThreadId tid,
int64_t start_time_ticks_internal_value,
int64_t end_time_ticks_internal_value) override {
#if BUILDFLAG(ENABLE_BASE_TRACING)
// TRACE_EVENT_* macros below drop most parameters when tracing is
// disabled at compile time.
const char* tracing_id = MutatorIdToTracingString(id);
const TimeTicks start_time =
TimeTicks::FromInternalValue(start_time_ticks_internal_value);
const TimeTicks end_time =
TimeTicks::FromInternalValue(end_time_ticks_internal_value);
TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id),
perfetto::ThreadTrack::ForThread(tid), start_time);
TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid),
end_time);
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
void ReportSurvivedQuarantineSize(size_t survived_size) override {
TRACE_COUNTER1(kTraceCategory, "PCScan.SurvivedQuarantineSize",
survived_size);
}
void ReportSurvivedQuarantinePercent(double survived_rate) override {
// Multiply by 1000 since TRACE_COUNTER1 expects integer. In catapult,
// divide back.
// TODO(bikineev): Remove after switching to perfetto.
TRACE_COUNTER1(kTraceCategory, "PCScan.SurvivedQuarantinePercent",
1000 * survived_rate);
}
void ReportStats(const char* stats_name, int64_t sample_in_usec) override {
TimeDelta sample = Microseconds(sample_in_usec);
UmaHistogramTimes(stats_name, sample);
}
private:
static constexpr char kTraceCategory[] = "partition_alloc";
};
#endif // PA_BUILDFLAG(USE_STARSCAN)
} // namespace
#if PA_BUILDFLAG(USE_STARSCAN)
void RegisterPCScanStatsReporter() {
static StatsReporterImpl s_reporter;
static bool registered = false;
DCHECK(!registered);
partition_alloc::internal::PCScan::RegisterStatsReporter(&s_reporter);
registered = true;
}
#endif // PA_BUILDFLAG(USE_STARSCAN)
namespace {
void RunThreadCachePeriodicPurge() {
@@ -551,7 +424,7 @@ std::string ExtractDanglingPtrSignature(std::string stacktrace) {
for (size_t i = 0; i < lines.size(); ++i) {
for (const auto& patterns : callee_patterns) {
if (ranges::all_of(patterns, [&](std::string_view pattern) {
return lines[i].find(pattern) != StringPiece::npos;
return lines[i].find(pattern) != std::string_view::npos;
})) {
caller_index = i + 1;
}
@@ -855,96 +728,6 @@ void InstallUnretainedDanglingRawPtrChecks() {
}
}
namespace {
#if PA_BUILDFLAG(USE_STARSCAN)
void SetProcessNameForPCScan(const std::string& process_type) {
const char* name = [&process_type] {
if (process_type.empty()) {
// Empty means browser process.
return "Browser";
}
if (process_type == switches::kRendererProcess) {
return "Renderer";
}
if (process_type == switches::kGpuProcess) {
return "Gpu";
}
if (process_type == switches::kUtilityProcess) {
return "Utility";
}
return static_cast<const char*>(nullptr);
}();
if (name) {
partition_alloc::internal::PCScan::SetProcessName(name);
}
}
bool EnablePCScanForMallocPartitionsIfNeeded() {
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
partition_alloc::internal::base::PlatformThread::SetThreadNameHook(
&base::PlatformThread::SetName);
using Config = partition_alloc::internal::PCScan::InitConfig;
DCHECK(base::FeatureList::GetInstance());
if (base::FeatureList::IsEnabled(base::features::kPartitionAllocPCScan)) {
allocator_shim::EnablePCScan({Config::WantedWriteProtectionMode::kEnabled,
Config::SafepointMode::kEnabled});
base::allocator::RegisterPCScanStatsReporter();
return true;
}
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
return false;
}
bool EnablePCScanForMallocPartitionsInBrowserProcessIfNeeded() {
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
using Config = partition_alloc::internal::PCScan::InitConfig;
DCHECK(base::FeatureList::GetInstance());
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocPCScanBrowserOnly)) {
const Config::WantedWriteProtectionMode wp_mode =
base::FeatureList::IsEnabled(base::features::kPartitionAllocDCScan)
? Config::WantedWriteProtectionMode::kEnabled
: Config::WantedWriteProtectionMode::kDisabled;
#if !PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
CHECK_EQ(Config::WantedWriteProtectionMode::kDisabled, wp_mode)
<< "DCScan is currently only supported on Linux based systems";
#endif
allocator_shim::EnablePCScan({wp_mode, Config::SafepointMode::kEnabled});
base::allocator::RegisterPCScanStatsReporter();
return true;
}
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
return false;
}
bool EnablePCScanForMallocPartitionsInRendererProcessIfNeeded() {
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
using Config = partition_alloc::internal::PCScan::InitConfig;
DCHECK(base::FeatureList::GetInstance());
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocPCScanRendererOnly)) {
const Config::WantedWriteProtectionMode wp_mode =
base::FeatureList::IsEnabled(base::features::kPartitionAllocDCScan)
? Config::WantedWriteProtectionMode::kEnabled
: Config::WantedWriteProtectionMode::kDisabled;
#if !PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
CHECK_EQ(Config::WantedWriteProtectionMode::kDisabled, wp_mode)
<< "DCScan is currently only supported on Linux based systems";
#endif
allocator_shim::EnablePCScan({wp_mode, Config::SafepointMode::kDisabled});
base::allocator::RegisterPCScanStatsReporter();
return true;
}
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
return false;
}
#endif // PA_BUILDFLAG(USE_STARSCAN)
} // namespace
void ReconfigurePartitionForKnownProcess(const std::string& process_type) {
DCHECK_NE(process_type, switches::kZygoteProcess);
// TODO(keishi): Move the code to enable BRP back here after Finch
@@ -1279,62 +1062,16 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
// 100 is a reasonable cap for this value.
UmaHistogramCounts100("Memory.PartitionAlloc.PartitionRoot.ExtrasSize",
int(extras_size));
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// If BRP is not enabled, check if any of PCScan flags is enabled.
[[maybe_unused]] bool scan_enabled = false;
#if PA_BUILDFLAG(USE_STARSCAN)
if (!brp_config.enable_brp) {
scan_enabled = EnablePCScanForMallocPartitionsIfNeeded();
// No specified process type means this is the Browser process.
if (process_type.empty()) {
scan_enabled = scan_enabled ||
EnablePCScanForMallocPartitionsInBrowserProcessIfNeeded();
}
if (process_type == switches::kRendererProcess) {
scan_enabled = scan_enabled ||
EnablePCScanForMallocPartitionsInRendererProcessIfNeeded();
}
if (scan_enabled) {
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocPCScanStackScanning)) {
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
partition_alloc::internal::PCScan::EnableStackScanning();
#endif // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocPCScanImmediateFreeing)) {
partition_alloc::internal::PCScan::EnableImmediateFreeing();
}
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocPCScanEagerClearing)) {
partition_alloc::internal::PCScan::SetClearType(
partition_alloc::internal::PCScan::ClearType::kEager);
}
SetProcessNameForPCScan(process_type);
}
}
#endif // PA_BUILDFLAG(USE_STARSCAN)
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if PA_BUILDFLAG(USE_STARSCAN)
#if !defined(__MUSL__)
// This call causes hanging in pthread_getattr_np() under qemu-user, see
// https://www.openwall.com/lists/musl/2017/06/15/9.
partition_alloc::internal::StackTopRegistry::Get().NotifyThreadCreated(
partition_alloc::internal::GetStackTop());
#endif
// Non-quarantinable partition is dealing with hot V8's zone allocations.
// In case PCScan is enabled in Renderer, enable thread cache on this
// partition. At the same time, thread cache on the main(malloc) partition
// must be disabled, because only one partition can have it on.
if (scan_enabled && process_type == switches::kRendererProcess) {
allocator_shim::NonQuarantinableAllocator::Instance()
.root()
->EnableThreadCacheIfSupported();
} else
#endif // PA_BUILDFLAG(USE_STARSCAN)
{
allocator_shim::internal::PartitionAllocMalloc::Allocator()
->EnableThreadCacheIfSupported();
}
allocator_shim::internal::PartitionAllocMalloc::Allocator()
->EnableThreadCacheIfSupported();
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocLargeEmptySlotSpanRing)) {
@@ -1436,20 +1173,6 @@ void PartitionAllocSupport::ReconfigureAfterTaskRunnerInit(
#endif // PA_CONFIG(THREAD_CACHE_SUPPORTED) &&
// PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if PA_BUILDFLAG(USE_STARSCAN)
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocPCScanMUAwareScheduler)) {
// Assign PCScan a task-based scheduling backend.
static base::NoDestructor<
partition_alloc::internal::MUAwareTaskBasedBackend>
mu_aware_task_based_backend{
partition_alloc::internal::PCScan::scheduler(),
&partition_alloc::internal::PCScan::PerformDelayedScan};
partition_alloc::internal::PCScan::scheduler().SetNewSchedulingBackend(
*mu_aware_task_based_backend.get());
}
#endif // PA_BUILDFLAG(USE_STARSCAN)
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
base::allocator::StartMemoryReclaimer(
base::SingleThreadTaskRunner::GetCurrentDefault());
@@ -14,16 +14,12 @@
#include "base/synchronization/lock.h"
#include "base/task/sequenced_task_runner.h"
#include "base/thread_annotations.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/thread_cache.h"
namespace base::allocator {
#if PA_BUILDFLAG(USE_STARSCAN)
BASE_EXPORT void RegisterPCScanStatsReporter();
#endif
// Starts a periodic timer on the current thread to purge all thread caches.
BASE_EXPORT void StartThreadCachePeriodicPurge();
@@ -9,35 +9,48 @@ noparent = True
# `partition_alloc` can depend only on itself, via its `include_dirs`.
include_rules = [ "+partition_alloc" ]
# TODO(crbug.com/40158212): Depending on what is tested, split the tests in
# between chromium and partition_alloc. Remove those exceptions:
specific_include_rules = {
".*_(perf|unit)test\.cc$": [
"+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
"+base/allocator/dispatcher/dispatcher.h",
"+base/debug/allocation_trace.h",
"+base/debug/debugging_buildflags.h",
"+base/debug/proc_maps_linux.h",
"+base/system/sys_info.h",
"+base/test/gtest_util.h",
"+base/timer/lap_timer.h",
"+base/win/windows_version.h",
# Dependencies on //testing:
".*_(perf|unit)?test.*\.(h|cc)": [
"+testing/gmock/include/gmock/gmock.h",
"+testing/gtest/include/gtest/gtest.h",
"+testing/perf/perf_result_reporter.h",
],
"extended_api\.cc$": [
"gtest_util.h": [
"+testing/gtest/include/gtest/gtest.h",
],
# Dependencies on //base:
"extended_api\.cc": [
"+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
],
"raw_(ptr|ref)_unittest\.cc$": [
"+base",
"+third_party/abseil-cpp/absl/types/optional.h",
"+third_party/abseil-cpp/absl/types/variant.h",
"partition_alloc_perftest\.cc": [
"+base/allocator/dispatcher/dispatcher.h",
"+base/debug/allocation_trace.h",
"+base/debug/debugging_buildflags.h",
"+base/timer/lap_timer.h",
],
"raw_ptr_test_support\.h$": [
"+testing/gmock/include/gmock/gmock.h",
"+third_party/abseil-cpp/absl/types/optional.h",
"partition_lock_perftest\.cc": [
"+base/timer/lap_timer.h",
],
"use_death_tests\.h$": [
"+testing/gtest/include/gtest/gtest.h",
"raw_ptr_unittest\.cc": [
"+base/allocator/partition_alloc_features.h",
"+base/allocator/partition_alloc_support.h",
"+base/cpu.h",
"+base/debug/asan_service.h",
"+base/metrics/histogram_base.h",
"+base/test/bind.h",
"+base/test/gtest_util.h",
"+base/test/memory/dangling_ptr_instrumentation.h",
"+base/test/scoped_feature_list.h",
"+base/types/to_address.h",
],
"raw_ref_unittest\.cc": [
"+base/debug/asan_service.h",
"+base/memory/raw_ptr_asan_service.h",
"+base/test/gtest_util.h",
],
}
@@ -13,6 +13,16 @@ PRESUBMIT_VERSION = '2.0.0'
# chromium repository. PRESUBMIT.py is executed from chromium.
_PARTITION_ALLOC_BASE_PATH = 'base/allocator/partition_allocator/src/'
# Filter for C/C++ files.
def c_cpp_files(file):
return file.LocalPath().endswith(('.h', '.hpp', '.c', '.cc', '.cpp'))
# Filter for GN files.
def gn_files(file):
return file.LocalPath().endswith(('.gn', '.gni'))
# This is adapted from Chromium's PRESUBMIT.py. The differences are:
# - Base path: It is relative to the partition_alloc's source directory instead
# of chromium.
@@ -99,25 +109,104 @@ def CheckForIncludeGuards(input_api, output_api):
# overrides the default build settings and forward the dependencies to
# partition_alloc.
def CheckNoExternalImportInGn(input_api, output_api):
def gn_files(file):
return file.LocalPath().endswith('.gn') or \
file.LocalPath().endswith('.gni')
# Match and capture <path> from import("<path>").
import_re = input_api.re.compile(r'^ *import\("([^"]+)"\)')
errors = []
for f in input_api.AffectedSourceFiles(gn_files):
for line_number, line in enumerate(input_api.ReadFile(f).splitlines()):
for line_number, line in f.ChangedContents():
match = import_re.search(line)
if not match:
continue
import_path = match.group(1)
if import_path.startswith('//build_overrides/'):
continue;
continue
if not import_path.startswith('//'):
continue;
errors.append(output_api.PresubmitError(
'%s:%d\nPartitionAlloc disallow external import: %s' %
(f.LocalPath(), line_number + 1, import_path)))
return errors;
# partition_alloc still supports C++17, because Skia still uses C++17.
def CheckCpp17CompatibleHeaders(input_api, output_api):
CPP_20_HEADERS = [
"barrier",
"bit",
"compare",
"format",
"numbers",
"ranges",
"semaphore",
"source_location",
"span",
"stop_token",
"syncstream",
"version",
]
CPP_23_HEADERS = [
"expected",
"flat_map",
"flat_set",
"generator",
"mdspan",
"print",
"spanstream",
"stacktrace",
"stdatomic.h",
"stdfloat",
]
errors = []
for f in input_api.AffectedSourceFiles(c_cpp_files):
# for line_number, line in f.ChangedContents():
for line_number, line in enumerate(f.NewContents()):
for header in CPP_20_HEADERS:
if not "#include <%s>" % header in line:
continue
errors.append(
output_api.PresubmitError(
'%s:%d\nPartitionAlloc disallows C++20 headers: <%s>'
% (f.LocalPath(), line_number + 1, header)))
for header in CPP_23_HEADERS:
if not "#include <%s>" % header in line:
continue
errors.append(
output_api.PresubmitError(
'%s:%d\nPartitionAlloc disallows C++23 headers: <%s>'
% (f.LocalPath(), line_number + 1, header)))
return errors
def CheckCpp17CompatibleKeywords(input_api, output_api):
CPP_20_KEYWORDS = [
"concept",
"consteval",
"constinit",
"co_await",
"co_return",
"co_yield",
"requires",
]
# Note: C++23 doesn't introduce new keywords.
errors = []
for f in input_api.AffectedSourceFiles(c_cpp_files):
for line_number, line in f.ChangedContents():
for keyword in CPP_20_KEYWORDS:
if not keyword in line:
continue
# Skip if part of a comment
if '//' in line and line.index('//') < line.index(keyword):
continue
# Make sure there are word separators around the keyword:
regex = r'\b%s\b' % keyword
if not input_api.re.search(regex, line):
continue
errors.append(
output_api.PresubmitError(
'%s:%d\nPartitionAlloc disallows C++20 keywords: %s'
% (f.LocalPath(), line_number + 1, keyword)))
return errors
@@ -119,7 +119,7 @@ partition page that holds metadata (32B struct per partition page).
of each super page).
* In some configurations, PartitionAlloc stores more metadata than can
fit in the one system page at the front. These are the bitmaps for
StarScan and `MTECheckedPtr<T>`, and they are relegated to the head of
`MTECheckedPtr<T>`, and they are relegated to the head of
what would otherwise be usable space for slot spans. One, both, or
none of these bitmaps may be present, depending on build
configuration, runtime configuration, and type of allocation.
@@ -1,9 +0,0 @@
# Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file will be used to check out PartitionAlloc and to build it as
# standalone library. In this case, PartitionAlloc needs to define
# build_with_chromium. If building PartitionAlloc as a part of chromium,
# chromium will provide build_with_chromium=true.
build_with_chromium = false
@@ -2,7 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//build_overrides/build.gni")
# By definition, PartitionAlloc standalone builds outside of chromium.
build_with_chromium = false
# This is the default build configuration when building PartitionAlloc
# as a standalone library.
@@ -14,9 +15,12 @@ use_allocator_shim_default = false
enable_backup_ref_ptr_support_default = false
enable_backup_ref_ptr_slow_checks_default = false
enable_dangling_raw_ptr_checks_default = false
assert_cpp20_default = true
# This is the default build configuration for pointers/raw_ptr*.
raw_ptr_zero_on_construct_default = true
raw_ptr_zero_on_move_default = true
raw_ptr_zero_on_destruct_default = false
# PartitionAlloc needs to support cpp17 for standalone builds, as long as Skia
# supports it.
assert_cpp20_default = false
@@ -169,7 +169,7 @@ tracking a non-contiguous set of allocations using a bitmap.
The usable area of a super page in which slot spans
reside. While generally this means "everything between the first
and last guard partition pages in a super page," the presence of
other metadata (e.g. StarScan bitmaps) can bump the starting offset
other metadata can bump the starting offset
forward. While this term is entrenched in the code, the team
considers it suboptimal and is actively looking for a replacement.
@@ -8,7 +8,6 @@
# - skia: //gn/BUILDCONFIG.gn
# - chromium: //build/config/BUILDCONFIG.gn
is_partition_alloc_standalone = true
build_with_chromium = false
is_asan = false
@@ -13,7 +13,7 @@ config("default") {
"-fvisibility=hidden",
]
cflags_cc = [
"-std=c++20",
"-std=c++17",
"-fvisibility-inlines-hidden",
]
cflags_objcc = cflags_cc
@@ -101,7 +101,8 @@ declare_args() {
# Puts the regular and BRP pools right next to each other, so that we can
# check "belongs to one of the two pools" with a single bitmask operation.
glue_core_pools = false
# TODO(crbug.com/350104111): Fix ios-simulator failures to remove `!is_ios`.
glue_core_pools = use_partition_alloc_as_malloc && !is_ios
# Introduces pointer compression support in PA. These are 4-byte
# pointers that can point within the core pools (regular and BRP).
@@ -245,9 +246,6 @@ declare_args() {
use_full_mte = false
}
# *Scan is currently only used by Chromium, and supports only 64-bit.
use_starscan = build_with_chromium && has_64_bit_pointers
stack_scan_supported =
current_cpu == "x64" || current_cpu == "x86" || current_cpu == "arm" ||
current_cpu == "arm64" || current_cpu == "riscv64"
@@ -260,8 +258,11 @@ stack_scan_supported =
# Do not clear the following, as they can function outside of PartitionAlloc
# - has_64_bit_pointers
# - has_memory_tagging
if (!use_partition_alloc) {
if (!use_partition_alloc ||
(defined(toolchain_allows_use_partition_alloc_as_malloc) &&
!toolchain_allows_use_partition_alloc_as_malloc)) {
use_partition_alloc_as_malloc = false
glue_core_pools = false
enable_backup_ref_ptr_support = false
use_raw_ptr_backup_ref_impl = false
use_asan_backup_ref_ptr = false
@@ -272,8 +273,8 @@ if (!use_partition_alloc) {
enable_dangling_raw_ptr_feature_flag = false
enable_pointer_subtraction_check = false
backup_ref_ptr_poison_oob_ptr = false
backup_ref_ptr_extra_oob_checks = false
enable_backup_ref_ptr_instance_tracer = false
use_starscan = false
use_full_mte = false
}
@@ -409,17 +410,3 @@ declare_args() {
# Embedders may opt-out of using C++ 20 build.
assert_cpp20 = assert_cpp20_default
}
declare_args() {
# Enables compilation of the freelist dispatcher, which we'll use to
# carry out runtime evaluation of PartitionAlloc's two freelist
# implementations: the existing encoded-next freelist and the new
# pool offset freelist. When false, the latter is not built.
#
# This is being exposed as a GN arg because of an undiagnosed crashy
# interaction with Mac PGO builders: crbug.com/338094768#comment20
use_freelist_dispatcher = has_64_bit_pointers && false
}
assert(has_64_bit_pointers || !use_freelist_dispatcher,
"freelist dispatcher can't be used without 64-bit pointers")
@@ -15,7 +15,30 @@ if (!defined(partition_alloc_remove_configs)) {
partition_alloc_remove_configs = []
}
# Add partition_alloc.gni and import it for partition_alloc configs.
# Enables compilation of the freelist dispatcher, which we'll use to
# carry out runtime evaluation of PartitionAlloc's two freelist
# implementations: the existing encoded-next freelist and the new
# pool offset freelist. When false, the latter is not built.
use_freelist_dispatcher = has_64_bit_pointers
assert(has_64_bit_pointers || !use_freelist_dispatcher,
"freelist dispatcher can't be used without 64-bit pointers")
record_alloc_info = false
# GWP-ASan is tied to BRP's enablement.
enable_gwp_asan_support = enable_backup_ref_ptr_support
# Pools are a logical concept when address space is 32-bit.
glue_core_pools = glue_core_pools && has_64_bit_pointers
# Pointer compression requires 64-bit pointers.
enable_pointer_compression =
enable_pointer_compression_support && has_64_bit_pointers
# Duplicates the setup Chromium uses to define `DCHECK_IS_ON()`, but avails it
# as a buildflag.
dchecks_are_on = is_debug || dcheck_always_on
# TODO(crbug.com/40276913): Split PartitionAlloc into a public and
# private parts. The public config would include add the "./include" dir and
@@ -90,11 +113,81 @@ config("wexit_time_destructors") {
}
}
source_set("buildflag_macro") {
sources = [ "buildflag.h" ]
public_configs = [ ":public_includes" ]
}
pa_buildflag_header("buildflags") {
header = "buildflags.h"
flags = [
"ASSERT_CPP_20=$assert_cpp20",
"BACKUP_REF_PTR_EXTRA_OOB_CHECKS=$backup_ref_ptr_extra_oob_checks",
"BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr",
"ENABLE_BACKUP_REF_PTR_FEATURE_FLAG=$enable_backup_ref_ptr_feature_flag",
"ENABLE_BACKUP_REF_PTR_INSTANCE_TRACER=$enable_backup_ref_ptr_instance_tracer",
"ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks",
"ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support",
"ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks",
"ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG=$enable_dangling_raw_ptr_feature_flag",
"ENABLE_GWP_ASAN_SUPPORT=$enable_gwp_asan_support",
"ENABLE_PKEYS=$enable_pkeys",
"ENABLE_POINTER_ARITHMETIC_TRAIT_CHECK=$enable_pointer_arithmetic_trait_check",
"ENABLE_POINTER_COMPRESSION=$enable_pointer_compression",
"ENABLE_POINTER_SUBTRACTION_CHECK=$enable_pointer_subtraction_check",
"ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata",
"ENABLE_THREAD_ISOLATION=$enable_pkeys",
"FORCE_ENABLE_RAW_PTR_EXCLUSION=$force_enable_raw_ptr_exclusion",
"FORWARD_THROUGH_MALLOC=$forward_through_malloc",
"GLUE_CORE_POOLS=$glue_core_pools",
"HAS_64_BIT_POINTERS=$has_64_bit_pointers",
"HAS_MEMORY_TAGGING=$has_memory_tagging",
"IS_CASTOS=$is_castos",
"IS_CAST_ANDROID=$is_cast_android",
"IS_CHROMEOS=$is_chromeos",
"IS_DEBUG=$is_debug",
"RAW_PTR_ZERO_ON_CONSTRUCT=$raw_ptr_zero_on_construct",
"RAW_PTR_ZERO_ON_DESTRUCT=$raw_ptr_zero_on_destruct",
"RAW_PTR_ZERO_ON_MOVE=$raw_ptr_zero_on_move",
"RECORD_ALLOC_INFO=$record_alloc_info",
"STACK_SCAN_SUPPORTED=$stack_scan_supported",
"USE_ALLOCATOR_SHIM=$use_allocator_shim",
"USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
"USE_FREELIST_DISPATCHER=$use_freelist_dispatcher",
"USE_FREESLOT_BITMAP=$use_freeslot_bitmap",
"USE_FULL_MTE=$use_full_mte",
"USE_LARGE_EMPTY_SLOT_SPAN_RING=$use_large_empty_slot_span_ring",
"USE_PARTITION_ALLOC=$use_partition_alloc",
"USE_PARTITION_ALLOC_AS_MALLOC=$use_partition_alloc_as_malloc",
"USE_RAW_PTR_ASAN_UNOWNED_IMPL=$use_raw_ptr_asan_unowned_impl",
"USE_RAW_PTR_BACKUP_REF_IMPL=$use_raw_ptr_backup_ref_impl",
"USE_RAW_PTR_HOOKABLE_IMPL=$use_raw_ptr_hookable_impl",
"DCHECKS_ARE_ON=$dchecks_are_on",
"EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
"DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
"CAN_UNWIND_WITH_FRAME_POINTERS=$can_unwind_with_frame_pointers",
]
}
# TODO(crbug.com/41481467): Remove this alias.
# Temporary alias, the time to update partition_alloc dependants.
# Currently needed by pdfium and dawn.
source_set("partition_alloc_buildflags") {
public = [ "partition_alloc_buildflags.h" ]
public_deps = [ ":buildflags" ]
}
# Provides platform and architecture detections from the compiler defines.
source_set("build_config") {
sources = [
"build_config.h",
"buildflag.h",
]
public_deps = [
":buildflag_macro", # Provides 'PA_BUILDFLAG()' macro.
":buildflags", # Provides `IS_CHROMEOS` definition.
]
public_configs = [ ":public_includes" ]
}
component("raw_ptr") {
@@ -146,135 +239,6 @@ component("raw_ptr") {
configs += [ ":dependants_extra_warnings" ]
}
pa_buildflag_header("partition_alloc_buildflags") {
header = "partition_alloc_buildflags.h"
_record_alloc_info = false
# GWP-ASan is tied to BRP's enablement.
_enable_gwp_asan_support = enable_backup_ref_ptr_support
# Pools are a logical concept when address space is 32-bit.
_glue_core_pools = glue_core_pools && has_64_bit_pointers
# Pointer compression requires 64-bit pointers.
_enable_pointer_compression =
enable_pointer_compression_support && has_64_bit_pointers
# TODO(crbug.com/40158212): Need to refactor the following buildflags.
# The buildflags (except RECORD_ALLOC_INFO) are used by both chrome and
# PartitionAlloc. For PartitionAlloc,
# gen/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h
# defines and PartitionAlloc includes the header file. For chrome,
# gen/base/allocator/buildflags.h defines and chrome includes.
flags = [
"HAS_64_BIT_POINTERS=$has_64_bit_pointers",
"HAS_MEMORY_TAGGING=$has_memory_tagging",
"USE_ALLOCATOR_SHIM=$use_allocator_shim",
"USE_LARGE_EMPTY_SLOT_SPAN_RING=$use_large_empty_slot_span_ring",
"USE_PARTITION_ALLOC=$use_partition_alloc",
"USE_PARTITION_ALLOC_AS_MALLOC=$use_partition_alloc_as_malloc",
"ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support",
"ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks",
"ENABLE_BACKUP_REF_PTR_FEATURE_FLAG=$enable_backup_ref_ptr_feature_flag",
"ENABLE_BACKUP_REF_PTR_INSTANCE_TRACER=$enable_backup_ref_ptr_instance_tracer",
"ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks",
"ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG=$enable_dangling_raw_ptr_feature_flag",
"ENABLE_POINTER_SUBTRACTION_CHECK=$enable_pointer_subtraction_check",
"ENABLE_POINTER_ARITHMETIC_TRAIT_CHECK=$enable_pointer_arithmetic_trait_check",
"BACKUP_REF_PTR_EXTRA_OOB_CHECKS=$backup_ref_ptr_extra_oob_checks",
"BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr",
"USE_RAW_PTR_BACKUP_REF_IMPL=$use_raw_ptr_backup_ref_impl",
"USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
"USE_RAW_PTR_ASAN_UNOWNED_IMPL=$use_raw_ptr_asan_unowned_impl",
"USE_RAW_PTR_HOOKABLE_IMPL=$use_raw_ptr_hookable_impl",
"ENABLE_GWP_ASAN_SUPPORT=$_enable_gwp_asan_support",
# TODO(bartekn): Remove once PDFium switches to
# USE_RAW_PTR_ASAN_UNOWNED_IMPL.
"USE_ASAN_UNOWNED_PTR=$use_raw_ptr_asan_unowned_impl",
"FORCE_ENABLE_RAW_PTR_EXCLUSION=$force_enable_raw_ptr_exclusion",
"USE_FULL_MTE=$use_full_mte",
"RECORD_ALLOC_INFO=$_record_alloc_info",
"USE_FREESLOT_BITMAP=$use_freeslot_bitmap",
"GLUE_CORE_POOLS=$_glue_core_pools",
"ENABLE_POINTER_COMPRESSION=$_enable_pointer_compression",
"ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata",
"USE_FREELIST_DISPATCHER=$use_freelist_dispatcher",
"USE_STARSCAN=$use_starscan",
"STACK_SCAN_SUPPORTED=$stack_scan_supported",
"ENABLE_PKEYS=$enable_pkeys",
"ENABLE_THREAD_ISOLATION=$enable_pkeys",
"FORWARD_THROUGH_MALLOC=$forward_through_malloc",
"ASSERT_CPP_20=$assert_cpp20",
"IS_DEBUG=$is_debug",
]
}
pa_buildflag_header("raw_ptr_buildflags") {
header = "raw_ptr_buildflags.h"
flags = [
"RAW_PTR_ZERO_ON_CONSTRUCT=$raw_ptr_zero_on_construct",
"RAW_PTR_ZERO_ON_MOVE=$raw_ptr_zero_on_move",
"RAW_PTR_ZERO_ON_DESTRUCT=$raw_ptr_zero_on_destruct",
]
}
pa_buildflag_header("chromecast_buildflags") {
header = "chromecast_buildflags.h"
# TODO(https://crbug.com/41481467): Remove the "PA_" prefix, because it
# will already be part of the define and the `PA_BUILDFLAG` macro.
flags = [
"PA_IS_CAST_ANDROID=$is_cast_android",
"PA_IS_CASTOS=$is_castos",
]
}
pa_buildflag_header("chromeos_buildflags") {
header = "chromeos_buildflags.h"
flags = [ "IS_CHROMEOS=$is_chromeos" ]
}
pa_buildflag_header("debugging_buildflags") {
header = "debugging_buildflags.h"
header_dir = rebase_path(".", "//") + "/partition_alloc_base/debug"
# Duplicates the setup Chromium uses to define `DCHECK_IS_ON()`,
# but avails it as a buildflag.
_dcheck_is_on = is_debug || dcheck_always_on
# TODO(https://crbug.com/41481467): Remove the "PA_" prefix, because it
# will already be part of the define and the `PA_BUILDFLAG` macro.
flags = [
"PA_DCHECK_IS_ON=$_dcheck_is_on",
"PA_EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
"PA_DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
"PA_CAN_UNWIND_WITH_FRAME_POINTERS=$can_unwind_with_frame_pointers",
]
}
group("buildflags") {
public_deps = [
":chromecast_buildflags",
":chromeos_buildflags",
":debugging_buildflags",
":partition_alloc_buildflags",
":raw_ptr_buildflags",
]
public_configs = [ ":public_includes" ]
}
if (is_clang_or_gcc) {
config("partition_alloc_implementation") {
# See also: `partition_alloc_base/component_export.h`
@@ -463,29 +427,6 @@ if (is_clang_or_gcc) {
"yield_processor.h",
]
if (use_starscan) {
sources += [
"starscan/logging.h",
"starscan/pcscan.cc",
"starscan/pcscan.h",
"starscan/pcscan_internal.cc",
"starscan/pcscan_internal.h",
"starscan/pcscan_scheduling.cc",
"starscan/pcscan_scheduling.h",
"starscan/raceful_worklist.h",
"starscan/scan_loop.h",
"starscan/snapshot.cc",
"starscan/snapshot.h",
"starscan/starscan_fwd.h",
"starscan/state_bitmap.h",
"starscan/stats_collector.cc",
"starscan/stats_collector.h",
"starscan/stats_reporter.h",
"starscan/write_protector.cc",
"starscan/write_protector.h",
]
}
defines = []
if (is_win) {
sources += [
@@ -534,10 +475,7 @@ if (is_clang_or_gcc) {
public_deps = [
":build_config",
":chromecast_buildflags",
":chromeos_buildflags",
":debugging_buildflags",
":partition_alloc_buildflags",
":buildflags",
]
configs += [
@@ -547,10 +485,6 @@ if (is_clang_or_gcc) {
]
deps = [ ":allocator_base" ]
public_configs = []
if (is_android) {
# tagging.cc requires __arm_mte_set_* functions.
deps += [ "//third_party/cpu_features:ndk_compat" ]
}
if (is_fuchsia) {
deps += [
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.kernel:fuchsia.kernel_cpp",
@@ -672,7 +606,9 @@ if (is_clang_or_gcc) {
"partition_alloc_base/threading/platform_thread_win.cc",
"partition_alloc_base/time/time_win.cc",
]
} else if (is_posix) {
}
if (is_posix) {
sources += [
"partition_alloc_base/debug/stack_trace_posix.cc",
"partition_alloc_base/files/file_util.h",
@@ -703,7 +639,9 @@ if (is_clang_or_gcc) {
} else {
sources += [ "partition_alloc_base/time/time_now_posix.cc" ]
}
} else if (is_fuchsia) {
}
if (is_fuchsia) {
sources += [
"partition_alloc_base/fuchsia/fuchsia_logging.cc",
"partition_alloc_base/fuchsia/fuchsia_logging.h",
@@ -717,6 +655,7 @@ if (is_clang_or_gcc) {
"partition_alloc_base/time/time_fuchsia.cc",
]
}
if (is_android) {
# Only android build requires native_library, and native_library depends
# on file_path. So file_path is added if is_android = true.
@@ -729,6 +668,7 @@ if (is_clang_or_gcc) {
"partition_alloc_base/native_library_posix.cc",
]
}
if (is_apple) {
# Apple-specific utilities
sources += [
@@ -757,10 +697,7 @@ if (is_clang_or_gcc) {
public_deps = [
":build_config",
":chromecast_buildflags",
":chromeos_buildflags",
":debugging_buildflags",
":partition_alloc_buildflags",
":buildflags",
]
public_configs = [ ":public_includes" ]
configs += [
@@ -819,14 +756,10 @@ if (is_clang_or_gcc) {
"shim/allocator_shim_dispatch_to_noop_on_free.h",
]
if (use_partition_alloc) {
shim_sources += [
"shim/allocator_shim_default_dispatch_to_partition_alloc.cc",
"shim/nonscannable_allocator.cc",
]
shim_headers += [
"shim/allocator_shim_default_dispatch_to_partition_alloc.h",
"shim/nonscannable_allocator.h",
]
shim_sources +=
[ "shim/allocator_shim_default_dispatch_to_partition_alloc.cc" ]
shim_headers +=
[ "shim/allocator_shim_default_dispatch_to_partition_alloc.h" ]
}
if (is_android) {
shim_headers += [
@@ -929,6 +862,199 @@ if (is_clang_or_gcc) {
]
}
} # if (is_clang_or_gcc)
# TODO(crbug.com/40158212): After making partition_alloc a standalone library,
# move test code here. i.e. test("partition_alloc_tests") { ... } and
# test("partition_alloc_perftests").
# TODO(crbug.com/40158212): Consider supporting building tests outside of
# chromium and having a dedicated 'partition_alloc_unittests' target.
if (build_with_chromium) {
source_set("unittests") {
testonly = true
sources = [ "partition_alloc_base/test/gtest_util.h" ]
if (is_linux || is_chromeos || is_android) {
sources += [
"partition_alloc_base/debug/proc_maps_linux.cc",
"partition_alloc_base/debug/proc_maps_linux.h",
]
}
if (is_android) {
sources += [
"partition_alloc_base/files/file_path_pa_unittest.cc",
"partition_alloc_base/native_library_pa_unittest.cc",
]
}
if (use_partition_alloc) {
sources += [
"address_pool_manager_unittest.cc",
"address_space_randomization_unittest.cc",
"compressed_pointer_unittest.cc",
"freeslot_bitmap_unittest.cc",
"hardening_unittest.cc",
"lightweight_quarantine_unittest.cc",
"memory_reclaimer_unittest.cc",
"page_allocator_unittest.cc",
"partition_alloc_base/bits_pa_unittest.cc",
"partition_alloc_base/component_export_pa_unittest.cc",
"partition_alloc_base/cpu_pa_unittest.cc",
"partition_alloc_base/logging_pa_unittest.cc",
"partition_alloc_base/no_destructor_pa_unittest.cc",
"partition_alloc_base/rand_util_pa_unittest.cc",
"partition_alloc_base/scoped_clear_last_error_pa_unittest.cc",
"partition_alloc_base/strings/cstring_builder_pa_unittest.cc",
"partition_alloc_base/strings/safe_sprintf_pa_unittest.cc",
"partition_alloc_base/strings/string_util_pa_unittest.cc",
"partition_alloc_base/strings/stringprintf_pa_unittest.cc",
"partition_alloc_base/thread_annotations_pa_unittest.cc",
"partition_alloc_unittest.cc",
"partition_lock_unittest.cc",
"reverse_bytes_unittest.cc",
"slot_start_unittest.cc",
"thread_cache_unittest.cc",
"use_death_tests.h",
]
}
if (is_fuchsia) {
sources +=
[ "partition_alloc_base/fuchsia/fuchsia_logging_pa_unittest.cc" ]
}
if (use_allocator_shim) {
sources += [
"shim/allocator_shim_default_dispatch_to_partition_alloc_unittest.cc",
]
if (is_win) {
sources += [ "shim/winheap_stubs_win_unittest.cc" ]
}
if (is_ios) {
sources += [
"shim/allocator_interception_apple_unittest.mm",
"shim/malloc_zone_functions_apple_unittest.cc",
]
}
}
if ((is_android || is_linux) && target_cpu == "arm64") {
cflags = [
"-Xclang",
"-target-feature",
"-Xclang",
"+mte",
]
}
if (enable_pkeys && is_debug && !is_component_build) {
# This test requires RELRO, which is not enabled in component builds.
# Also, require a debug build, since we only disable stack protectors in
# debug builds in PartitionAlloc (see below why it's needed).
sources += [ "thread_isolation/pkey_unittest.cc" ]
# We want to test the pkey code without access to memory that is not
# pkey-tagged. This will allow us to catch unintended memory accesses
# that could break our security assumptions. The stack protector reads a
# value from the TLS which won't be pkey-tagged, hence disabling it for
# the test.
configs += [ ":no_stack_protector" ]
}
frameworks = []
if (is_mac) {
frameworks += [
"Foundation.framework",
"OpenCL.framework",
]
}
deps = [
":partition_alloc",
":test_support",
"//testing/gmock",
"//testing/gtest",
]
configs -= partition_alloc_remove_configs
configs += partition_alloc_add_configs
}
}
source_set("test_support") {
testonly = true
sources = [
"extended_api.cc",
"extended_api.h",
"partition_alloc_base/threading/platform_thread_for_testing.h",
"partition_alloc_for_testing.h",
"pointers/raw_ptr_counting_impl_for_test.h",
]
if (is_posix) {
sources += [
"partition_alloc_base/threading/platform_thread_posix_for_testing.cc",
]
}
if (is_fuchsia) {
sources += [
"partition_alloc_base/threading/platform_thread_fuchsia_for_testing.cc",
"partition_alloc_base/threading/platform_thread_posix_for_testing.cc",
]
}
if (is_win) {
sources +=
[ "partition_alloc_base/threading/platform_thread_win_for_testing.cc" ]
}
if (is_apple) {
sources += [
"partition_alloc_base/threading/platform_thread_apple_for_testing.mm",
]
}
if (is_linux || is_chromeos) {
sources += [
"partition_alloc_base/threading/platform_thread_linux_for_testing.cc",
]
}
if (is_android) {
sources += [
"partition_alloc_base/threading/platform_thread_android_for_testing.cc",
]
}
public_deps = [
":arm_bti_testfunctions",
":buildflags",
":partition_alloc",
":raw_ptr",
]
public_configs = []
if (is_fuchsia) {
public_deps += [
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.logger:fuchsia.logger_hlcpp",
"//third_party/fuchsia-sdk/sdk/pkg/fit",
"//third_party/fuchsia-sdk/sdk/pkg/sync",
"//third_party/fuchsia-sdk/sdk/pkg/sys_cpp",
"//third_party/fuchsia-sdk/sdk/pkg/zx",
]
# Needed for users of spinning_mutex.h, which for performance reasons,
# contains inlined calls to `libsync` inside the header file.
# It appends an entry to the "libs" section of the dependent target.
public_configs += [ ":fuchsia_sync_lib" ]
}
configs -= partition_alloc_remove_configs
configs += partition_alloc_add_configs
}
source_set("arm_bti_testfunctions") {
testonly = true
sources = []
if (target_cpu == "arm64" && (is_linux || is_android)) {
sources = [
"arm_bti_test_functions.S",
"arm_bti_test_functions.h",
]
}
}
@@ -8,7 +8,7 @@
#include <stdint.h>
#include "partition_alloc/build_config.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#if defined(__MUSL__)
// Musl does not support ifunc.
@@ -11,11 +11,10 @@
#include "partition_alloc/address_space_stats.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/notreached.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/reservation_offset_table.h"
@@ -125,7 +124,7 @@ void AddressPoolManager::Pool::Initialize(uintptr_t ptr, size_t length) {
PA_CHECK(!(ptr & kSuperPageOffsetMask));
PA_CHECK(!(length & kSuperPageOffsetMask));
address_begin_ = ptr;
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
address_end_ = ptr + length;
PA_DCHECK(address_begin_ < address_end_);
#endif
@@ -204,7 +203,7 @@ uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
bit_hint_ = end_bit;
}
uintptr_t address = address_begin_ + beg_bit * kSuperPageSize;
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_DCHECK(address + requested_size <= address_end_);
#endif
return address;
@@ -246,7 +245,7 @@ void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
PA_DCHECK(!(free_size & kSuperPageOffsetMask));
PA_DCHECK(address_begin_ <= address);
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_DCHECK(address + free_size <= address_end_);
#endif
@@ -10,12 +10,11 @@
#include "partition_alloc/address_pool_manager_types.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_lock.h"
@@ -162,7 +161,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
size_t total_bits_ = 0;
uintptr_t address_begin_ = 0;
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
uintptr_t address_end_ = 0;
#endif
@@ -4,7 +4,7 @@
#include "partition_alloc/address_pool_manager_bitmap.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_constants.h"
#if !PA_BUILDFLAG(HAS_64_BIT_POINTERS)
@@ -11,9 +11,9 @@
#include <limits>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_lock.h"
@@ -5,7 +5,7 @@
#include "partition_alloc/address_space_randomization.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/random.h"
@@ -178,8 +178,18 @@ AslrMask(uintptr_t bits) {
}
#else // !PA_BUILDFLAG(IS_AIX) && !PA_BUILDFLAG(PA_ARCH_CPU_BIG_ENDIAN)
#if PA_BUILDFLAG(IS_LINUX)
// Little-endian Linux PPC has 48 bits of virtual addressing. Use 46.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t ASLRMask() {
return AslrMask(46);
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t ASLROffset() {
return AslrAddress(0);
}
#else
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
return AslrMask(46);
}
@@ -187,6 +197,8 @@ AslrMask(uintptr_t bits) {
return AslrAddress(0);
}
#endif
#endif // !PA_BUILDFLAG(IS_AIX) && !PA_BUILDFLAG(PA_ARCH_CPU_BIG_ENDIAN)
#elif PA_BUILDFLAG(PA_ARCH_CPU_S390X)
@@ -7,8 +7,8 @@
#include <cstddef>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
namespace partition_alloc {
@@ -48,7 +48,7 @@
#include "partition_alloc/buildflag.h" // IWYU pragma: export
// Definition of PA_BUILDFLAG(IS_CHROMEOS).
#include "partition_alloc/chromeos_buildflags.h" // IWYU pragma: export
#include "partition_alloc/buildflags.h" // IWYU pragma: export
// Clangd does not detect PA_BUILDFLAG_INTERNAL_* indirect usage, so mark the
// header as "always_keep" to avoid "unused include" warning.
@@ -116,6 +116,6 @@ template("pa_buildflag_header") {
"visibility",
])
public_deps = [ "${_current_dir}:build_config" ]
public_deps = [ "${_current_dir}:buildflag_macro" ]
}
}
@@ -3,7 +3,8 @@
// found in the LICENSE file.
#include "partition_alloc/compressed_pointer.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#if PA_BUILDFLAG(ENABLE_POINTER_COMPRESSION)
@@ -5,14 +5,14 @@
#ifndef PARTITION_ALLOC_COMPRESSED_POINTER_H_
#define PARTITION_ALLOC_COMPRESSED_POINTER_H_
#include <bit>
#include <climits>
#include <type_traits>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#if PA_BUILDFLAG(ENABLE_POINTER_COMPRESSION)
@@ -78,7 +78,7 @@ constexpr bool IsDecayedSame =
class CompressedPointerBaseGlobal final {
public:
static constexpr size_t kUsefulBits =
std::countr_zero(PartitionAddressSpace::CorePoolsSize());
base::bits::CountrZero(PartitionAddressSpace::CorePoolsSize());
static_assert(kUsefulBits >= sizeof(uint32_t) * CHAR_BIT);
static constexpr size_t kBitsToShift =
kUsefulBits - sizeof(uint32_t) * CHAR_BIT;
@@ -232,7 +232,7 @@ class PA_TRIVIAL_ABI CompressedPointer final {
static constexpr size_t kMinimalRequiredAlignment = 8;
static_assert((1 << kOverallBitsToShift) == kMinimalRequiredAlignment);
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_DCHECK(reinterpret_cast<uintptr_t>(ptr) % kMinimalRequiredAlignment ==
0);
PA_DCHECK(internal::CompressedPointerBaseGlobal::IsSet());
@@ -243,7 +243,7 @@ class PA_TRIVIAL_ABI CompressedPointer final {
PA_DCHECK(!ptr ||
(base & kCorePoolsBaseMask) ==
(reinterpret_cast<uintptr_t>(ptr) & kCorePoolsBaseMask));
#endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
const auto uptr = reinterpret_cast<uintptr_t>(ptr);
// Shift the pointer and truncate.
@@ -9,11 +9,10 @@
#include <cstdint>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/freeslot_bitmap.h"
#include "partition_alloc/partition_alloc-inl.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
@@ -177,7 +176,7 @@ class EncodedNextFreelistEntry {
// SetNext() is either called on the freelist head, when provisioning new
// slots, or when GetNext() has been called before, no need to pass the
// size.
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
// Regular freelists always point to an entry within the same super page.
//
// This is most likely a PartitionAlloc bug if this triggers.
@@ -186,7 +185,7 @@ class EncodedNextFreelistEntry {
(SlotStartPtr2Addr(entry) & kSuperPageBaseMask))) {
FreelistCorruptionDetected(0);
}
#endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
encoded_next_ = EncodedFreelistPtr(entry);
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
@@ -4,7 +4,7 @@
#include "partition_alloc/extended_api.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
#include "partition_alloc/thread_cache.h"
@@ -5,7 +5,7 @@
#ifndef PARTITION_ALLOC_EXTENDED_API_H_
#define PARTITION_ALLOC_EXTENDED_API_H_
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_root.h"
#include "partition_alloc/partition_stats.h"
#include "partition_alloc/thread_cache.h"
@@ -9,10 +9,10 @@
#include <cstdint>
#include <utility>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/freeslot_bitmap_constants.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_constants.h"
#if PA_BUILDFLAG(USE_FREESLOT_BITMAP)
@@ -92,7 +92,7 @@ PA_ALWAYS_INLINE void FreeSlotBitmapReset(uintptr_t begin_addr,
*cell &= ~CellWithAOne(bit_index);
}
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
// Checks if the cells that are meant to contain only unset bits are really 0.
auto [begin_cell, begin_bit_index] =
GetFreeSlotBitmapCellPtrAndBitIndex(begin_addr);
@@ -131,7 +131,7 @@ PA_ALWAYS_INLINE void FreeSlotBitmapReset(uintptr_t begin_addr,
for (FreeSlotBitmapCellType* cell = begin_cell; cell < end_cell; ++cell) {
PA_DCHECK(*cell == 0u);
}
#endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
}
} // namespace partition_alloc::internal
@@ -7,9 +7,9 @@
#include <cstdint>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_alloc_forward.h"
#include "partition_alloc/reservation_offset_table.h"
@@ -19,17 +19,21 @@
namespace partition_alloc {
namespace {
PartitionOptions GwpAsanPartitionOptions() {
PartitionOptions options;
options.backup_ref_ptr = PartitionOptions::kEnabled;
return options;
}
} // namespace
// static
void* GwpAsanSupport::MapRegion(size_t slot_count,
std::vector<uint16_t>& free_list) {
PA_CHECK(slot_count > 0);
constexpr PartitionOptions kConfig = []() {
PartitionOptions opts;
opts.backup_ref_ptr = PartitionOptions::kEnabled;
return opts;
}();
static internal::base::NoDestructor<PartitionRoot> root(kConfig);
static internal::base::NoDestructor<PartitionRoot> root(
GwpAsanPartitionOptions());
const size_t kSlotSize = 2 * internal::SystemPageSize();
uint16_t bucket_index = PartitionRoot::SizeToBucketIndex(
@@ -5,8 +5,8 @@
#ifndef PARTITION_ALLOC_GWP_ASAN_SUPPORT_H_
#define PARTITION_ALLOC_GWP_ASAN_SUPPORT_H_
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#if PA_BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)
@@ -6,28 +6,23 @@
#define PARTITION_ALLOC_IN_SLOT_METADATA_H_
#include <atomic>
#include <bit>
#include <cstddef>
#include <cstdint>
#include <limits>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/dangling_raw_ptr_checks.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_alloc_forward.h"
#include "partition_alloc/tagging.h"
#if PA_BUILDFLAG(IS_APPLE)
#include "partition_alloc/partition_alloc_base/bits.h"
#endif // PA_BUILDFLAG(IS_APPLE)
namespace partition_alloc::internal {
// Aligns up (on 8B boundary) `in_slot_metadata_size` on Mac as a workaround for
@@ -40,10 +35,10 @@ namespace partition_alloc::internal {
// Placed outside `PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)`
// intentionally to accommodate usage in contexts also outside
// this gating.
PA_ALWAYS_INLINE size_t
AlignUpInSlotMetadataSizeForApple(size_t in_slot_metadata_size) {
PA_ALWAYS_INLINE constexpr size_t AlignUpInSlotMetadataSizeForApple(
size_t in_slot_metadata_size) {
#if PA_BUILDFLAG(IS_APPLE)
return internal::base::bits::AlignUp<size_t>(in_slot_metadata_size, 8);
return base::bits::AlignUp<size_t>(in_slot_metadata_size, 8);
#else
return in_slot_metadata_size;
#endif // PA_BUILDFLAG(IS_APPLE)
@@ -171,9 +166,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
std::numeric_limits<CountType>::max());
static constexpr auto kPtrInc =
SafeShift<CountType>(1, std::countr_zero(kPtrCountMask));
SafeShift<CountType>(1, base::bits::CountrZero(kPtrCountMask));
static constexpr auto kUnprotectedPtrInc =
SafeShift<CountType>(1, std::countr_zero(kUnprotectedPtrCountMask));
SafeShift<CountType>(1, base::bits::CountrZero(kUnprotectedPtrCountMask));
PA_ALWAYS_INLINE explicit InSlotMetadata(bool needs_mac11_malloc_size_hack);
@@ -550,7 +545,7 @@ PA_ALWAYS_INLINE InSlotMetadata* InSlotMetadataPointer(uintptr_t slot_start,
if (PA_LIKELY(slot_start & SystemPageOffsetMask())) {
uintptr_t refcount_address =
slot_start + slot_size - sizeof(InSlotMetadata);
#if PA_BUILDFLAG(PA_DCHECK_IS_ON) || \
#if PA_BUILDFLAG(DCHECKS_ARE_ON) || \
PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
PA_CHECK(refcount_address % alignof(InSlotMetadata) == 0);
#endif
@@ -563,7 +558,7 @@ PA_ALWAYS_INLINE InSlotMetadata* InSlotMetadataPointer(uintptr_t slot_start,
(slot_start & kSuperPageBaseMask) + SystemPageSize() * 2);
size_t index = ((slot_start & kSuperPageOffsetMask) >> SystemPageShift())
<< GetInSlotMetadataIndexMultiplierShift();
#if PA_BUILDFLAG(PA_DCHECK_IS_ON) || \
#if PA_BUILDFLAG(DCHECKS_ARE_ON) || \
PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
PA_CHECK(sizeof(InSlotMetadata) * index <= SystemPageSize());
#endif
@@ -575,7 +570,7 @@ PA_ALWAYS_INLINE InSlotMetadata* InSlotMetadataPointer(uintptr_t slot_start,
static inline constexpr size_t kInSlotMetadataSizeAdjustment =
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
sizeof(InSlotMetadata);
AlignUpInSlotMetadataSizeForApple(sizeof(InSlotMetadata));
#else
0ul;
#endif
@@ -26,7 +26,7 @@ PartitionRoot& InternalAllocatorRoot();
// A class that meets C++ named requirements, Allocator.
template <typename T>
InternalAllocator<T>::value_type* InternalAllocator<T>::allocate(
typename InternalAllocator<T>::value_type* InternalAllocator<T>::allocate(
std::size_t count) {
PA_CHECK(count <=
std::numeric_limits<std::size_t>::max() / sizeof(value_type));
@@ -64,8 +64,9 @@ LightweightQuarantineBranch::~LightweightQuarantineBranch() {
bool LightweightQuarantineBranch::Quarantine(void* object,
SlotSpanMetadata* slot_span,
uintptr_t slot_start) {
const auto usable_size = root_.allocator_root_.GetSlotUsableSize(slot_span);
uintptr_t slot_start,
size_t usable_size) {
PA_DCHECK(usable_size == root_.allocator_root_.GetSlotUsableSize(slot_span));
const size_t capacity_in_bytes =
branch_capacity_in_bytes_.load(std::memory_order_relaxed);
@@ -86,7 +87,7 @@ bool LightweightQuarantineBranch::Quarantine(void* object,
// Put the entry onto the list.
branch_size_in_bytes_ += usable_size;
slots_.emplace_back(slot_start, usable_size);
slots_.push_back({slot_start, usable_size});
// Swap randomly so that the quarantine list remain shuffled.
// This is not uniformly random, but sufficiently random.
@@ -110,7 +110,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LightweightQuarantineBranch {
// immediately). Otherwise, returns `true`.
bool Quarantine(void* object,
SlotSpanMetadata* slot_span,
uintptr_t slot_start);
uintptr_t slot_start,
size_t usable_size);
// Dequarantine all entries **held by this branch**.
// It is possible that another branch with entries and it remains untouched.
@@ -4,16 +4,12 @@
#include "partition_alloc/memory_reclaimer.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc.h"
#include "partition_alloc/partition_alloc_base/no_destructor.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#if PA_BUILDFLAG(USE_STARSCAN)
#include "partition_alloc/starscan/pcscan.h"
#endif
namespace partition_alloc {
// static
@@ -63,26 +59,6 @@ void MemoryReclaimer::Reclaim(int flags) {
internal::ScopedGuard lock(
lock_); // Has to protect from concurrent (Un)Register calls.
// PCScan quarantines freed slots. Trigger the scan first to let it call
// FreeNoHooksImmediate on slots that pass the quarantine.
//
// In turn, FreeNoHooksImmediate may add slots to thread cache. Purge it next
// so that the slots are actually freed. (This is done synchronously only for
// the current thread.)
//
// Lastly decommit empty slot spans and lastly try to discard unused pages at
// the end of the remaining active slots.
#if PA_CONFIG(STARSCAN_ENABLE_STARSCAN_ON_RECLAIM) && PA_BUILDFLAG(USE_STARSCAN)
{
using PCScan = internal::PCScan;
const auto invocation_mode = flags & PurgeFlags::kAggressiveReclaim
? PCScan::InvocationMode::kForcedBlocking
: PCScan::InvocationMode::kBlocking;
PCScan::PerformScanIfNeeded(invocation_mode);
}
#endif // PA_CONFIG(STARSCAN_ENABLE_STARSCAN_ON_RECLAIM) &&
// PA_BUILDFLAG(USE_STARSCAN)
#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
// Don't completely empty the thread cache outside of low memory situations,
// as there is periodic purge which makes sure that it doesn't take too much
@@ -5,12 +5,12 @@
#include "partition_alloc/page_allocator.h"
#include <atomic>
#include <bit>
#include <cstdint>
#include "partition_alloc/address_space_randomization.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/page_allocator_internal.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_lock.h"
@@ -79,7 +79,7 @@ uintptr_t TrimMapping(uintptr_t base_address,
uintptr_t alignment_offset,
PageAccessibilityConfiguration accessibility) {
PA_DCHECK(base_length >= trim_length);
PA_DCHECK(std::has_single_bit(alignment));
PA_DCHECK(internal::base::bits::HasSingleBit(alignment));
PA_DCHECK(alignment_offset < alignment);
uintptr_t new_base =
NextAlignedWithOffset(base_address, alignment, alignment_offset);
@@ -108,7 +108,7 @@ uintptr_t TrimMapping(uintptr_t base_address,
uintptr_t NextAlignedWithOffset(uintptr_t address,
uintptr_t alignment,
uintptr_t requested_offset) {
PA_DCHECK(std::has_single_bit(alignment));
PA_DCHECK(internal::base::bits::HasSingleBit(alignment));
PA_DCHECK(requested_offset < alignment);
uintptr_t actual_offset = address & (alignment - 1);
@@ -183,7 +183,7 @@ uintptr_t AllocPagesWithAlignOffset(
PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
PA_DCHECK(align >= internal::PageAllocationGranularity());
// Alignment must be power of 2 for masking math to work.
PA_DCHECK(std::has_single_bit(align));
PA_DCHECK(internal::base::bits::HasSingleBit(align));
PA_DCHECK(align_offset < align);
PA_DCHECK(!(align_offset & internal::PageAllocationGranularityOffsetMask()));
PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));
@@ -9,10 +9,10 @@
#include <cstdint>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/thread_isolation/thread_isolation.h"
namespace partition_alloc {
@@ -173,6 +173,14 @@ void FreePages(void* address, size_t length);
//
// Returns true if the permission change succeeded. In most cases you must
// |CHECK| the result.
//
// Note: On Windows, setting permissions to `PAGE_NOACCESS` will also decommit
// pages. This is desirable because clients assume that pages with no access
// rights should be "free" from a resource standpoint. In particular this allows
// clients to map a large amount of memory, set its access rights to
// `PAGE_NOACCESS` and not worry about commit limit exhaustion.
// On the flip side, this means that changing permissions can often fail on this
// platform.
[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool TrySetSystemPagesAccess(
uintptr_t address,
size_t length,
@@ -187,6 +195,8 @@ void FreePages(void* address, size_t length);
// bytes.
//
// Performs a CHECK that the operation succeeds.
//
// See the note above for Windows-specific behavior.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetSystemPagesAccess(uintptr_t address,
size_t length,
@@ -323,6 +333,13 @@ void RecommitSystemPages(
// that the page is required again. Once written to, the content of the page is
// guaranteed stable once more. After being written to, the page content may be
// based on the original page content, or a page of zeroes.
//
// WARNING: Do not discard a large amount of pages, for a potentially long
// duration. Discarded pages are *not* decommitted on Windows, where total
// system-wide committed memory is limited. As most Chromium OOM crashes are
// commit limit related, this will both impact Private Memory Footprint (which
// reports committed memory) and stability (since we will bump into the limit
// more often).
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void DiscardSystemPages(uintptr_t address, size_t length);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
@@ -26,7 +26,8 @@
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))
#elif (PA_BUILDFLAG(IS_ANDROID) && PA_BUILDFLAG(PA_ARCH_CPU_64_BITS)) || \
(PA_BUILDFLAG(IS_LINUX) && PA_BUILDFLAG(PA_ARCH_CPU_ARM64))
(PA_BUILDFLAG(IS_LINUX) && PA_BUILDFLAG(PA_ARCH_CPU_ARM64)) || \
(PA_BUILDFLAG(IS_LINUX) && PA_BUILDFLAG(PA_ARCH_CPU_PPC64))
// This should work for all POSIX (if needed), but currently all other
// supported OS/architecture combinations use either hard-coded values
// (such as x86) or have means to determine these values without needing
@@ -86,17 +87,7 @@ PageAllocationGranularity();
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PageAllocationGranularityShift() {
#if PA_BUILDFLAG(IS_WIN) || PA_BUILDFLAG(PA_ARCH_CPU_PPC64)
// Modern ppc64 systems support 4kB (shift = 12) and 64kB (shift = 16) page
// sizes. Since 64kB is the de facto standard on the platform and binaries
// compiled for 64kB are likely to work on 4kB systems, 64kB is a good choice
// here.
return 16; // 64kB
#elif defined(_MIPS_ARCH_LOONGSON) || PA_BUILDFLAG(PA_ARCH_CPU_LOONGARCH64)
return 14; // 16kB
#elif PA_BUILDFLAG(IS_APPLE) && PA_BUILDFLAG(PA_ARCH_CPU_64_BITS)
return static_cast<size_t>(vm_page_shift);
#elif defined(PARTITION_ALLOCATOR_CONSTANTS_POSIX_NONCONST_PAGE_SIZE)
#if defined(PARTITION_ALLOCATOR_CONSTANTS_POSIX_NONCONST_PAGE_SIZE)
// arm64 supports 4kb (shift = 12), 16kb (shift = 14), and 64kb (shift = 16)
// page sizes. Retrieve from or initialize cache.
size_t shift = page_characteristics.shift.load(std::memory_order_relaxed);
@@ -106,6 +97,26 @@ PageAllocationGranularityShift() {
page_characteristics.shift.store(shift, std::memory_order_relaxed);
}
return shift;
#elif PA_BUILDFLAG(IS_WIN) || PA_BUILDFLAG(PA_ARCH_CPU_PPC64)
// Modern ppc64 systems support 4kB (shift = 12) and 64kB (shift = 16) page
// sizes. Since 64kB is the de facto standard on the platform and binaries
// compiled for 64kB are likely to work on 4kB systems, 64kB is a good choice
// here.
return 16; // 64kB
#elif defined(_MIPS_ARCH_LOONGSON) || PA_BUILDFLAG(PA_ARCH_CPU_LOONGARCH64)
return 14; // 16kB
#elif PA_BUILDFLAG(IS_APPLE) && PA_BUILDFLAG(PA_ARCH_CPU_64_BITS)
return static_cast<size_t>(vm_page_shift);
#elif PA_BUILDFLAG(IS_WIN) || PA_BUILDFLAG(PA_ARCH_CPU_PPC64)
// Modern ppc64 systems support 4kB (shift = 12) and 64kB (shift = 16) page
// sizes. Since 64kB is the de facto standard on the platform and binaries
// compiled for 64kB are likely to work on 4kB systems, 64kB is a good choice
// here.
return 16; // 64kB
#elif defined(_MIPS_ARCH_LOONGSON) || PA_BUILDFLAG(PA_ARCH_CPU_LOONGARCH64)
return 14; // 16kB
#elif PA_BUILDFLAG(IS_APPLE) && PA_BUILDFLAG(PA_ARCH_CPU_64_BITS)
return static_cast<size_t>(vm_page_shift);
#else
return 12; // 4kB
#endif
@@ -5,8 +5,8 @@
#include <sys/mman.h>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#if PA_BUILDFLAG(HAS_MEMORY_TAGGING) || \
(defined(__ARM_FEATURE_BTI_DEFAULT) && (__ARM_FEATURE_BTI_DEFAULT == 1) && \
@@ -14,10 +14,10 @@
#include <cstring>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/oom.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/notreached.h"
#include "partition_alloc/partition_alloc_base/posix/eintr_wrapper.h"
#include "partition_alloc/partition_alloc_check.h"
@@ -299,14 +299,14 @@ void DecommitSystemPagesInternal(
bool change_permissions =
accessibility_disposition == PageAccessibilityDisposition::kRequireUpdate;
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
// This is not guaranteed, show that we're serious.
//
// More specifically, several callers have had issues with assuming that
// memory is zeroed, this would hopefully make these bugs more visible. We
// don't memset() everything, because ranges can be very large, and doing it
// over the entire range could make Chrome unusable with
// PA_BUILDFLAG(PA_DCHECK_IS_ON).
// PA_BUILDFLAG(DCHECKS_ARE_ON).
//
// Only do it when we are about to change the permissions, since we don't know
// the previous permissions, and cannot restore them.
@@ -7,11 +7,11 @@
#include <cstdint>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/oom.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/page_allocator_internal.h"
#include "partition_alloc/partition_alloc_base/notreached.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
namespace partition_alloc::internal {
@@ -5,7 +5,6 @@
#include "partition_alloc/partition_address_space.h"
#include <array>
#include <bit>
#include <cstddef>
#include <cstdint>
#include <ostream>
@@ -13,11 +12,12 @@
#include "partition_alloc/address_pool_manager.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/compressed_pointer.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/debug/alias.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
@@ -183,6 +183,19 @@ void PartitionAddressSpace::Init() {
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageTag::kPartitionAlloc, pools_fd);
#if PA_BUILDFLAG(IS_ANDROID)
// On Android, Adreno-GSL library fails to mmap if we snatch address
// 0x400000000. Find a different address instead.
if (setup_.regular_pool_base_address_ == 0x400000000) {
uintptr_t new_base_address =
AllocPages(glued_pool_sizes, glued_pool_sizes,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageTag::kPartitionAlloc, pools_fd);
FreePages(setup_.regular_pool_base_address_, glued_pool_sizes);
setup_.regular_pool_base_address_ = new_base_address;
}
#endif // PA_BUILDFLAG(IS_ANDROID)
if (!setup_.regular_pool_base_address_) {
HandlePoolAllocFailure();
}
@@ -263,16 +276,6 @@ void PartitionAddressSpace::Init() {
PA_DCHECK(!IsInCorePools(setup_.brp_pool_base_address_ + brp_pool_size));
#endif // PA_BUILDFLAG(GLUE_CORE_POOLS)
#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
// Reserve memory for PCScan quarantine card table.
uintptr_t requested_address = setup_.regular_pool_base_address_;
uintptr_t actual_address = AddressPoolManager::GetInstance().Reserve(
kRegularPoolHandle, requested_address, kSuperPageSize);
PA_CHECK(requested_address == actual_address)
<< "QuarantineCardTable is required to be allocated at the beginning of "
"the regular pool";
#endif // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
#if PA_BUILDFLAG(ENABLE_POINTER_COMPRESSION)
CompressedPointerBaseGlobal::SetBase(setup_.regular_pool_base_address_);
#endif // PA_BUILDFLAG(ENABLE_POINTER_COMPRESSION)
@@ -295,7 +298,7 @@ void PartitionAddressSpace::InitConfigurablePool(uintptr_t pool_base,
PA_CHECK(pool_base);
PA_CHECK(size <= kConfigurablePoolMaxSize);
PA_CHECK(size >= kConfigurablePoolMinSize);
PA_CHECK(std::has_single_bit(size));
PA_CHECK(base::bits::HasSingleBit(size));
PA_CHECK(pool_base % size == 0);
setup_.configurable_pool_base_address_ = pool_base;
@@ -401,7 +404,7 @@ void PartitionAddressSpace::UninitConfigurablePoolForTesting() {
void PartitionAddressSpace::UninitThreadIsolatedPoolForTesting() {
if (IsThreadIsolatedPoolInitialized()) {
UnprotectThreadIsolatedGlobals();
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
ThreadIsolationSettings::settings.enabled = false;
#endif
@@ -5,17 +5,17 @@
#ifndef PARTITION_ALLOC_PARTITION_ADDRESS_SPACE_H_
#define PARTITION_ALLOC_PARTITION_ADDRESS_SPACE_H_
#include <bit>
#include <cstddef>
#include <utility>
#include "partition_alloc/address_pool_manager_types.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/notreached.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
@@ -315,7 +315,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
"because the test process cannot use an extended virtual address space. "
"Temporarily disable ShadowMetadata feature on iOS");
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
// Check whether the given |ptr| points to an address inside the address space
// reserved for the regular and brp shadow. However the result |true| doesn't
// mean the given |ptr| is valid. Because we don't use the entire address
@@ -328,7 +328,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
ptr_as_uintptr < pool_shadow_address_ + BRPPoolSize() ||
ptr_as_uintptr < pool_shadow_address_ + kConfigurablePoolMaxSize));
}
#endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
static void InitShadowMetadata(PoolHandleMask pool);
static void MapMetadata(uintptr_t super_page, bool copy_metadata);
@@ -382,17 +382,17 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
// ArrayBuffers be located inside of it.
static constexpr size_t kRegularPoolSize = kPoolMaxSize;
static constexpr size_t kBRPPoolSize = kPoolMaxSize;
static_assert(std::has_single_bit(kRegularPoolSize));
static_assert(std::has_single_bit(kBRPPoolSize));
static_assert(base::bits::HasSingleBit(kRegularPoolSize));
static_assert(base::bits::HasSingleBit(kBRPPoolSize));
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
static constexpr size_t kThreadIsolatedPoolSize = kGiB / 4;
static_assert(std::has_single_bit(kThreadIsolatedPoolSize));
static_assert(base::bits::HasSingleBit(kThreadIsolatedPoolSize));
#endif
static constexpr size_t kConfigurablePoolMaxSize = kPoolMaxSize;
static constexpr size_t kConfigurablePoolMinSize = 1 * kGiB;
static_assert(kConfigurablePoolMinSize <= kConfigurablePoolMaxSize);
static_assert(std::has_single_bit(kConfigurablePoolMaxSize));
static_assert(std::has_single_bit(kConfigurablePoolMinSize));
static_assert(base::bits::HasSingleBit(kConfigurablePoolMaxSize));
static_assert(base::bits::HasSingleBit(kConfigurablePoolMinSize));
#if PA_BUILDFLAG(IS_IOS)
@@ -407,8 +407,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
static constexpr size_t kBRPPoolSizeForIOSTestProcess = kGiB / 4;
static_assert(kRegularPoolSizeForIOSTestProcess < kRegularPoolSize);
static_assert(kBRPPoolSizeForIOSTestProcess < kBRPPoolSize);
static_assert(std::has_single_bit(kRegularPoolSizeForIOSTestProcess));
static_assert(std::has_single_bit(kBRPPoolSizeForIOSTestProcess));
static_assert(base::bits::HasSingleBit(kRegularPoolSizeForIOSTestProcess));
static_assert(base::bits::HasSingleBit(kBRPPoolSizeForIOSTestProcess));
#endif // PA_BUILDFLAG(IOS_IOS)
#if !PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
@@ -9,9 +9,9 @@
#include <cstring>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/in_slot_metadata.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/random.h"
#include "partition_alloc/tagging.h"
@@ -57,7 +57,7 @@ PA_ALWAYS_INLINE void SecureMemset(void* ptr, uint8_t value, size_t size) {
#pragma optimize("", on)
#endif
#if PA_BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
#if PA_BUILDFLAG(EXPENSIVE_DCHECKS_ARE_ON)
// Used to memset() memory for debugging purposes only.
PA_ALWAYS_INLINE void DebugMemset(void* ptr, int value, size_t size) {
// Only set the first 512kiB of the allocation. This is enough to detect uses
@@ -71,12 +71,12 @@ PA_ALWAYS_INLINE void DebugMemset(void* ptr, int value, size_t size) {
size_t size_to_memset = std::min(size, size_t{1} << 19);
memset(ptr, value, size_to_memset);
}
#endif // PA_BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
#endif // PA_BUILDFLAG(EXPENSIVE_DCHECKS_ARE_ON)
// Returns true if we've hit the end of a random-length period. We don't want to
// invoke `RandomValue` too often, because we call this function in a hot spot
// (`Free`), and `RandomValue` incurs the cost of atomics.
#if !PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if !PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_ALWAYS_INLINE bool RandomPeriod() {
static thread_local uint8_t counter = 0;
if (PA_UNLIKELY(counter == 0)) {
@@ -87,7 +87,7 @@ PA_ALWAYS_INLINE bool RandomPeriod() {
counter--;
return counter == 0;
}
#endif // !PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // !PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_ALWAYS_INLINE uintptr_t ObjectInnerPtr2Addr(const void* ptr) {
return UntagPtr(ptr);
@@ -9,10 +9,9 @@
#include <memory>
#include "partition_alloc/address_pool_manager.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/memory_reclaimer.h"
#include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_hooks.h"
#include "partition_alloc/partition_direct_map_extent.h"
#include "partition_alloc/partition_oom.h"
@@ -20,10 +19,6 @@
#include "partition_alloc/partition_root.h"
#include "partition_alloc/partition_stats.h"
#if PA_BUILDFLAG(USE_STARSCAN)
#include "partition_alloc/starscan/pcscan.h"
#endif
namespace partition_alloc {
void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
@@ -7,8 +7,8 @@
#include <cstdint>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/tagging.h"
namespace partition_alloc {
@@ -8,8 +8,8 @@
#include <mach/mach.h>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/logging.h"
// Use the PA_MACH_LOG family of macros along with a mach_error_t
@@ -47,7 +47,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) MachLogMessage
} // namespace partition_alloc::internal::logging
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
#define PA_MACH_DVLOG_IS_ON(verbose_level) PA_VLOG_IS_ON(verbose_level)
#else
#define PA_MACH_DVLOG_IS_ON(verbose_level) 0
@@ -91,9 +91,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) MachLogMessage
PA_LAZY_STREAM(PA_MACH_VLOG_STREAM(verbose_level, mach_err), \
PA_MACH_DVLOG_IS_ON(verbose_level) && (condition))
#define PA_MACH_DCHECK(condition, mach_err) \
PA_LAZY_STREAM(PA_MACH_LOG_STREAM(FATAL, mach_err), \
PA_BUILDFLAG(PA_DCHECK_IS_ON) && !(condition)) \
#define PA_MACH_DCHECK(condition, mach_err) \
PA_LAZY_STREAM(PA_MACH_LOG_STREAM(FATAL, mach_err), \
PA_BUILDFLAG(DCHECKS_ARE_ON) && !(condition)) \
<< "Check failed: " #condition << ". "
#endif // PARTITION_ALLOC_PARTITION_ALLOC_BASE_APPLE_MACH_LOGGING_H_
@@ -7,37 +7,42 @@
#ifndef PARTITION_ALLOC_PARTITION_ALLOC_BASE_BITS_H_
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_BITS_H_
#include <bit>
#include <concepts>
#include <cstddef>
#include <cstdint>
#include <type_traits>
#include "partition_alloc/build_config.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/check.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
namespace partition_alloc::internal::base::bits {
// Bit functions in <bit> are restricted to a specific set of types of unsigned
// integer; restrict functions in this file that are related to those in that
// header to match for consistency.
template <typename T>
concept UnsignedInteger =
std::unsigned_integral<T> && !std::same_as<T, bool> &&
!std::same_as<T, char> && !std::same_as<T, char8_t> &&
!std::same_as<T, char16_t> && !std::same_as<T, char32_t> &&
!std::same_as<T, wchar_t>;
// Backport of C++20 std::has_single_bit in <bit>.
//
// Returns true iff |value| is a power of 2.
template <typename T, typename = std::enable_if_t<std::is_integral_v<T>>>
constexpr bool HasSingleBit(T value) {
  // "Hacker's Delight", section 2.1 (Manipulating Rightmost Bits): for any
  // x with exactly one bit set (e.g. 0b00100000), x - 1 clears that bit and
  // sets every bit to its right (0b00011111), so x & (x - 1) == 0 exactly
  // when at most one bit is set. Zero and negative values are rejected up
  // front: only strictly positive integers can be powers of two.
  if (value <= 0) {
    return false;
  }
  return (value & (value - 1)) == 0;
}
// Round down |size| to a multiple of alignment, which must be a power of two.
template <typename T>
inline constexpr T AlignDown(T size, T alignment) {
  // The mask arithmetic below is only well-defined for unsigned types; this
  // static_assert replaces the previous C++20 `requires UnsignedInteger<T>`
  // clause, and the HasSingleBit backport replaces std::has_single_bit, so
  // the constraint is no longer checked twice in two different dialects.
  static_assert(std::is_unsigned_v<T>,
                "AlignDown is only defined for unsigned integer types.");
  PA_BASE_DCHECK(HasSingleBit(alignment));
  // For a power-of-two alignment, ~(alignment - 1) is a mask that clears the
  // low bits, rounding down to the nearest multiple.
  return size & ~(alignment - 1);
}
// Move |ptr| back to the previous multiple of alignment, which must be a power
// of two. Defined for types where sizeof(T) is one byte.
template <typename T>
requires(sizeof(T) == 1)
inline T* AlignDown(T* ptr, size_t alignment) {
return reinterpret_cast<T*>(
AlignDown(reinterpret_cast<uintptr_t>(ptr), alignment));
@@ -45,54 +50,105 @@ inline T* AlignDown(T* ptr, size_t alignment) {
// Round up |size| to a multiple of alignment, which must be a power of two.
template <typename T>
inline constexpr T AlignUp(T size, T alignment) {
  // The mask arithmetic below is only well-defined for unsigned types; this
  // static_assert replaces the previous C++20 `requires UnsignedInteger<T>`
  // clause, and the HasSingleBit backport replaces std::has_single_bit, so
  // the constraint is no longer checked twice in two different dialects.
  static_assert(std::is_unsigned_v<T>,
                "AlignUp is only defined for unsigned integer types.");
  PA_BASE_DCHECK(HasSingleBit(alignment));
  // Adding alignment - 1 before masking rounds any non-multiple up to the
  // next multiple; exact multiples are left unchanged.
  return (size + alignment - 1) & ~(alignment - 1);
}
// Advance |ptr| to the next multiple of alignment, which must be a power of
// two. Defined for types where sizeof(T) is one byte.
template <typename T>
  requires(sizeof(T) == 1)
inline T* AlignUp(T* ptr, size_t alignment) {
  // Round-trip the pointer through uintptr_t (not size_t): uintptr_t is the
  // integer type guaranteed to hold a pointer value, and this matches the
  // AlignDown pointer overload above.
  return reinterpret_cast<T*>(
      AlignUp(reinterpret_cast<uintptr_t>(ptr), alignment));
}
// Returns the integer i such as 2^i <= n < 2^(i+1).
// Backport of C++20 std::countl_zero in <bit>.
//
// A common use for this function is to measure the number of bits required to
// contain a value; for that case use std::bit_width().
// CountlZero(value) returns the number of zero bits following the
// most significant 1 bit in |value| if |value| is non-zero, otherwise it
// returns {sizeof(T) * 8}.
// Example: 00100010 -> 2
//
// A common use for this function is to take its result and use it to left-shift
// a bit; instead of doing so, use std::bit_floor().
constexpr int Log2Floor(uint32_t n) {
return 31 - std::countl_zero(n);
// CountrZero(value) returns the number of zero bits preceding the
// least significant 1 bit in |value| if |value| is non-zero, otherwise it
// returns {sizeof(T) * 8}.
// Example: 00100010 -> 1
//
// C does not have an operator to do this, but fortunately the various
// compilers have built-ins that map to fast underlying processor instructions.
// __builtin_clz has undefined behaviour for an input of 0, even though there's
// clearly a return value that makes sense, and even though some processor clz
// instructions have defined behaviour for 0. We could drop to raw __asm__ to
// do better, but we'll avoid doing that unless we see proof that we need to.
// Backport of C++20 std::countl_zero: returns the number of zero bits before
// the most significant set bit of |value|, or {sizeof(T) * 8} when |value|
// is zero. Restricted to unsigned integer types of at most 64 bits.
template <typename T, int bits = sizeof(T) * 8>
PA_ALWAYS_INLINE constexpr
    typename std::enable_if<std::is_unsigned_v<T> && sizeof(T) <= 8, int>::type
    CountlZero(T value) {
  static_assert(bits > 0, "invalid instantiation");
#if PA_BUILDFLAG(PA_COMPILER_MSVC) && !defined(__clang__)
  // We would prefer to use the _BitScanReverse(64) intrinsics, but they
  // aren't constexpr and thus unusable here.
  if (PA_LIKELY(value)) {
    int leading_zeros = 0;
    constexpr T kMostSignificantBitMask = 1ull << (bits - 1);
    // Shift left until the top bit is set, counting each step taken.
    for (; !(value & kMostSignificantBitMask); value <<= 1, ++leading_zeros) {
    }
    return leading_zeros;
  }
  return bits;
#else
  // __builtin_clz(ll) is undefined for an input of 0, hence the PA_LIKELY
  // guard. For types narrower than 32 bits, subtract (32 - bits) to cancel
  // the extra leading zeros introduced by widening to uint32_t.
  return PA_LIKELY(value)
             ? bits == 64
                   ? __builtin_clzll(static_cast<uint64_t>(value))
                   : __builtin_clz(static_cast<uint32_t>(value)) - (32 - bits)
             : bits;
#endif  // PA_BUILDFLAG(PA_COMPILER_MSVC) && !defined(__clang__)
}
// Backport of C++20 std::countr_zero in <bit>.
//
// Returns the number of consecutive 0 bits, starting from the least
// significant one, or {sizeof(T) * 8} when |value| is zero. Restricted to
// unsigned integer types of at most 64 bits.
template <typename T, int bits = sizeof(T) * 8>
PA_ALWAYS_INLINE constexpr
    typename std::enable_if<std::is_unsigned_v<T> && sizeof(T) <= 8, int>::type
    CountrZero(T value) {
#if PA_BUILDFLAG(PA_COMPILER_MSVC) && !defined(__clang__)
  // We would prefer to use the _BitScanForward(64) intrinsics, but they
  // aren't constexpr and thus unusable here.
  if (PA_LIKELY(value)) {
    int trailing_zeros = 0;
    constexpr T kLeastSignificantBitMask = 1ull;
    // Shift right until the bottom bit is set, counting each step taken.
    for (; !(value & kLeastSignificantBitMask); value >>= 1, ++trailing_zeros) {
    }
    return trailing_zeros;
  }
  return bits;
#else
  // __builtin_ctz(ll) is undefined for an input of 0, hence the PA_LIKELY
  // guard.
  return PA_LIKELY(value) ? bits == 64
                                ? __builtin_ctzll(static_cast<uint64_t>(value))
                                : __builtin_ctz(static_cast<uint32_t>(value))
                          : bits;
#endif  // PA_BUILDFLAG(PA_COMPILER_MSVC) && !defined(__clang__)
}
// Backport of C++20 std::bit_width in <bit>.
//
// Returns the number of bits needed to represent n: the smallest i such that
// n < 2^i (e.g. BitWidth(4) == 3, BitWidth(0) == 0, since CountlZero(0)
// returns 32).
constexpr int BitWidth(uint32_t n) {
  return 32 - CountlZero(n);
}
// Returns the integer i such as 2^(i-1) < n <= 2^i.
//
// A common use for this function is to measure the number of bits required to
// contain a value; for that case use std::bit_width().
//
// A common use for this function is to take its result and use it to left-shift
// a bit; instead of doing so, use std::bit_ceil().
constexpr int Log2Ceiling(uint32_t n) {
// When n == 0, we want the function to return -1.
// When n == 0, (n - 1) will underflow to 0xFFFFFFFF, which is
// why the statement below starts with (n ? 32 : -1).
return (n ? 32 : -1) - std::countl_zero(n - 1);
}
// Returns a value of type T with a single bit set in the left-most position.
// Can be used instead of manually shifting a 1 to the left. Unlike the other
// functions in this file, usable for any integral type.
template <typename T>
requires std::integral<T>
constexpr T LeftmostBit() {
T one(1u);
return one << (8 * sizeof(T) - 1);
return (n ? 32 : -1) - CountlZero(n - 1);
}
} // namespace partition_alloc::internal::base::bits
@@ -7,9 +7,9 @@
#include <iosfwd>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h"
#include "partition_alloc/partition_alloc_base/log_message.h"
#include "partition_alloc/partition_alloc_base/strings/cstring_builder.h"
@@ -145,7 +145,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) NotImplemented
#error "Debug builds are not expected to be optimized as official builds."
#endif // defined(OFFICIAL_BUILD) && !defined(NDEBUG)
#if defined(OFFICIAL_BUILD) && !PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if defined(OFFICIAL_BUILD) && !PA_BUILDFLAG(DCHECKS_ARE_ON)
// Discard log strings to reduce code bloat.
//
@@ -185,7 +185,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) NotImplemented
#endif
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
#define PA_BASE_DCHECK(condition) \
PA_LAZY_CHECK_STREAM( \
@@ -0,0 +1,196 @@
// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "partition_alloc/partition_alloc_base/debug/proc_maps_linux.h"
#include <fcntl.h>
#include <stddef.h>
#include "partition_alloc/build_config.h"
#include "partition_alloc/partition_alloc_base/files/file_util.h"
#include "partition_alloc/partition_alloc_base/logging.h"
#include "partition_alloc/partition_alloc_base/posix/eintr_wrapper.h"
#include "partition_alloc/partition_alloc_check.h"
#if PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS) || \
PA_BUILDFLAG(IS_ANDROID)
#include <inttypes.h>
#endif
namespace partition_alloc::internal::base::debug {
namespace {
// Android's libc may declare 'open' with two signatures:
//   int open(const char *pathname, int flags);
//   int open(const char *pathname, int flags, mode_t mode);
//
// An overload set like that cannot be handed to the WrapEINTR template
// directly, so this single-signature wrapper pins down which overload is
// meant, letting the compiler make a decision.
int OpenFile(const char* pathname, int flags) {
  return ::open(pathname, flags);
}
} // namespace
// Scans |proc_maps| starting from |pos| returning true if the gate VMA was
// found, otherwise returns false. The gate VMA is the kernel-provided mapping
// that seq_file emits last; ReadProcMaps uses it to detect the end of the
// listing.
static bool ContainsGateVMA(std::string* proc_maps, size_t pos) {
#if PA_BUILDFLAG(PA_ARCH_CPU_ARM_FAMILY)
  // The gate VMA on ARM kernels is the interrupt vectors page.
  return proc_maps->find(" [vectors]\n", pos) != std::string::npos;
#elif PA_BUILDFLAG(PA_ARCH_CPU_X86_64)
  // The gate VMA on x86 64-bit kernels is the virtual system call page.
  return proc_maps->find(" [vsyscall]\n", pos) != std::string::npos;
#else
  // Otherwise assume there is no gate VMA in which case we shouldn't
  // get duplicate entries.
  return false;
#endif
}
// Reads /proc/self/maps into |proc_maps|, page-sized chunk by chunk. Returns
// true on success; see the header comment for why the result may still
// contain duplicate entries.
bool ReadProcMaps(std::string* proc_maps) {
  // seq_file only writes out a page-sized amount on each call. Refer to header
  // file for details.
  const size_t read_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));

  int fd = WrapEINTR(OpenFile)("/proc/self/maps", O_RDONLY);
  if (fd == -1) {
    PA_LOG(ERROR) << "Couldn't open /proc/self/maps";
    // The open failed, so there is no valid descriptor to close here;
    // closing -1 would merely fail with EBADF.
    return false;
  }
  proc_maps->clear();

  while (true) {
    // To avoid a copy, resize |proc_maps| so read() can write directly into it.
    // Compute |buffer| afterwards since resize() may reallocate.
    size_t pos = proc_maps->size();
    proc_maps->resize(pos + read_size);
    void* buffer = &(*proc_maps)[pos];

    ssize_t bytes_read = WrapEINTR(read)(fd, buffer, read_size);
    if (bytes_read < 0) {
      PA_DPLOG(ERROR) << "Couldn't read /proc/self/maps";
      proc_maps->clear();
      WrapEINTR(close)(fd);
      return false;
    }

    // ... and don't forget to trim off excess bytes.
    proc_maps->resize(pos + static_cast<size_t>(bytes_read));

    if (bytes_read == 0) {
      break;
    }

    // The gate VMA is handled as a special case after seq_file has finished
    // iterating through all entries in the virtual memory table.
    //
    // Unfortunately, if additional entries are added at this point in time
    // seq_file gets confused and the next call to read() will return duplicate
    // entries including the gate VMA again.
    //
    // Avoid this by searching for the gate VMA and breaking early.
    if (ContainsGateVMA(proc_maps, pos)) {
      break;
    }
  }

  WrapEINTR(close)(fd);
  return true;
}
// Parses /proc/<pid>/maps-format text in |input| and stores the result in
// |regions_out|. Returns true and updates |regions_out| if and only if every
// line of |input| was successfully parsed.
bool ParseProcMaps(const std::string& input,
                   std::vector<MappedMemoryRegion>* regions_out) {
  PA_CHECK(regions_out);
  std::vector<MappedMemoryRegion> regions;

  // This isn't async safe nor terribly efficient, but it doesn't need to be at
  // this point in time.

  // Split the input into lines.
  std::vector<std::string> lines;
  // size_t, not int: string indices are unsigned, and mixing a signed |start|
  // with the size_t loop index invites sign-conversion surprises.
  size_t start = 0;
  for (size_t i = 0; i < input.size(); ++i) {
    if (input[i] == '\n') {
      lines.push_back(input.substr(start, i - start));
      start = i + 1;
    }
  }
  lines.push_back(input.substr(start));

  for (size_t i = 0; i < lines.size(); ++i) {
    // Due to splitting on '\n' the last line should be empty.
    if (i == lines.size() - 1) {
      if (!lines[i].empty()) {
        PA_DLOG(WARNING) << "Last line not empty";
        return false;
      }
      break;
    }

    MappedMemoryRegion region;
    const char* line = lines[i].c_str();
    char permissions[5] = {'\0'};  // Ensure NUL-terminated string.
    uint8_t dev_major = 0;
    uint8_t dev_minor = 0;
    long inode = 0;  // Parsed but unused; keeps sscanf aligned with the format.
    int path_index = 0;

    // Sample format from man 5 proc:
    //
    // address           perms offset  dev   inode   pathname
    // 08048000-08056000 r-xp 00000000 03:0c 64593   /usr/sbin/gpm
    //
    // The final %n term captures the offset in the input string, which is used
    // to determine the path name. It *does not* increment the return value.
    // Refer to man 3 sscanf for details.
    if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR " %4c %llx %hhx:%hhx %ld %n",
               &region.start, &region.end, permissions, &region.offset,
               &dev_major, &dev_minor, &inode, &path_index) < 7) {
      PA_LOG(WARNING) << "sscanf failed for line: " << line;
      return false;
    }

    // Translate the "rwxp"-style permission field into the bitmask, rejecting
    // any character outside the documented alphabet.
    region.permissions = 0;

    if (permissions[0] == 'r') {
      region.permissions |= MappedMemoryRegion::READ;
    } else if (permissions[0] != '-') {
      return false;
    }

    if (permissions[1] == 'w') {
      region.permissions |= MappedMemoryRegion::WRITE;
    } else if (permissions[1] != '-') {
      return false;
    }

    if (permissions[2] == 'x') {
      region.permissions |= MappedMemoryRegion::EXECUTE;
    } else if (permissions[2] != '-') {
      return false;
    }

    if (permissions[3] == 'p') {
      region.permissions |= MappedMemoryRegion::PRIVATE;
    } else if (permissions[3] != 's' &&
               permissions[3] != 'S') {  // Shared memory.
      return false;
    }

    // Pushing then assigning saves us a string copy.
    regions.push_back(region);
    regions.back().path.assign(line + path_index);
  }

  regions_out->swap(regions);
  return true;
}
} // namespace partition_alloc::internal::base::debug
@@ -0,0 +1,93 @@
// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef PARTITION_ALLOC_PARTITION_ALLOC_BASE_DEBUG_PROC_MAPS_LINUX_H_
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_DEBUG_PROC_MAPS_LINUX_H_
#include <stdint.h>
#include <string>
#include <vector>
#include "partition_alloc/partition_alloc_base/component_export.h"
namespace partition_alloc::internal::base::debug {
// Describes a region of mapped memory and the path of the file mapped.
struct MappedMemoryRegion {
  // Bit values combined into |permissions| below.
  enum Permission {
    READ = 1 << 0,
    WRITE = 1 << 1,
    EXECUTE = 1 << 2,
    PRIVATE = 1 << 3,  // If set, region is private, otherwise it is shared.
  };

  // The address range [start,end) of mapped memory.
  uintptr_t start;
  uintptr_t end;

  // Byte offset into |path| of the range mapped into memory.
  unsigned long long offset;

  // Image base, if this mapping corresponds to an ELF image.
  // NOTE(review): not populated by ParseProcMaps in this file — presumably
  // filled in by other code; confirm before relying on it.
  uintptr_t base;

  // Bitmask of READ/WRITE/EXECUTE/PRIVATE |Permission| values.
  uint8_t permissions;

  // Name of the file mapped into memory.
  //
  // NOTE: path names aren't guaranteed to point at valid files. For example,
  // "[heap]" and "[stack]" are used to represent the location of the process'
  // heap and stack, respectively.
  std::string path;
};
// Reads the data from /proc/self/maps and stores the result in |proc_maps|.
// Returns true if successful, false otherwise.
//
// There is *NO* guarantee that the resulting contents will be free of
// duplicates or even contain valid entries by time the method returns.
//
//
// THE GORY DETAILS
//
// Did you know it's next-to-impossible to atomically read the whole contents
// of /proc/<pid>/maps? You would think that if we passed in a large-enough
// buffer to read() that It Should Just Work(tm), but sadly that's not the case.
//
// Linux's procfs uses seq_file [1] for handling iteration, text formatting,
// and dealing with resulting data that is larger than the size of a page. That
// last bit is especially important because it means that seq_file will never
// return more than the size of a page in a single call to read().
//
// Unfortunately for a program like Chrome the size of /proc/self/maps is
// larger than the size of a page, so we're forced to call read() multiple
// times.
// If the virtual memory table changed in any way between calls to read() (e.g.,
// a different thread calling mprotect()), it can make seq_file generate
// duplicate entries or skip entries.
//
// Even if seq_file was changed to keep flushing the contents of its page-sized
// buffer to the usermode buffer inside a single call to read(), it has to
// release its lock on the virtual memory table to handle page faults while
// copying data to usermode. This puts us in the same situation where the table
// can change while we're copying data.
//
// Alternatives such as fork()-and-suspend-the-parent-while-child-reads were
// attempted, but they present more subtle problems than it's worth. Depending
// on your use case your best bet may be to read /proc/<pid>/maps prior to
// starting other threads.
//
// [1] http://kernelnewbies.org/Documents/SeqFileHowTo
PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool ReadProcMaps(std::string* proc_maps);
// Parses /proc/<pid>/maps input data and stores in |regions|. Returns true
// and updates |regions| if and only if all of |input| was successfully parsed.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool ParseProcMaps(const std::string& input,
std::vector<MappedMemoryRegion>* regions);
} // namespace partition_alloc::internal::base::debug
#endif // PARTITION_ALLOC_PARTITION_ALLOC_BASE_DEBUG_PROC_MAPS_LINUX_H_
@@ -19,7 +19,7 @@ extern "C" void* __libc_stack_end;
namespace partition_alloc::internal::base::debug {
namespace {
#if PA_BUILDFLAG(PA_CAN_UNWIND_WITH_FRAME_POINTERS)
#if PA_BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
#if defined(__arm__) && defined(__GNUC__) && !defined(__clang__)
// GCC and LLVM generate slightly different frames on ARM, see
@@ -141,11 +141,11 @@ uintptr_t ScanStackForNextFrame(uintptr_t fp, uintptr_t stack_end) {
return 0;
}
#endif // PA_BUILDFLAG(PA_CAN_UNWIND_WITH_FRAME_POINTERS)
#endif // PA_BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
} // namespace
#if PA_BUILDFLAG(PA_CAN_UNWIND_WITH_FRAME_POINTERS)
#if PA_BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
// We force this function to be inlined into its callers (e.g.
// TraceStackFramePointers()) in all build modes so we don't have to worry about
@@ -199,7 +199,7 @@ PA_NOINLINE size_t TraceStackFramePointers(const void** out_trace,
#endif // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
#if PA_BUILDFLAG(PA_CAN_UNWIND_WITH_FRAME_POINTERS)
#if PA_BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
uintptr_t GetStackEnd() {
#if PA_BUILDFLAG(IS_ANDROID)
// Bionic reads proc/maps on every call to pthread_getattr_np() when called
@@ -9,13 +9,13 @@
#include <cstdint>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
namespace partition_alloc::internal::base::debug {
// Returns end of the stack, or 0 if we couldn't get it.
#if PA_BUILDFLAG(PA_CAN_UNWIND_WITH_FRAME_POINTERS)
#if PA_BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
uintptr_t GetStackEnd();
#endif
@@ -37,7 +37,7 @@ void OutputStackTrace(unsigned index,
uintptr_t offset);
#endif
#if PA_BUILDFLAG(PA_CAN_UNWIND_WITH_FRAME_POINTERS)
#if PA_BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
// For stack scanning to be efficient it's very important for the thread to
// be started by Chrome. In that case we naturally terminate unwinding once
@@ -67,7 +67,7 @@ size_t TraceStackFramePointers(const void** out_trace,
size_t skip_initial,
bool enable_scanning = kEnableScanningByDefault);
#endif // PA_BUILDFLAG(PA_CAN_UNWIND_WITH_FRAME_POINTERS)
#endif // PA_BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
} // namespace partition_alloc::internal::base::debug
@@ -5,7 +5,7 @@
#include "partition_alloc/partition_alloc_base/debug/stack_trace.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/buildflags.h"
namespace partition_alloc::internal::base::debug {
@@ -13,7 +13,7 @@ size_t CollectStackTrace(const void** trace, size_t count) {
// NOTE: This code MUST be async-signal safe (it's used by in-process
// stack dumping signal handler). NO malloc or stdio is allowed here.
#if PA_BUILDFLAG(PA_CAN_UNWIND_WITH_FRAME_POINTERS)
#if PA_BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
// Regarding Linux and Android, backtrace API internally invokes malloc().
// So the API is not available inside memory allocation. Instead try tracing
// using frame pointers.
@@ -5,7 +5,7 @@
#include "partition_alloc/partition_alloc_base/debug/stack_trace.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/numerics/safe_conversions.h"
// Surprisingly, uClibc defines __GLIBC__ in some build configs, but
@@ -9,8 +9,8 @@
#include <zircon/types.h>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/logging.h"
// Use the PA_ZX_LOG family of macros along with a zx_status_t containing a
@@ -54,17 +54,17 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) ZxLogMessage
#define PA_ZX_DLOG(severity, zx_err) \
PA_LAZY_STREAM(PA_ZX_LOG_STREAM(severity, zx_err), PA_DLOG_IS_ON(severity))
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
#define PA_ZX_DLOG_IF(severity, condition, zx_err) \
PA_LAZY_STREAM(PA_ZX_LOG_STREAM(severity, zx_err), \
PA_DLOG_IS_ON(severity) && (condition))
#else // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#else // PA_BUILDFLAG(DCHECKS_ARE_ON)
#define PA_ZX_DLOG_IF(severity, condition, zx_err) PA_EAT_STREAM_PARAMETERS
#endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
#define PA_ZX_DCHECK(condition, zx_err) \
PA_LAZY_STREAM(PA_ZX_LOG_STREAM(DCHECK, zx_err), \
PA_BUILDFLAG(PA_DCHECK_IS_ON) && !(condition)) \
#define PA_ZX_DCHECK(condition, zx_err) \
PA_LAZY_STREAM(PA_ZX_LOG_STREAM(DCHECK, zx_err), \
PA_BUILDFLAG(DCHECKS_ARE_ON) && !(condition)) \
<< "Check failed: " #condition << ". "
#endif // PARTITION_ALLOC_PARTITION_ALLOC_BASE_FUCHSIA_FUCHSIA_LOGGING_H_
@@ -63,13 +63,13 @@ LogMessageHandlerFunction g_log_message_handler = nullptr;
} // namespace
#if PA_BUILDFLAG(PA_DCHECK_IS_CONFIGURABLE)
#if PA_BUILDFLAG(DCHECK_IS_CONFIGURABLE)
// In DCHECK-enabled Chrome builds, allow the meaning of LOGGING_DCHECK to be
// determined at run-time. We default it to ERROR, to avoid it triggering
// crashes before the run-time has explicitly chosen the behaviour.
PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
logging::LogSeverity LOGGING_DCHECK = LOGGING_ERROR;
#endif // PA_BUILDFLAG(PA_DCHECK_IS_CONFIGURABLE)
#endif // PA_BUILDFLAG(DCHECK_IS_CONFIGURABLE)
// This is never instantiated, it's just used for EAT_STREAM_PARAMETERS to have
// an object of the correct type on the LHS of the unused part of the ternary
@@ -8,8 +8,8 @@
#include <cstddef>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/scoped_clear_last_error.h"
#include "partition_alloc/partition_alloc_base/strings/cstring_builder.h"
@@ -41,7 +41,7 @@ constexpr LogSeverity LOGGING_NUM_SEVERITIES = 4;
// LOGGING_DFATAL is LOGGING_FATAL in DCHECK-enabled builds, ERROR in normal
// mode.
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
constexpr LogSeverity LOGGING_DFATAL = LOGGING_FATAL;
#else
constexpr LogSeverity LOGGING_DFATAL = LOGGING_ERROR;
@@ -10,9 +10,9 @@
#include <cstdint>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/log_message.h"
// TODO(crbug.com/40158212): Need to update the description, because logging for
@@ -337,7 +337,7 @@ constexpr LogSeverity LOGGING_0 = LOGGING_ERROR;
// Definitions for DLOG et al.
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
#define PA_DLOG_IS_ON(severity) PA_LOG_IS_ON(severity)
#define PA_DLOG_IF(severity, condition) PA_LOG_IF(severity, condition)
@@ -347,11 +347,11 @@ constexpr LogSeverity LOGGING_0 = LOGGING_ERROR;
#define PA_DVPLOG_IF(verboselevel, condition) \
PA_VPLOG_IF(verboselevel, condition)
#else // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#else // PA_BUILDFLAG(DCHECKS_ARE_ON)
// If !PA_BUILDFLAG(PA_DCHECK_IS_ON), we want to avoid emitting any references
// If !PA_BUILDFLAG(DCHECKS_ARE_ON), we want to avoid emitting any references
// to |condition| (which may reference a variable defined only if
// PA_BUILDFLAG(PA_DCHECK_IS_ON)). Contrast this with DCHECK et al., which has
// PA_BUILDFLAG(DCHECKS_ARE_ON)). Contrast this with DCHECK et al., which has
// different behavior.
#define PA_DLOG_IS_ON(severity) false
@@ -361,7 +361,7 @@ constexpr LogSeverity LOGGING_0 = LOGGING_ERROR;
#define PA_DVLOG_IF(verboselevel, condition) PA_EAT_STREAM_PARAMETERS
#define PA_DVPLOG_IF(verboselevel, condition) PA_EAT_STREAM_PARAMETERS
#endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
#define PA_DLOG(severity) \
PA_LAZY_STREAM(PA_LOG_STREAM(severity), PA_DLOG_IS_ON(severity))
@@ -375,11 +375,11 @@ constexpr LogSeverity LOGGING_0 = LOGGING_ERROR;
// Definitions for DCHECK et al.
#if PA_BUILDFLAG(PA_DCHECK_IS_CONFIGURABLE)
#if PA_BUILDFLAG(DCHECK_IS_CONFIGURABLE)
PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) extern LogSeverity LOGGING_DCHECK;
#else
constexpr LogSeverity LOGGING_DCHECK = LOGGING_FATAL;
#endif // PA_BUILDFLAG(PA_DCHECK_IS_CONFIGURABLE)
#endif // PA_BUILDFLAG(DCHECK_IS_CONFIGURABLE)
// Redefine the standard assert to use our nice log files
#undef assert
@@ -9,7 +9,7 @@
#include <type_traits>
#include "partition_alloc/build_config.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/buildflags.h"
namespace partition_alloc::internal::base::subtle {
@@ -21,7 +21,7 @@ bool RefCountedThreadSafeBase::HasAtLeastOneRef() const {
return !ref_count_.IsZero();
}
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
PA_BASE_DCHECK(in_dtor_) << "RefCountedThreadSafe object deleted without "
"calling Release()";
@@ -6,11 +6,11 @@
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_MEMORY_REF_COUNTED_H_
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/atomic_ref_count.h"
#include "partition_alloc/partition_alloc_base/check.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/memory/scoped_refptr.h"
namespace partition_alloc::internal::base {
@@ -28,12 +28,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) RefCountedThreadSafeBase {
explicit constexpr RefCountedThreadSafeBase(StartRefCountFromZeroTag) {}
explicit constexpr RefCountedThreadSafeBase(StartRefCountFromOneTag)
: ref_count_(1) {
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
needs_adopt_ref_ = true;
#endif
}
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
~RefCountedThreadSafeBase();
#else
~RefCountedThreadSafeBase() = default;
@@ -59,14 +59,14 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) RefCountedThreadSafeBase {
friend scoped_refptr<U> AdoptRef(U*);
void Adopted() const {
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_BASE_DCHECK(needs_adopt_ref_);
needs_adopt_ref_ = false;
#endif
}
PA_ALWAYS_INLINE void AddRefImpl() const {
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_BASE_DCHECK(!in_dtor_);
// This RefCounted object is created with non-zero reference count.
// The first reference to such a object has to be made by AdoptRef or
@@ -77,7 +77,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) RefCountedThreadSafeBase {
}
PA_ALWAYS_INLINE void AddRefWithCheckImpl() const {
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_BASE_DCHECK(!in_dtor_);
// This RefCounted object is created with non-zero reference count.
// The first reference to such a object has to be made by AdoptRef or
@@ -88,12 +88,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) RefCountedThreadSafeBase {
}
PA_ALWAYS_INLINE bool ReleaseImpl() const {
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_BASE_DCHECK(!in_dtor_);
PA_BASE_DCHECK(!ref_count_.IsZero());
#endif
if (!ref_count_.Decrement()) {
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
in_dtor_ = true;
#endif
return true;
@@ -102,7 +102,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) RefCountedThreadSafeBase {
}
mutable AtomicRefCount ref_count_{0};
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
mutable bool needs_adopt_ref_ = false;
mutable bool in_dtor_ = false;
#endif
@@ -5,7 +5,7 @@
#include "partition_alloc/partition_alloc_base/strings/cstring_builder.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/strings/safe_sprintf.h"
#if !PA_BUILDFLAG(IS_WIN)
@@ -16,7 +16,7 @@
#include <cstring>
#include <limits>
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
#include "partition_alloc/partition_alloc_base/check.h"
#define PA_RAW_DCHECK PA_RAW_CHECK
#else
@@ -0,0 +1,87 @@
// Copyright 2024 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef PARTITION_ALLOC_PARTITION_ALLOC_BASE_TEST_GTEST_UTIL_H_
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_TEST_GTEST_UTIL_H_
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/check.h"
#include "testing/gtest/include/gtest/gtest.h"
// EXPECT/ASSERT_DCHECK_DEATH is intended to replace EXPECT/ASSERT_DEBUG_DEATH
// when the death is expected to be caused by a DCHECK. Contrary to
// EXPECT/ASSERT_DEBUG_DEATH however, it doesn't execute the statement in non-
// dcheck builds as DCHECKs are intended to catch things that should never
// happen and as such executing the statement results in undefined behavior
// (|statement| is compiled in unsupported configurations nonetheless).
// DCHECK_IS_CONFIGURABLE is excluded from DCHECK_DEATH because it's non-FATAL
// by default and there are no known tests that configure a FATAL level. If this
// gets used from FATAL contexts under DCHECK_IS_CONFIGURABLE this may need to
// be updated to look at LOGGING_DCHECK's current severity level.
// Death tests misbehave on Android.
#if PA_BUILDFLAG(DCHECKS_ARE_ON) && defined(GTEST_HAS_DEATH_TEST) && \
!PA_BUILDFLAG(DCHECK_IS_CONFIGURABLE) && !PA_BUILDFLAG(IS_ANDROID)
// EXPECT/ASSERT_DCHECK_DEATH tests verify that a DCHECK is hit ("Check failed"
// is part of the error message). Optionally you may specify part of the message
// to verify which DCHECK (or LOG(DFATAL)) is being hit.
#define PA_EXPECT_DCHECK_DEATH(statement) \
EXPECT_DEATH(statement, "Check failed")
#define PA_EXPECT_DCHECK_DEATH_WITH(statement, msg) EXPECT_DEATH(statement, msg)
#define PA_ASSERT_DCHECK_DEATH(statement) \
ASSERT_DEATH(statement, "Check failed")
#define PA_ASSERT_DCHECK_DEATH_WITH(statement, msg) ASSERT_DEATH(statement, msg)
#else
#define PA_EXPECT_DCHECK_DEATH(statement) \
GTEST_UNSUPPORTED_DEATH_TEST(statement, "Check failed", )
#define PA_EXPECT_DCHECK_DEATH_WITH(statement, msg) \
GTEST_UNSUPPORTED_DEATH_TEST(statement, msg, )
#define PA_ASSERT_DCHECK_DEATH(statement) \
GTEST_UNSUPPORTED_DEATH_TEST(statement, "Check failed", return)
#define PA_ASSERT_DCHECK_DEATH_WITH(statement, msg) \
GTEST_UNSUPPORTED_DEATH_TEST(statement, msg, return)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON) && defined(GTEST_HAS_DEATH_TEST) &&
// !PA_BUILDFLAG(DCHECK_IS_CONFIGURABLE) && !PA_BUILDFLAG(IS_ANDROID)
// As above, but for CHECK().
#if defined(GTEST_HAS_DEATH_TEST) && !PA_BUILDFLAG(IS_ANDROID)
#if PA_BASE_CHECK_WILL_STREAM()
#define PA_EXPECT_CHECK_DEATH(statement) EXPECT_DEATH(statement, "Check failed")
#define PA_EXPECT_CHECK_DEATH_WITH(statement, msg) EXPECT_DEATH(statement, msg)
#define PA_ASSERT_CHECK_DEATH(statement) ASSERT_DEATH(statement, "Check failed")
#define PA_EXPECT_NOTREACHED_DEATH(statement) \
EXPECT_DEATH(statement, "NOTREACHED hit")
#define PA_ASSERT_NOTREACHED_DEATH(statement) \
ASSERT_DEATH(statement, "NOTREACHED hit")
#else
#define PA_EXPECT_CHECK_DEATH(statement) EXPECT_DEATH(statement, "")
#define PA_EXPECT_CHECK_DEATH_WITH(statement, msg) EXPECT_DEATH(statement, "")
#define PA_ASSERT_CHECK_DEATH(statement) ASSERT_DEATH(statement, "")
#define PA_EXPECT_NOTREACHED_DEATH(statement) EXPECT_DEATH(statement, "")
#define PA_ASSERT_NOTREACHED_DEATH(statement) ASSERT_DEATH(statement, "")
#endif // PA_BASE_CHECK_WILL_STREAM()
#else // defined(GTEST_HAS_DEATH_TEST) && !PA_BUILDFLAG(IS_ANDROID)
// Note GTEST_UNSUPPORTED_DEATH_TEST takes a |regex| only to see whether it is a
// valid regex. It is never evaluated.
#define PA_EXPECT_CHECK_DEATH(statement) \
GTEST_UNSUPPORTED_DEATH_TEST(statement, "", )
#define PA_EXPECT_CHECK_DEATH_WITH(statement, msg) \
GTEST_UNSUPPORTED_DEATH_TEST(statement, "", )
#define PA_ASSERT_CHECK_DEATH(statement) \
GTEST_UNSUPPORTED_DEATH_TEST(statement, "", return)
#define PA_EXPECT_NOTREACHED_DEATH(statement) \
GTEST_UNSUPPORTED_DEATH_TEST(statement, "", )
#define PA_ASSERT_NOTREACHED_DEATH(statement) \
GTEST_UNSUPPORTED_DEATH_TEST(statement, "", return)
#endif // defined(GTEST_HAS_DEATH_TEST) && !PA_BUILDFLAG(IS_ANDROID)
#endif // PARTITION_ALLOC_PARTITION_ALLOC_BASE_TEST_GTEST_UTIL_H_
@@ -38,7 +38,7 @@
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_THREAD_ANNOTATIONS_H_
#include "partition_alloc/build_config.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/buildflags.h"
#if defined(__clang__)
#define PA_THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
@@ -246,7 +246,7 @@ inline T& ts_unchecked_read(T& v) PA_NO_THREAD_SAFETY_ANALYSIS {
// The above is imported as-is from abseil-cpp. The following Chromium-specific
// synonyms are added for Chromium concepts (SequenceChecker/ThreadChecker).
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
// Equivalent to PA_GUARDED_BY for SequenceChecker/ThreadChecker. Currently,
#define PA_GUARDED_BY_CONTEXT(name) PA_GUARDED_BY(name)
@@ -254,11 +254,11 @@ inline T& ts_unchecked_read(T& v) PA_NO_THREAD_SAFETY_ANALYSIS {
// Equivalent to PA_EXCLUSIVE_LOCKS_REQUIRED for SequenceChecker/ThreadChecker.
#define PA_VALID_CONTEXT_REQUIRED(name) PA_EXCLUSIVE_LOCKS_REQUIRED(name)
#else // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#else // PA_BUILDFLAG(DCHECKS_ARE_ON)
#define PA_GUARDED_BY_CONTEXT(name)
#define PA_VALID_CONTEXT_REQUIRED(name)
#endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
#endif // PARTITION_ALLOC_PARTITION_ALLOC_BASE_THREAD_ANNOTATIONS_H_
@@ -6,25 +6,7 @@
namespace partition_alloc::internal::base {
namespace {
// SetThreadNameHook is invoked by EnablePCScan(). EnablePCScan() will be
// invoked soon after running RunBrowser, RunZygote, and RunContentProcess.
// So g_set_thread_name_proc can be non-atomic.
SetThreadNameProc g_set_thread_name_proc = nullptr;
} // namespace
void PlatformThread::SetThreadNameHook(SetThreadNameProc hook) {
g_set_thread_name_proc = hook;
}
// static
void PlatformThread::SetName(const std::string& name) {
if (!g_set_thread_name_proc) {
return;
}
g_set_thread_name_proc(name);
}
void PlatformThread::SetName(const std::string& name) {}
} // namespace partition_alloc::internal::base
@@ -14,7 +14,7 @@
#include <cstdint>
#include "partition_alloc/build_config.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/logging.h"
#include "partition_alloc/partition_alloc_base/threading/platform_thread_internal_posix.h"
@@ -95,7 +95,7 @@ PlatformThreadId PlatformThread::CurrentId() {
g_is_main_thread = false;
}
} else {
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
if (g_thread_id != syscall(__NR_gettid)) {
PA_RAW_LOG(
FATAL,
@@ -14,11 +14,11 @@
#include <memory>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/check.h"
#include "partition_alloc/partition_alloc_base/logging.h"
#include "partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h"
#include "partition_alloc/partition_alloc_base/threading/platform_thread_internal_posix.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#if PA_BUILDFLAG(IS_FUCHSIA)
#include <zircon/process.h>
@@ -26,9 +26,8 @@
#include <sys/resource.h>
#endif
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && PA_BUILDFLAG(USE_STARSCAN)
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "partition_alloc/stack/stack.h"
#include "partition_alloc/starscan/pcscan.h"
#endif
namespace partition_alloc::internal::base {
@@ -52,14 +51,14 @@ void* ThreadFunc(void* params) {
delegate = thread_params->delegate;
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && PA_BUILDFLAG(USE_STARSCAN)
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
StackTopRegistry::Get().NotifyThreadCreated();
#endif
}
delegate->ThreadMain();
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && PA_BUILDFLAG(USE_STARSCAN)
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
StackTopRegistry::Get().NotifyThreadDestroyed();
#endif
@@ -7,15 +7,14 @@
#include <cstddef>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/oom.h"
#include "partition_alloc/partition_alloc_base/check.h"
#include "partition_alloc/partition_alloc_base/debug/alias.h"
#include "partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && PA_BUILDFLAG(USE_STARSCAN)
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "partition_alloc/stack/stack.h"
#include "partition_alloc/starscan/pcscan.h"
#endif
namespace partition_alloc::internal::base {
@@ -63,7 +62,7 @@ DWORD __stdcall ThreadFunc(void* params) {
GetCurrentProcess(), &platform_handle, 0,
FALSE, DUPLICATE_SAME_ACCESS);
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && PA_BUILDFLAG(USE_STARSCAN)
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
StackTopRegistry::Get().NotifyThreadCreated();
#endif
@@ -75,7 +74,7 @@ DWORD __stdcall ThreadFunc(void* params) {
delete thread_params;
delegate->ThreadMain();
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && PA_BUILDFLAG(USE_STARSCAN)
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
StackTopRegistry::Get().NotifyThreadDestroyed();
#endif
return 0;
@@ -73,7 +73,7 @@
#include "partition_alloc/partition_alloc_base/numerics/clamped_math.h"
#if PA_BUILDFLAG(IS_APPLE)
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
#endif // PA_BUILDFLAG(IS_APPLE)
#if PA_BUILDFLAG(IS_FUCHSIA)
@@ -0,0 +1,10 @@
// Copyright 2024 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef PARTITION_ALLOC_PARTITION_ALLOC_BUILDFLAGS_H_
#define PARTITION_ALLOC_PARTITION_ALLOC_BUILDFLAGS_H_
// TODO(crbug.com/41481467): Remove this alias, the time to updates dependants.
#include "partition_alloc/buildflags.h"
#endif // PARTITION_ALLOC_PARTITION_ALLOC_BUILDFLAGS_H_
@@ -8,13 +8,12 @@
#include <cstdint>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/check.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/debug/alias.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h"
#include "partition_alloc/partition_alloc_buildflags.h"
// When PartitionAlloc is used as the default allocator, we cannot use the
// regular (D)CHECK() macros, as they allocate internally. When an assertion is
@@ -33,11 +32,11 @@
PA_UNLIKELY(!(condition)) ? PA_IMMEDIATE_CRASH() \
: PA_EAT_CHECK_STREAM_PARAMS()
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
#define PA_DCHECK(condition) PA_CHECK(condition)
#else
#define PA_DCHECK(condition) PA_EAT_CHECK_STREAM_PARAMS(!(condition))
#endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
#define PA_PCHECK(condition) \
if (!(condition)) { \
@@ -47,11 +46,11 @@
} \
static_assert(true)
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
#define PA_DPCHECK(condition) PA_PCHECK(condition)
#else
#define PA_DPCHECK(condition) PA_EAT_CHECK_STREAM_PARAMS(!(condition))
#endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
#else // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
// !PA_BASE_CHECK_WILL_STREAM()
@@ -65,7 +64,7 @@
// Expensive dchecks that run within *Scan. These checks are only enabled in
// debug builds with dchecks enabled.
#if !defined(NDEBUG)
#define PA_SCAN_DCHECK_IS_ON() PA_BUILDFLAG(PA_DCHECK_IS_ON)
#define PA_SCAN_DCHECK_IS_ON() PA_BUILDFLAG(DCHECKS_ARE_ON)
#else
#define PA_SCAN_DCHECK_IS_ON() 0
#endif
@@ -6,8 +6,7 @@
#define PARTITION_ALLOC_PARTITION_ALLOC_CONFIG_H_
#include "partition_alloc/build_config.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/buildflags.h"
// PA_CONFIG() uses a similar trick as BUILDFLAG() to allow the compiler catch
// typos or a missing #include.
@@ -36,13 +35,6 @@ static_assert(sizeof(void*) == 8, "");
static_assert(sizeof(void*) != 8, "");
#endif // PA_CONFIG(HAS_64_BITS_POINTERS)
#if PA_BUILDFLAG(HAS_64_BIT_POINTERS) && \
(defined(__ARM_NEON) || defined(__ARM_NEON__)) && defined(__ARM_FP)
#define PA_CONFIG_STARSCAN_NEON_SUPPORTED() 1
#else
#define PA_CONFIG_STARSCAN_NEON_SUPPORTED() 0
#endif
#if PA_BUILDFLAG(HAS_64_BIT_POINTERS) && PA_BUILDFLAG(IS_IOS)
// Allow PA to select an alternate pool size at run-time before initialization,
// rather than using a single constexpr value.
@@ -56,72 +48,12 @@ static_assert(sizeof(void*) != 8, "");
#define PA_CONFIG_DYNAMICALLY_SELECT_POOL_SIZE() 0
#endif // PA_BUILDFLAG(HAS_64_BIT_POINTERS) && PA_BUILDFLAG(IS_IOS)
#if PA_BUILDFLAG(HAS_64_BIT_POINTERS) && \
(PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_ANDROID))
#include <linux/version.h>
// TODO(bikineev): Enable for ChromeOS.
#define PA_CONFIG_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED() \
(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0))
#else
#define PA_CONFIG_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED() 0
#endif // PA_BUILDFLAG(HAS_64_BIT_POINTERS) &&
// (PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_ANDROID))
#if PA_BUILDFLAG(USE_STARSCAN)
// Use card table to avoid races for PCScan configuration without safepoints.
// The card table provides the guaranteee that for a marked card the underling
// super-page is fully initialized.
#define PA_CONFIG_STARSCAN_USE_CARD_TABLE() 1
#else
// The card table is permanently disabled for 32-bit.
#define PA_CONFIG_STARSCAN_USE_CARD_TABLE() 0
#endif // PA_BUILDFLAG(USE_STARSCAN)
// Use batched freeing when sweeping pages. This builds up a freelist in the
// scanner thread and appends to the slot-span's freelist only once.
#define PA_CONFIG_STARSCAN_BATCHED_FREE() 1
// TODO(bikineev): Temporarily disable inlining in *Scan to get clearer
// stacktraces.
#define PA_CONFIG_STARSCAN_NOINLINE_SCAN_FUNCTIONS() 1
// TODO(bikineev): Temporarily disable *Scan in MemoryReclaimer as it seems to
// cause significant jank.
#define PA_CONFIG_STARSCAN_ENABLE_STARSCAN_ON_RECLAIM() 0
// Double free detection comes with expensive cmpxchg (with the loop around it).
// We currently disable it to improve the runtime.
#define PA_CONFIG_STARSCAN_EAGER_DOUBLE_FREE_DETECTION_ENABLED() 0
// POSIX is not only UNIX, e.g. macOS and other OSes. We do use Linux-specific
// features such as futex(2).
#define PA_CONFIG_HAS_LINUX_KERNEL() \
(PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS) || \
PA_BUILDFLAG(IS_ANDROID))
// On some platforms, we implement locking by spinning in userspace, then going
// into the kernel only if there is contention. This requires platform support,
// namely:
// - On Linux, futex(2)
// - On Windows, a fast userspace "try" operation which is available with
// SRWLock
// - On macOS, pthread_mutex_trylock() is fast by default starting with macOS
// 10.14. Chromium targets an earlier version, so it cannot be known at
// compile-time. So we use something different.
// TODO(crbug.com/40274152): macOS 10.15 is now required; switch to
// better locking.
// - Otherwise, on POSIX we assume that a fast userspace pthread_mutex_trylock()
// is available.
//
// Otherwise, a userspace spinlock implementation is used.
#if PA_CONFIG(HAS_LINUX_KERNEL) || PA_BUILDFLAG(IS_WIN) || \
PA_BUILDFLAG(IS_APPLE) || PA_BUILDFLAG(IS_POSIX) || \
PA_BUILDFLAG(IS_FUCHSIA)
#define PA_CONFIG_HAS_FAST_MUTEX() 1
#else
#define PA_CONFIG_HAS_FAST_MUTEX() 0
#endif
// If defined, enables zeroing memory on Free() with roughly 1% probability.
// This applies only to normal buckets, as direct-map allocations are always
// decommitted.
@@ -163,7 +95,7 @@ static_assert(sizeof(void*) == 8);
#endif
// Specifies whether allocation extras need to be added.
#if PA_BUILDFLAG(PA_DCHECK_IS_ON) || PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#if PA_BUILDFLAG(DCHECKS_ARE_ON) || PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#define PA_CONFIG_EXTRAS_REQUIRED() 1
#else
#define PA_CONFIG_EXTRAS_REQUIRED() 0
@@ -204,10 +136,10 @@ static_assert(sizeof(void*) == 8);
// calling malloc() again.
//
// Limitations:
// - PA_BUILDFLAG(PA_DCHECK_IS_ON) due to runtime cost
// - PA_BUILDFLAG(DCHECKS_ARE_ON) due to runtime cost
// - thread_local TLS to simplify the implementation
// - Not on Android due to bot failures
#if PA_BUILDFLAG(PA_DCHECK_IS_ON) && \
#if PA_BUILDFLAG(DCHECKS_ARE_ON) && \
PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
PA_CONFIG(THREAD_LOCAL_TLS) && !PA_BUILDFLAG(IS_ANDROID)
#define PA_CONFIG_HAS_ALLOCATION_GUARD() 1
@@ -253,7 +185,7 @@ constexpr bool kUseLazyCommit = false;
#define PA_CONFIG_IN_SLOT_METADATA_CHECK_COOKIE() \
(!(PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) && \
PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) && \
(PA_BUILDFLAG(PA_DCHECK_IS_ON) || \
(PA_BUILDFLAG(DCHECKS_ARE_ON) || \
PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)))
// Use available space in the reference count to store the initially requested
@@ -349,6 +281,6 @@ static_assert(__cplusplus >= 202002L,
// Named pass-through that determines whether or not PA should generally
// enforce that `SlotStart` instances are in fact slot starts.
#define PA_CONFIG_ENFORCE_SLOT_STARTS() PA_BUILDFLAG(PA_DCHECK_IS_ON)
#define PA_CONFIG_ENFORCE_SLOT_STARTS() PA_BUILDFLAG(DCHECKS_ARE_ON)
#endif // PARTITION_ALLOC_PARTITION_ALLOC_CONFIG_H_
@@ -12,10 +12,10 @@
#include "partition_alloc/address_pool_manager_types.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/flags.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_forward.h"
@@ -100,7 +100,13 @@ constexpr size_t kPartitionCachelineSize = 64;
// other constant values, we pack _all_ `PartitionRoot::Alloc` sizes perfectly
// up against the end of a system page.
#if defined(_MIPS_ARCH_LOONGSON) || PA_BUILDFLAG(PA_ARCH_CPU_LOONGARCH64)
#if (PA_BUILDFLAG(IS_APPLE) && PA_BUILDFLAG(PA_ARCH_CPU_64_BITS)) || \
defined(PARTITION_ALLOCATOR_CONSTANTS_POSIX_NONCONST_PAGE_SIZE)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageShift() {
return PageAllocationGranularityShift() + 2;
}
#elif defined(_MIPS_ARCH_LOONGSON) || PA_BUILDFLAG(PA_ARCH_CPU_LOONGARCH64)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageShift() {
return 16; // 64 KiB
@@ -110,12 +116,6 @@ PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageShift() {
return 18; // 256 KiB
}
#elif (PA_BUILDFLAG(IS_APPLE) && PA_BUILDFLAG(PA_ARCH_CPU_64_BITS)) || \
defined(PARTITION_ALLOCATOR_CONSTANTS_POSIX_NONCONST_PAGE_SIZE)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageShift() {
return PageAllocationGranularityShift() + 2;
}
#else
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageShift() {
@@ -455,7 +455,14 @@ PA_ALWAYS_INLINE constexpr size_t MaxDirectMapped() {
// Max alignment supported by AlignedAlloc().
// kSuperPageSize alignment can't be easily supported, because each super page
// starts with guard pages & metadata.
// TODO(casey.smalley@arm.com): under 64k pages we can end up in a situation
// where a normal slot span will be large enough to contain multiple items,
// but the address will go over the final partition page after being aligned.
#if PA_BUILDFLAG(IS_LINUX) && PA_BUILDFLAG(PA_ARCH_CPU_ARM64)
constexpr size_t kMaxSupportedAlignment = kSuperPageSize / 4;
#else
constexpr size_t kMaxSupportedAlignment = kSuperPageSize / 2;
#endif
constexpr size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;
@@ -10,9 +10,9 @@
#include <cstdint>
#include <type_traits>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_config.h"
@@ -5,12 +5,12 @@
#include "partition_alloc/partition_bucket.h"
#include <algorithm>
#include <bit>
#include <cstdint>
#include <tuple>
#include "partition_alloc/address_pool_manager.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/freeslot_bitmap.h"
#include "partition_alloc/freeslot_bitmap_constants.h"
#include "partition_alloc/oom.h"
@@ -22,10 +22,8 @@
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/alias.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
@@ -38,10 +36,6 @@
#include "partition_alloc/reservation_offset_table.h"
#include "partition_alloc/tagging.h"
#if PA_BUILDFLAG(USE_STARSCAN)
#include "partition_alloc/starscan/pcscan.h"
#endif
namespace partition_alloc::internal {
namespace {
@@ -186,7 +180,7 @@ SlotSpanMetadata* PartitionDirectMap(PartitionRoot* root,
size_t raw_size,
size_t slot_span_alignment) {
PA_DCHECK((slot_span_alignment >= PartitionPageSize()) &&
std::has_single_bit(slot_span_alignment));
base::bits::HasSingleBit(slot_span_alignment));
// No static EXCLUSIVE_LOCKS_REQUIRED(), as the checker doesn't understand
// scoped unlocking.
@@ -255,7 +249,7 @@ SlotSpanMetadata* PartitionDirectMap(PartitionRoot* root,
const size_t reservation_size = PartitionRoot::GetDirectMapReservationSize(
raw_size + padding_for_alignment);
PA_DCHECK(reservation_size >= raw_size);
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
const size_t available_reservation_size =
reservation_size - padding_for_alignment -
PartitionRoot::GetDirectMapMetadataAndGuardPagesSize();
@@ -791,19 +785,7 @@ PartitionBucket::InitializeSuperPage(PartitionRoot* root,
uintptr_t state_bitmap =
super_page + PartitionPageSize() +
(is_direct_mapped() ? 0 : ReservedFreeSlotBitmapSize());
#if PA_BUILDFLAG(USE_STARSCAN)
PA_DCHECK(SuperPageStateBitmapAddr(super_page) == state_bitmap);
const size_t state_bitmap_reservation_size =
root->IsQuarantineAllowed() ? ReservedStateBitmapSize() : 0;
const size_t state_bitmap_size_to_commit =
root->IsQuarantineAllowed() ? CommittedStateBitmapSize() : 0;
PA_DCHECK(state_bitmap_reservation_size % PartitionPageSize() == 0);
PA_DCHECK(state_bitmap_size_to_commit % SystemPageSize() == 0);
PA_DCHECK(state_bitmap_size_to_commit <= state_bitmap_reservation_size);
uintptr_t payload = state_bitmap + state_bitmap_reservation_size;
#else
uintptr_t payload = state_bitmap;
#endif // PA_BUILDFLAG(USE_STARSCAN)
root->next_partition_page = payload;
root->next_partition_page_end = root->next_super_page - PartitionPageSize();
@@ -886,24 +868,6 @@ PartitionBucket::InitializeSuperPage(PartitionRoot* root,
payload < SuperPagesEndFromExtent(current_extent));
}
// If PCScan is used, commit the state bitmap. Otherwise, leave it uncommitted
// and let PartitionRoot::RegisterScannableRoot() commit it when needed. Make
// sure to register the super-page after it has been fully initialized.
// Otherwise, the concurrent scanner may try to access |extent->root| which
// could be not initialized yet.
#if PA_BUILDFLAG(USE_STARSCAN)
if (root->IsQuarantineEnabled()) {
{
ScopedSyscallTimer timer{root};
RecommitSystemPages(state_bitmap, state_bitmap_size_to_commit,
root->PageAccessibilityWithThreadIsolationIfEnabled(
PageAccessibilityConfiguration::kReadWrite),
PageAccessibilityDisposition::kRequireUpdate);
}
PCScan::RegisterNewSuperPage(root, super_page);
}
#endif // PA_BUILDFLAG(USE_STARSCAN)
#if PA_BUILDFLAG(USE_FREESLOT_BITMAP)
// Commit the pages for freeslot bitmap.
if (!is_direct_mapped()) {
@@ -1042,7 +1006,7 @@ PartitionBucket::ProvisionMoreSlotsAndAllocOne(PartitionRoot* root,
next_slot = next_slot_end;
next_slot_end = next_slot + slot_size;
prev_entry = entry;
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
free_list_entries_added++;
#endif
}
@@ -1051,7 +1015,7 @@ PartitionBucket::ProvisionMoreSlotsAndAllocOne(PartitionRoot* root,
FreeSlotBitmapMarkSlotAsFree(return_slot);
#endif
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
// The only provisioned slot not added to the free list is the one being
// returned.
PA_DCHECK(slots_to_provision == free_list_entries_added + 1);
@@ -1339,7 +1303,7 @@ uintptr_t PartitionBucket::SlowPathAlloc(PartitionRoot* root,
SlotSpanMetadata** slot_span,
bool* is_already_zeroed) {
PA_DCHECK((slot_span_alignment >= PartitionPageSize()) &&
std::has_single_bit(slot_span_alignment));
base::bits::HasSingleBit(slot_span_alignment));
// The slow path is called when the freelist is empty. The only exception is
// when a higher-order alignment is requested, in which case the freelist
@@ -135,7 +135,14 @@ struct PartitionBucket {
// Returns a slot number starting from the beginning of the slot span.
PA_ALWAYS_INLINE size_t GetSlotNumber(size_t offset_in_slot_span) const {
// See the static assertion for `kReciprocalShift` above.
PA_DCHECK(offset_in_slot_span <= kMaxBucketed);
// TODO(casey.smalley@arm.com): triggers on Aarch64/Linux
// systems with 64k system pages. Constants need to be
// adjusted to prevent different parts of the allocator
// from overlapping. For now this will allow 64k pages
// to function on Aarch64/Linux systems, albeit not
// very efficiently.
PA_DCHECK(internal::SystemPageSize() == (size_t{1} << 16) ||
offset_in_slot_span <= kMaxBucketed);
PA_DCHECK(slot_size <= kMaxBucketed);
const size_t offset_in_slot =
@@ -6,13 +6,12 @@
#define PARTITION_ALLOC_PARTITION_BUCKET_LOOKUP_H_
#include <array>
#include <bit>
#include <cstdint>
#include <utility>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
@@ -171,11 +170,17 @@ class BucketIndexLookup final {
bucket_index_lookup_[((kBitsPerSizeT + 1) * kNumBucketsPerOrder) + 1]{};
};
PA_ALWAYS_INLINE constexpr size_t RoundUpToPowerOfTwo(size_t size) {
const size_t n = 1 << base::bits::Log2Ceiling(static_cast<uint32_t>(size));
PA_CHECK(size <= n);
return n;
}
PA_ALWAYS_INLINE constexpr size_t RoundUpSize(size_t size) {
const size_t next_power = std::bit_ceil(size);
const size_t next_power = RoundUpToPowerOfTwo(size);
const size_t prev_power = next_power >> 1;
PA_DCHECK(size <= next_power);
PA_DCHECK(prev_power < size);
PA_CHECK(size <= next_power);
PA_CHECK(prev_power < size);
if (size <= prev_power * 5 / 4) {
return prev_power * 5 / 4;
} else {
@@ -194,7 +199,7 @@ PA_ALWAYS_INLINE constexpr uint16_t BucketIndexLookup::GetIndexForDenserBuckets(
// materialized in the binary.
constexpr BucketIndexLookup lookup{};
const size_t order =
kBitsPerSizeT - static_cast<size_t>(std::countl_zero(size));
kBitsPerSizeT - static_cast<size_t>(base::bits::CountlZero(size));
// The order index is simply the next few bits after the most significant
// bit.
const size_t order_index =
@@ -5,8 +5,8 @@
#ifndef PARTITION_ALLOC_PARTITION_COOKIE_H_
#define PARTITION_ALLOC_PARTITION_COOKIE_H_
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
namespace partition_alloc::internal {
@@ -14,7 +14,7 @@ namespace partition_alloc::internal {
static constexpr size_t kCookieSize = 16;
// Cookie is enabled for debug builds.
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
inline constexpr unsigned char kCookieValue[kCookieSize] = {
0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
@@ -42,7 +42,7 @@ PA_ALWAYS_INLINE void PartitionCookieCheckValue(unsigned char* address) {}
PA_ALWAYS_INLINE void PartitionCookieWriteValue(unsigned char* cookie_ptr) {}
#endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
} // namespace partition_alloc::internal
@@ -12,7 +12,7 @@
namespace partition_alloc::internal {
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
void DCheckIsValidSlotSpan(internal::SlotSpanMetadata* slot_span) {
PartitionRoot* root = PartitionRoot::FromSlotSpanMetadata(slot_span);
@@ -62,6 +62,6 @@ void DCheckRootLockOfSlotSpanIsAcquired(internal::SlotSpanMetadata* slot_span) {
DCheckRootLockIsAcquired(PartitionRoot::FromSlotSpanMetadata(slot_span));
}
#endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
} // namespace partition_alloc::internal
@@ -14,7 +14,7 @@ namespace partition_alloc::internal {
struct PartitionSuperPageExtentEntry;
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
// To allow these asserts to have empty bodies in no-DCHECK() builds, while
// avoiding issues with circular includes.
@@ -22,7 +22,7 @@ struct PartitionSuperPageExtentEntry;
// Export symbol if dcheck-is-on. Because the body is not empty.
#define PA_EXPORT_IF_DCHECK_IS_ON() PA_COMPONENT_EXPORT(PARTITION_ALLOC)
#else // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#else // PA_BUILDFLAG(DCHECKS_ARE_ON)
// The static_assert() eats follow-on semicolons.
#define PA_EMPTY_BODY_IF_DCHECK_IS_OFF() \
@@ -31,7 +31,7 @@ struct PartitionSuperPageExtentEntry;
// inline if dcheck-is-off so it's no overhead.
#define PA_EXPORT_IF_DCHECK_IS_ON() PA_ALWAYS_INLINE
#endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_EXPORT_IF_DCHECK_IS_ON()
void DCheckIsValidSlotSpan(internal::SlotSpanMetadata* slot_span)
@@ -7,10 +7,10 @@
#include <cstddef>
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_constants.h"
namespace partition_alloc::internal {
@@ -101,7 +101,7 @@ struct PartitionFreelistDispatcher {
PartitionFreelistEntry* next) const = 0;
PA_ALWAYS_INLINE virtual uintptr_t ClearForAllocation(
PartitionFreelistEntry* entry) const = 0;
PA_ALWAYS_INLINE virtual constexpr bool IsEncodedNextPtrZero(
PA_ALWAYS_INLINE virtual bool IsEncodedNextPtrZero(
PartitionFreelistEntry* entry) const = 0;
#else
static const PartitionFreelistDispatcher* Create(
@@ -177,7 +177,7 @@ struct PartitionFreelistDispatcher {
return entry->ClearForAllocation();
}
PA_ALWAYS_INLINE constexpr bool IsEncodedNextPtrZero(
PA_ALWAYS_INLINE bool IsEncodedNextPtrZero(
PartitionFreelistEntry* entry) const {
return entry->IsEncodedNextPtrZero();
}
@@ -286,7 +286,7 @@ struct PartitionFreelistDispatcherImpl final : PartitionFreelistDispatcher {
return GetEntryImpl(entry)->ClearForAllocation();
}
PA_ALWAYS_INLINE constexpr bool IsEncodedNextPtrZero(
PA_ALWAYS_INLINE bool IsEncodedNextPtrZero(
PartitionFreelistEntry* entry) const override {
return GetEntryImpl(entry)->IsEncodedNextPtrZero();
}
@@ -9,8 +9,8 @@
#include <type_traits>
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_base/threading/platform_thread.h"
@@ -24,7 +24,7 @@ class PA_LOCKABLE Lock {
public:
inline constexpr Lock();
void Acquire() PA_EXCLUSIVE_LOCK_FUNCTION() {
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
LiftThreadIsolationScope lift_thread_isolation_restrictions;
#endif
@@ -65,7 +65,7 @@ class PA_LOCKABLE Lock {
}
void Release() PA_UNLOCK_FUNCTION() {
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
LiftThreadIsolationScope lift_thread_isolation_restrictions;
#endif
@@ -76,7 +76,7 @@ class PA_LOCKABLE Lock {
}
void AssertAcquired() const PA_ASSERT_EXCLUSIVE_LOCK() {
lock_.AssertAcquired();
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
LiftThreadIsolationScope lift_thread_isolation_restrictions;
#endif
@@ -87,7 +87,7 @@ class PA_LOCKABLE Lock {
void Reinit() PA_UNLOCK_FUNCTION() {
lock_.AssertAcquired();
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
owning_thread_ref_.store(base::PlatformThreadRef(),
std::memory_order_release);
#endif
@@ -97,7 +97,7 @@ class PA_LOCKABLE Lock {
private:
SpinningMutex lock_;
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
// Should in theory be protected by |lock_|, but we need to read it to detect
// recursive lock acquisition (and thus, the allocator becoming reentrant).
std::atomic<base::PlatformThreadRef> owning_thread_ref_ =
@@ -8,14 +8,13 @@
#include <cstdint>
#include "partition_alloc/address_pool_manager.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/freeslot_bitmap.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_alloc_forward.h"
@@ -193,11 +192,11 @@ void SlotSpanMetadata::FreeSlowPath(size_t number_of_freed) {
return;
}
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
const PartitionFreelistDispatcher* freelist_dispatcher =
PartitionRoot::FromSlotSpanMetadata(this)->get_freelist_dispatcher();
freelist_dispatcher->CheckFreeList(freelist_head, bucket->slot_size);
#endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
// If it's the current active slot span, change it. We bounce the slot span
// to the empty list as a force towards defragmentation.
@@ -317,7 +316,7 @@ void UnmapNow(uintptr_t reservation_start,
size_t reservation_size,
pool_handle pool) {
PA_DCHECK(reservation_start && reservation_size > 0);
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
// When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (pool == kBRPPoolHandle) {
@@ -356,7 +355,7 @@ void UnmapNow(uintptr_t reservation_start,
IsManagedByPartitionAllocConfigurablePool(reservation_start));
#endif
}
#endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_DCHECK((reservation_start & kSuperPageOffsetMask) == 0);
uintptr_t reservation_end = reservation_start + reservation_size;
@@ -10,14 +10,13 @@
#include "partition_alloc/address_pool_manager.h"
#include "partition_alloc/address_pool_manager_types.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/freeslot_bitmap_constants.h"
#include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
@@ -29,21 +28,12 @@
#include "partition_alloc/partition_superpage_extent_entry.h"
#include "partition_alloc/reservation_offset_table.h"
#if PA_BUILDFLAG(USE_STARSCAN)
#include "partition_alloc/starscan/state_bitmap.h"
#endif
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
#include "partition_alloc/tagging.h"
#endif
namespace partition_alloc::internal {
#if PA_BUILDFLAG(USE_STARSCAN)
using AllocationStateMap =
StateBitmap<kSuperPageSize, kSuperPageAlignment, kAlignment>;
#endif
// Metadata of the slot span.
//
// Some notes on slot span states. It can be in one of four major states:
@@ -87,27 +77,25 @@ struct SlotSpanMetadata {
// CHECK()ed in AllocNewSlotSpan().
// The maximum number of bits needed to cover all currently supported OSes.
static constexpr size_t kMaxSlotsPerSlotSpanBits = 13;
static constexpr size_t kMaxSlotsPerSlotSpanBits = 15;
static_assert(kMaxSlotsPerSlotSpan < (1 << kMaxSlotsPerSlotSpanBits), "");
// |marked_full| isn't equivalent to being full. Slot span is marked as full
// iff it isn't on the active slot span list (or any other list).
uint32_t marked_full : 1;
// |num_allocated_slots| is 0 for empty or decommitted slot spans, which can
// be further differentiated by checking existence of the freelist.
uint32_t num_allocated_slots : kMaxSlotsPerSlotSpanBits;
uint32_t num_unprovisioned_slots : kMaxSlotsPerSlotSpanBits;
// |marked_full| isn't equivalent to being full. Slot span is marked as full
// iff it isn't on the active slot span list (or any other list).
uint32_t marked_full : 1;
private:
const uint32_t can_store_raw_size_ : 1;
uint32_t freelist_is_sorted_ : 1;
uint32_t unused1_ : (32 - 1 - 2 * kMaxSlotsPerSlotSpanBits - 1 - 1);
uint16_t freelist_is_sorted_ : 1;
// If |in_empty_cache_|==1, |empty_cache_index| is undefined and mustn't be
// used.
uint16_t in_empty_cache_ : 1;
uint16_t empty_cache_index_
: kMaxEmptyCacheIndexBits; // < kMaxFreeableSpans.
uint16_t unused2_ : (16 - 1 - kMaxEmptyCacheIndexBits);
uint16_t empty_cache_index_ : kMaxEmptyCacheIndexBits; // < kMaxFreeableSpans.
// Can use only 48 bits (6B) in this bitfield, as this structure is embedded
// in PartitionPage which has 2B worth of fields and must fit in 32B.
@@ -254,18 +242,13 @@ static_assert(sizeof(SlotSpanMetadata) <= kPageMetadataSize,
"SlotSpanMetadata must fit into a Page Metadata slot.");
inline constexpr SlotSpanMetadata::SlotSpanMetadata() noexcept
: marked_full(0),
num_allocated_slots(0),
: num_allocated_slots(0),
num_unprovisioned_slots(0),
marked_full(0),
can_store_raw_size_(false),
freelist_is_sorted_(true),
unused1_(0),
in_empty_cache_(0),
empty_cache_index_(0),
unused2_(0) {
(void)unused1_;
(void)unused2_;
}
empty_cache_index_(0) {}
inline SlotSpanMetadata::SlotSpanMetadata(const SlotSpanMetadata&) = default;
@@ -377,47 +360,11 @@ PA_ALWAYS_INLINE PartitionSuperPageExtentEntry* PartitionSuperPageToExtent(
PartitionSuperPageToMetadataArea(super_page));
}
#if PA_BUILDFLAG(USE_STARSCAN)
// Size that should be reserved for state bitmap (if present) inside a super
// page. Elements of a super page are partition-page-aligned, hence the returned
// size is a multiple of partition page size.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
ReservedStateBitmapSize() {
return base::bits::AlignUp(sizeof(AllocationStateMap), PartitionPageSize());
}
// Size that should be committed for state bitmap (if present) inside a super
// page. It is a multiple of system page size.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
CommittedStateBitmapSize() {
return base::bits::AlignUp(sizeof(AllocationStateMap), SystemPageSize());
}
// Returns the address/pointer to the state bitmap in the super page. It's the
// caller's responsibility to ensure that the bitmaps even exist.
PA_ALWAYS_INLINE uintptr_t SuperPageStateBitmapAddr(uintptr_t super_page) {
PA_DCHECK(!(super_page % kSuperPageAlignment));
return super_page + PartitionPageSize() +
(IsManagedByNormalBuckets(super_page) ? ReservedFreeSlotBitmapSize()
: 0);
}
PA_ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(
uintptr_t super_page) {
return reinterpret_cast<AllocationStateMap*>(
SuperPageStateBitmapAddr(super_page));
}
#else // PA_BUILDFLAG(USE_STARSCAN)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
ReservedStateBitmapSize() {
return 0ull;
}
#endif // PA_BUILDFLAG(USE_STARSCAN)
PA_ALWAYS_INLINE uintptr_t
SuperPagePayloadStartOffset(bool is_managed_by_normal_buckets,
bool with_quarantine) {
@@ -482,7 +429,7 @@ PA_ALWAYS_INLINE PartitionPageMetadata* PartitionPageMetadata::FromAddr(
uintptr_t address) {
uintptr_t super_page = address & kSuperPageBaseMask;
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_DCHECK(IsReservationStart(super_page));
DCheckIsWithInSuperPagePayload(address);
#endif
@@ -557,11 +504,11 @@ PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromAddr(
PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromSlotStart(
uintptr_t slot_start) {
auto* slot_span = FromAddr(slot_start);
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
// Checks that the pointer is a multiple of slot size.
uintptr_t slot_span_start = ToSlotSpanStart(slot_span);
PA_DCHECK(!((slot_start - slot_span_start) % slot_span->bucket->slot_size));
#endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
return slot_span;
}
@@ -584,13 +531,13 @@ PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromObject(void* object) {
PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromObjectInnerAddr(
uintptr_t address) {
auto* slot_span = FromAddr(address);
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
// Checks that the address is within the expected object boundaries.
uintptr_t slot_span_start = ToSlotSpanStart(slot_span);
uintptr_t shift_from_slot_start =
(address - slot_span_start) % slot_span->bucket->slot_size;
DCheckIsValidShiftFromSlotStart(slot_span, shift_from_slot_start);
#endif // PA_BUILDFLAG(PA_DCHECK_IS_ON)
#endif // PA_BUILDFLAG(DCHECKS_ARE_ON)
return slot_span;
}
@@ -615,7 +562,7 @@ PA_ALWAYS_INLINE size_t SlotSpanMetadata::GetRawSize() const {
PA_ALWAYS_INLINE void SlotSpanMetadata::SetFreelistHead(
PartitionFreelistEntry* new_head) {
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
// |this| is in the metadata region, hence isn't MTE-tagged. Untag |new_head|
// as well.
uintptr_t new_head_untagged = UntagPtr(new_head);
@@ -683,7 +630,7 @@ PA_ALWAYS_INLINE void SlotSpanMetadata::AppendFreeList(
PartitionRoot* root,
const PartitionFreelistDispatcher* freelist_dispatcher)
PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root)) {
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
DCheckRootLockIsAcquired(root);
PA_DCHECK(!(freelist_dispatcher->GetNext(tail, bucket->slot_size)));
PA_DCHECK(number_of_freed);
@@ -768,7 +715,7 @@ PA_ALWAYS_INLINE void SlotSpanMetadata::Reset() {
size_t num_slots_per_span = bucket->get_slots_per_span();
PA_DCHECK(num_slots_per_span <= kMaxSlotsPerSlotSpan);
num_unprovisioned_slots = static_cast<uint32_t>(num_slots_per_span);
num_unprovisioned_slots = static_cast<uint16_t>(num_slots_per_span);
PA_DCHECK(num_unprovisioned_slots);
ToSuperPageExtent()->IncrementNumberOfNonemptySlotSpans();
@@ -776,23 +723,13 @@ PA_ALWAYS_INLINE void SlotSpanMetadata::Reset() {
next_slot_span = nullptr;
}
#if PA_BUILDFLAG(USE_STARSCAN)
// Returns the state bitmap from an address within a normal-bucket super page.
// It's the caller's responsibility to ensure that the bitmap exists.
PA_ALWAYS_INLINE AllocationStateMap* StateBitmapFromAddr(uintptr_t address) {
PA_DCHECK(IsManagedByNormalBuckets(address));
uintptr_t super_page = address & kSuperPageBaseMask;
return SuperPageStateBitmap(super_page);
}
#endif // PA_BUILDFLAG(USE_STARSCAN)
// Iterates over all slot spans in a super-page. |Callback| must return true if
// early return is needed.
template <typename Callback>
void IterateSlotSpans(uintptr_t super_page,
bool with_quarantine,
Callback callback) {
#if PA_BUILDFLAG(PA_DCHECK_IS_ON)
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
PA_DCHECK(!(super_page % kSuperPageAlignment));
auto* extent_entry = PartitionSuperPageToExtent(super_page);
DCheckRootLockIsAcquired(extent_entry->root);
@@ -837,6 +774,11 @@ void IterateSlotSpans(uintptr_t super_page,
// Helper class derived from the implementation of `SlotSpanMetadata`
// that can (but does not _have_ to) enforce that it is in fact a slot
// start.
//
// Behavior is not well-defined if this class is used outside
// PartitionAlloc internals, e.g. if PA is deferring to sanitizers.
// In such cases, the return value from PA's `Alloc()` may not be
// a slot start - it might not be managed by PartitionAlloc at all.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) SlotStart {
public:
template <bool enforce = PA_CONFIG(ENFORCE_SLOT_STARTS)>
@@ -849,6 +791,18 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) SlotStart {
return result;
}
template <bool enforce = PA_CONFIG(ENFORCE_SLOT_STARTS)>
PA_ALWAYS_INLINE static SlotStart FromObject(void* tagged_object) {
uintptr_t untagged_slot_start =
internal::UntagAddr(reinterpret_cast<uintptr_t>(tagged_object));
return SlotStart::FromUntaggedAddr<enforce>(untagged_slot_start);
}
// Tagging objects is not free. Avoid calling this repeatedly.
PA_ALWAYS_INLINE void* ToObject() {
return internal::TagAddr(untagged_slot_start);
}
PA_ALWAYS_INLINE
void CheckIsSlotStart() {
auto* slot_span_metadata = SlotSpanMetadata::FromAddr(untagged_slot_start);
@@ -865,31 +819,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) SlotStart {
: untagged_slot_start(untagged_slot_start) {}
};
// Helper class analogous to `SlotStart` and implemented in terms of
// the same.
//
// Notably, no untag-tag is incurred if `enforce` is false.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) TaggedSlotStart {
public:
template <bool enforce = PA_CONFIG(ENFORCE_SLOT_STARTS)>
PA_ALWAYS_INLINE static TaggedSlotStart FromTaggedAddr(
uintptr_t tagged_slot_start) {
TaggedSlotStart result = TaggedSlotStart(tagged_slot_start);
if constexpr (enforce) {
SlotStart::FromUntaggedAddr<enforce>(
internal::UntagAddr(tagged_slot_start));
}
return result;
}
uintptr_t tagged_slot_start;
private:
PA_ALWAYS_INLINE
explicit TaggedSlotStart(uintptr_t tagged_slot_start)
: tagged_slot_start(tagged_slot_start) {}
};
} // namespace partition_alloc::internal
#endif // PARTITION_ALLOC_PARTITION_PAGE_H_

Some files were not shown because too many files have changed in this diff Show More