From 49399fa38690b9edefd5c997962ffa8771a9d8d1 Mon Sep 17 00:00:00 2001 From: Amit Kumar Pandey Date: Mon, 3 Nov 2025 21:49:19 +0530 Subject: [PATCH 1/8] [SWDEV-563823][Compiler-rt][ASan] Simplify API Logic 'asan_hsa_amd_ipc_memory_create'. - Use reinterpret_cast for pointer arithmetic. - Add sanitizer interception logic for api 'hsa_amd_pointer_info'. - Allow only valid values of ptr and len in non-ASan mode. - ptr == Actual agentBaseAddress && len == original_len_used_in_alloc - Allow only valid values of ptr and len in ASan mode. - Here pinfo is retrieved from external hsa_amd_pointer_info(not internal device allocator function AmdgpuMemFuncs::GetPointerInfo) - ptr == pinfo.agentBaseAddress && len == pinfo.sizeInBytes - ptr == original_ptr_returned_by_ASAN && len == original_len_used_in_alloc --- compiler-rt/lib/asan/asan_allocator.cpp | 21 +++++++++++++++++---- compiler-rt/lib/asan/asan_allocator.h | 5 +++++ compiler-rt/lib/asan/asan_interceptors.cpp | 10 ++++++++++ 3 files changed, 32 insertions(+), 4 deletions(-) diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp index 06c827c41eacc..820b89c3cfdf2 100644 --- a/compiler-rt/lib/asan/asan_allocator.cpp +++ b/compiler-rt/lib/asan/asan_allocator.cpp @@ -1399,7 +1399,10 @@ DECLARE_REAL(hsa_status_t, hsa_amd_ipc_memory_attach, DECLARE_REAL(hsa_status_t, hsa_amd_ipc_memory_detach, void *mapped_ptr) DECLARE_REAL(hsa_status_t, hsa_amd_vmem_address_reserve_align, void** ptr, size_t size, uint64_t address, uint64_t alignment, uint64_t flags) -DECLARE_REAL(hsa_status_t, hsa_amd_vmem_address_free, void* ptr, size_t size); +DECLARE_REAL(hsa_status_t, hsa_amd_vmem_address_free, void* ptr, size_t size) +DECLARE_REAL(hsa_status_t, hsa_amd_pointer_info, const void* ptr, + hsa_amd_pointer_info_t* info, void* (*alloc)(size_t), + uint32_t* num_agents_accessible, hsa_agent_t** accessible) namespace __asan { @@ -1452,11 +1455,11 @@ static struct AP64 AP_; static struct AP32 AP_; #endif -hsa_status_t asan_hsa_amd_ipc_memory_create(void *ptr, size_t len, - hsa_amd_ipc_memory_t * handle) { +hsa_status_t asan_hsa_amd_ipc_memory_create(void* ptr, size_t len, + hsa_amd_ipc_memory_t* handle) { void *ptr_; size_t len_ = get_allocator().GetActuallyAllocatedSize(ptr); - if (len_) { + if (len_ && len_ != len) { static_assert(AP_.kMetadataSize == 0, "Expression below requires this"); ptr_ = reinterpret_cast(reinterpret_cast(ptr) - kPageSize_); } else { @@ -1540,5 +1543,15 @@ hsa_status_t asan_hsa_amd_vmem_address_free(void* ptr, size_t size, } return REAL(hsa_amd_vmem_address_free)(ptr, size); } + +hsa_status_t asan_hsa_amd_pointer_info(const void* ptr, + hsa_amd_pointer_info_t* info, + void* (*alloc)(size_t), + uint32_t* num_agents_accessible, + hsa_agent_t** accessible) { + void* p = get_allocator().GetBlockBegin(ptr); + return REAL(hsa_amd_pointer_info)(p ? 
p : ptr, info, alloc, + num_agents_accessible, accessible); +} } // namespace __asan #endif diff --git a/compiler-rt/lib/asan/asan_allocator.h b/compiler-rt/lib/asan/asan_allocator.h index ced10f62b7a58..f33e8d3b2819e 100644 --- a/compiler-rt/lib/asan/asan_allocator.h +++ b/compiler-rt/lib/asan/asan_allocator.h @@ -341,6 +341,11 @@ hsa_status_t asan_hsa_amd_vmem_address_reserve_align(void** ptr, size_t size, BufferedStackTrace* stack); hsa_status_t asan_hsa_amd_vmem_address_free(void* ptr, size_t size, BufferedStackTrace* stack); +hsa_status_t asan_hsa_amd_pointer_info(const void* ptr, + hsa_amd_pointer_info_t* info, + void* (*alloc)(size_t), + uint32_t* num_agents_accessible, + hsa_agent_t** accessible); } // namespace __asan #endif diff --git a/compiler-rt/lib/asan/asan_interceptors.cpp b/compiler-rt/lib/asan/asan_interceptors.cpp index 0951a77b1b93e..c04d532f909b1 100644 --- a/compiler-rt/lib/asan/asan_interceptors.cpp +++ b/compiler-rt/lib/asan/asan_interceptors.cpp @@ -948,6 +948,15 @@ INTERCEPTOR(hsa_status_t, hsa_amd_vmem_address_free, void* ptr, size_t size) { return asan_hsa_amd_vmem_address_free(ptr, size, &stack); } +INTERCEPTOR(hsa_status_t, hsa_amd_pointer_info, const void* ptr, + hsa_amd_pointer_info_t* info, void* (*alloc)(size_t), + uint32_t* num_agents_accessible, hsa_agent_t** accessible) { + AsanInitFromRtl(); + ENSURE_HSA_INITED(); + return asan_hsa_amd_pointer_info(ptr, info, alloc, num_agents_accessible, + accessible); +} + void InitializeAmdgpuInterceptors() { ASAN_INTERCEPT_FUNC(hsa_memory_copy); ASAN_INTERCEPT_FUNC(hsa_amd_memory_pool_allocate); @@ -962,6 +971,7 @@ void InitializeAmdgpuInterceptors() { ASAN_INTERCEPT_FUNC(hsa_amd_ipc_memory_detach); ASAN_INTERCEPT_FUNC(hsa_amd_vmem_address_reserve_align); ASAN_INTERCEPT_FUNC(hsa_amd_vmem_address_free); + ASAN_INTERCEPT_FUNC(hsa_amd_pointer_info); } void ENSURE_HSA_INITED() { From 14df9891a6a6b3a0cd75917a5130701434122bb2 Mon Sep 17 00:00:00 2001 From: Amit Kumar Pandey Date: Wed, 5 Nov 2025 18:27:24 +0530 Subject: [PATCH 2/8] Address comments of @bing-ma. Improve logic of asan_hsa_amd_ipc_memory_create based on observations. 
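In outline, the reworked asan_hsa_amd_ipc_memory_create distinguishes three cases (illustrative sketch only, simplified from the hunk below; ptr_, len_, m, kPageSize_ and UsedSize() are the names already used in the patch, and the pointer casts are elided here):

  // ptr_ = get_allocator().GetBlockBegin(ptr), len_ = actual allocated size
  if (ptr == ptr_)                    // caller passed the device base address
    REAL(hsa_amd_ipc_memory_create)(ptr_, len_, handle);
  else if (ptr == ptr_ + kPageSize_)  // caller passed the ASan user pointer
    // use len_ when len matches the chunk's UsedSize(), otherwise keep len
    REAL(hsa_amd_ipc_memory_create)(ptr_, len == m->UsedSize() ? len_ : len, handle);
  else                                // pointer unknown to ASan: pass through
    REAL(hsa_amd_ipc_memory_create)(ptr, len, handle);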
--- compiler-rt/lib/asan/asan_allocator.cpp | 36 +++++++++------------- compiler-rt/lib/asan/asan_allocator.h | 5 --- compiler-rt/lib/asan/asan_interceptors.cpp | 10 ------ 3 files changed, 15 insertions(+), 36 deletions(-) diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp index 820b89c3cfdf2..9901c4b8cafd5 100644 --- a/compiler-rt/lib/asan/asan_allocator.cpp +++ b/compiler-rt/lib/asan/asan_allocator.cpp @@ -1400,9 +1400,6 @@ DECLARE_REAL(hsa_status_t, hsa_amd_ipc_memory_detach, void *mapped_ptr) DECLARE_REAL(hsa_status_t, hsa_amd_vmem_address_reserve_align, void** ptr, size_t size, uint64_t address, uint64_t alignment, uint64_t flags) DECLARE_REAL(hsa_status_t, hsa_amd_vmem_address_free, void* ptr, size_t size) -DECLARE_REAL(hsa_status_t, hsa_amd_pointer_info, const void* ptr, - hsa_amd_pointer_info_t* info, void* (*alloc)(size_t), - uint32_t* num_agents_accessible, hsa_agent_t** accessible) namespace __asan { @@ -1457,16 +1454,23 @@ static struct AP32 AP_; hsa_status_t asan_hsa_amd_ipc_memory_create(void* ptr, size_t len, hsa_amd_ipc_memory_t* handle) { - void *ptr_; + static_assert(AP_.kMetadataSize == 0, "Expression below requires this"); + void* ptr_ = get_allocator().GetBlockBegin(ptr); size_t len_ = get_allocator().GetActuallyAllocatedSize(ptr); - if (len_ && len_ != len) { - static_assert(AP_.kMetadataSize == 0, "Expression below requires this"); - ptr_ = reinterpret_cast(reinterpret_cast(ptr) - kPageSize_); - } else { - ptr_ = ptr; - len_ = len; + + uptr p = reinterpret_cast(ptr); + uptr p_ = reinterpret_cast(ptr_); + + if (p == p_) + return REAL(hsa_amd_ipc_memory_create)(ptr_, len_, handle); + + if (p == p_ + kPageSize_) { + AsanChunk* m = instance.GetAsanChunkByAddr(p_); + if (m && len == m->UsedSize()) + return REAL(hsa_amd_ipc_memory_create)(ptr_, len_, handle); + return REAL(hsa_amd_ipc_memory_create)(ptr_, len, handle); } - return REAL(hsa_amd_ipc_memory_create)(ptr_, len_, handle); + return REAL(hsa_amd_ipc_memory_create)(ptr, len, handle); } hsa_status_t asan_hsa_amd_ipc_memory_attach(const hsa_amd_ipc_memory_t *handle, @@ -1543,15 +1547,5 @@ hsa_status_t asan_hsa_amd_vmem_address_free(void* ptr, size_t size, } return REAL(hsa_amd_vmem_address_free)(ptr, size); } - -hsa_status_t asan_hsa_amd_pointer_info(const void* ptr, - hsa_amd_pointer_info_t* info, - void* (*alloc)(size_t), - uint32_t* num_agents_accessible, - hsa_agent_t** accessible) { - void* p = get_allocator().GetBlockBegin(ptr); - return REAL(hsa_amd_pointer_info)(p ? 
p : ptr, info, alloc, - num_agents_accessible, accessible); -} } // namespace __asan #endif diff --git a/compiler-rt/lib/asan/asan_allocator.h b/compiler-rt/lib/asan/asan_allocator.h index f33e8d3b2819e..ced10f62b7a58 100644 --- a/compiler-rt/lib/asan/asan_allocator.h +++ b/compiler-rt/lib/asan/asan_allocator.h @@ -341,11 +341,6 @@ hsa_status_t asan_hsa_amd_vmem_address_reserve_align(void** ptr, size_t size, BufferedStackTrace* stack); hsa_status_t asan_hsa_amd_vmem_address_free(void* ptr, size_t size, BufferedStackTrace* stack); -hsa_status_t asan_hsa_amd_pointer_info(const void* ptr, - hsa_amd_pointer_info_t* info, - void* (*alloc)(size_t), - uint32_t* num_agents_accessible, - hsa_agent_t** accessible); } // namespace __asan #endif diff --git a/compiler-rt/lib/asan/asan_interceptors.cpp b/compiler-rt/lib/asan/asan_interceptors.cpp index c04d532f909b1..0951a77b1b93e 100644 --- a/compiler-rt/lib/asan/asan_interceptors.cpp +++ b/compiler-rt/lib/asan/asan_interceptors.cpp @@ -948,15 +948,6 @@ INTERCEPTOR(hsa_status_t, hsa_amd_vmem_address_free, void* ptr, size_t size) { return asan_hsa_amd_vmem_address_free(ptr, size, &stack); } -INTERCEPTOR(hsa_status_t, hsa_amd_pointer_info, const void* ptr, - hsa_amd_pointer_info_t* info, void* (*alloc)(size_t), - uint32_t* num_agents_accessible, hsa_agent_t** accessible) { - AsanInitFromRtl(); - ENSURE_HSA_INITED(); - return asan_hsa_amd_pointer_info(ptr, info, alloc, num_agents_accessible, - accessible); -} - void InitializeAmdgpuInterceptors() { ASAN_INTERCEPT_FUNC(hsa_memory_copy); ASAN_INTERCEPT_FUNC(hsa_amd_memory_pool_allocate); @@ -971,7 +962,6 @@ void InitializeAmdgpuInterceptors() { ASAN_INTERCEPT_FUNC(hsa_amd_ipc_memory_detach); ASAN_INTERCEPT_FUNC(hsa_amd_vmem_address_reserve_align); ASAN_INTERCEPT_FUNC(hsa_amd_vmem_address_free); - ASAN_INTERCEPT_FUNC(hsa_amd_pointer_info); } void ENSURE_HSA_INITED() { From 1a25efeb447b72f5dc0bdc83b18f60e06d92f01f Mon Sep 17 00:00:00 2001 From: Amit Kumar Pandey Date: Thu, 6 Nov 2025 11:07:17 +0530 Subject: [PATCH 3/8] Improve ASan implementations of hsa api calls based on @bing-ma suggestions. This commit again adds the interception of 'hsa_amd_pointer_info' call. 
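The wrapper forwards to the real hsa_amd_pointer_info only when the queried pointer belongs to an ASan-managed device allocation, and then rewrites the returned info so callers see the user-visible block rather than the redzone-padded one (rough outline of the new asan_hsa_amd_pointer_info below; p and m are the names from the hunk, and the casts on the base addresses are elided):

  void* p = get_allocator().GetBlockBegin(ptr);
  AsanChunk* m = instance.GetAsanChunkByAddr(reinterpret_cast<uptr>(p));
  if (p && m)
    status = REAL(hsa_amd_pointer_info)(ptr, info, alloc,
                                        num_agents_accessible, accessible);
  if (status == HSA_STATUS_SUCCESS && info && p && m) {
    info->agentBaseAddress += kPageSize_;  // skip the one-page left redzone
    info->hostBaseAddress += kPageSize_;
    info->sizeInBytes = m->UsedSize();     // report the originally requested size
  }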
--- compiler-rt/lib/asan/asan_allocator.cpp | 49 ++++++++++++++++------ compiler-rt/lib/asan/asan_allocator.h | 5 +++ compiler-rt/lib/asan/asan_interceptors.cpp | 10 +++++ 3 files changed, 51 insertions(+), 13 deletions(-) diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp index 9901c4b8cafd5..feeffaa5bb8b1 100644 --- a/compiler-rt/lib/asan/asan_allocator.cpp +++ b/compiler-rt/lib/asan/asan_allocator.cpp @@ -1400,6 +1400,9 @@ DECLARE_REAL(hsa_status_t, hsa_amd_ipc_memory_detach, void *mapped_ptr) DECLARE_REAL(hsa_status_t, hsa_amd_vmem_address_reserve_align, void** ptr, size_t size, uint64_t address, uint64_t alignment, uint64_t flags) DECLARE_REAL(hsa_status_t, hsa_amd_vmem_address_free, void* ptr, size_t size) +DECLARE_REAL(hsa_status_t, hsa_amd_pointer_info, const void* ptr, + hsa_amd_pointer_info_t* info, void* (*alloc)(size_t), + uint32_t* num_agents_accessible, hsa_agent_t** accessible) namespace __asan { @@ -1454,21 +1457,18 @@ static struct AP32 AP_; hsa_status_t asan_hsa_amd_ipc_memory_create(void* ptr, size_t len, hsa_amd_ipc_memory_t* handle) { - static_assert(AP_.kMetadataSize == 0, "Expression below requires this"); void* ptr_ = get_allocator().GetBlockBegin(ptr); - size_t len_ = get_allocator().GetActuallyAllocatedSize(ptr); - - uptr p = reinterpret_cast(ptr); - uptr p_ = reinterpret_cast(ptr_); - - if (p == p_) - return REAL(hsa_amd_ipc_memory_create)(ptr_, len_, handle); - - if (p == p_ + kPageSize_) { - AsanChunk* m = instance.GetAsanChunkByAddr(p_); - if (m && len == m->UsedSize()) + AsanChunk* m = ptr_ + ? instance.GetAsanChunkByAddr(reinterpret_cast(ptr_)) + : nullptr; + if (ptr_ && m) { + static_assert(AP_.kMetadataSize == 0, "Expression below requires this"); + uptr p = reinterpret_cast(ptr); + uptr p_ = reinterpret_cast(ptr_); + if (p == p_ + kPageSize_ && len == m->UsedSize()) { + size_t len_ = get_allocator().GetActuallyAllocatedSize(ptr); return REAL(hsa_amd_ipc_memory_create)(ptr_, len_, handle); - return REAL(hsa_amd_ipc_memory_create)(ptr_, len, handle); + } } return REAL(hsa_amd_ipc_memory_create)(ptr, len, handle); } @@ -1547,5 +1547,28 @@ hsa_status_t asan_hsa_amd_vmem_address_free(void* ptr, size_t size, } return REAL(hsa_amd_vmem_address_free)(ptr, size); } + +hsa_status_t asan_hsa_amd_pointer_info(const void* ptr, + hsa_amd_pointer_info_t* info, + void* (*alloc)(size_t), + uint32_t* num_agents_accessible, + hsa_agent_t** accessible) { + void* p = get_allocator().GetBlockBegin(ptr); + AsanChunk* m = instance.GetAsanChunkByAddr(reinterpret_cast(p)); + hsa_status_t status; + if (p && m) + status = REAL(hsa_amd_pointer_info)(ptr, info, alloc, num_agents_accessible, + accessible); + if (status == HSA_STATUS_SUCCESS && info && p && m) { + static_assert(AP_.kMetadataSize == 0, "Expression below requires this"); + info->agentBaseAddress = reinterpret_cast( + reinterpret_cast(info->agentBaseAddress) + kPageSize_); + info->hostBaseAddress = reinterpret_cast( + reinterpret_cast(info->hostBaseAddress) + kPageSize_); + info->sizeInBytes = m->UsedSize(); + } + return status; +} + } // namespace __asan #endif diff --git a/compiler-rt/lib/asan/asan_allocator.h b/compiler-rt/lib/asan/asan_allocator.h index ced10f62b7a58..f33e8d3b2819e 100644 --- a/compiler-rt/lib/asan/asan_allocator.h +++ b/compiler-rt/lib/asan/asan_allocator.h @@ -341,6 +341,11 @@ hsa_status_t asan_hsa_amd_vmem_address_reserve_align(void** ptr, size_t size, BufferedStackTrace* stack); hsa_status_t asan_hsa_amd_vmem_address_free(void* ptr, size_t size, 
BufferedStackTrace* stack); +hsa_status_t asan_hsa_amd_pointer_info(const void* ptr, + hsa_amd_pointer_info_t* info, + void* (*alloc)(size_t), + uint32_t* num_agents_accessible, + hsa_agent_t** accessible); } // namespace __asan #endif diff --git a/compiler-rt/lib/asan/asan_interceptors.cpp b/compiler-rt/lib/asan/asan_interceptors.cpp index 0951a77b1b93e..c04d532f909b1 100644 --- a/compiler-rt/lib/asan/asan_interceptors.cpp +++ b/compiler-rt/lib/asan/asan_interceptors.cpp @@ -948,6 +948,15 @@ INTERCEPTOR(hsa_status_t, hsa_amd_vmem_address_free, void* ptr, size_t size) { return asan_hsa_amd_vmem_address_free(ptr, size, &stack); } +INTERCEPTOR(hsa_status_t, hsa_amd_pointer_info, const void* ptr, + hsa_amd_pointer_info_t* info, void* (*alloc)(size_t), + uint32_t* num_agents_accessible, hsa_agent_t** accessible) { + AsanInitFromRtl(); + ENSURE_HSA_INITED(); + return asan_hsa_amd_pointer_info(ptr, info, alloc, num_agents_accessible, + accessible); +} + void InitializeAmdgpuInterceptors() { ASAN_INTERCEPT_FUNC(hsa_memory_copy); ASAN_INTERCEPT_FUNC(hsa_amd_memory_pool_allocate); @@ -962,6 +971,7 @@ void InitializeAmdgpuInterceptors() { ASAN_INTERCEPT_FUNC(hsa_amd_ipc_memory_detach); ASAN_INTERCEPT_FUNC(hsa_amd_vmem_address_reserve_align); ASAN_INTERCEPT_FUNC(hsa_amd_vmem_address_free); + ASAN_INTERCEPT_FUNC(hsa_amd_pointer_info); } void ENSURE_HSA_INITED() { From c67a73bfbc5a233151d3eebef58df340a7d14656 Mon Sep 17 00:00:00 2001 From: Amit Kumar Pandey Date: Fri, 7 Nov 2025 09:59:18 +0530 Subject: [PATCH 4/8] Address @b-sumner comments. --- compiler-rt/lib/asan/asan_allocator.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp index feeffaa5bb8b1..7b6bc9268f54e 100644 --- a/compiler-rt/lib/asan/asan_allocator.cpp +++ b/compiler-rt/lib/asan/asan_allocator.cpp @@ -1553,13 +1553,13 @@ hsa_status_t asan_hsa_amd_pointer_info(const void* ptr, void* (*alloc)(size_t), uint32_t* num_agents_accessible, hsa_agent_t** accessible) { - void* p = get_allocator().GetBlockBegin(ptr); - AsanChunk* m = instance.GetAsanChunkByAddr(reinterpret_cast(p)); + void* ptr_ = get_allocator().GetBlockBegin(ptr); + AsanChunk* m = instance.GetAsanChunkByAddr(reinterpret_cast(ptr_)); hsa_status_t status; - if (p && m) + if (ptr_ && m) status = REAL(hsa_amd_pointer_info)(ptr, info, alloc, num_agents_accessible, accessible); - if (status == HSA_STATUS_SUCCESS && info && p && m) { + if (status == HSA_STATUS_SUCCESS && info && ptr_ && m) { static_assert(AP_.kMetadataSize == 0, "Expression below requires this"); info->agentBaseAddress = reinterpret_cast( reinterpret_cast(info->agentBaseAddress) + kPageSize_); From 1f9b9c8aa4a81fc63fc50c455a12d395e7ff9aa8 Mon Sep 17 00:00:00 2001 From: Amit Kumar Pandey Date: Sat, 8 Nov 2025 23:28:01 +0530 Subject: [PATCH 5/8] Improve code for edge cases testing. - Changes to 'asan_hsa_amd_pointer_info' - Initialize status with default value 'HSA_STATUS_ERROR_NOT_INITIALIZE'. 
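Defaulting status avoids returning an uninitialized value when the pointer is not tracked by ASan; the real call is made only for ASan-managed allocations (rough outline of the reworked flow, matching the hunk further below; the default used is HSA_STATUS_ERROR_NOT_INITIALIZED):

  hsa_status_t status = HSA_STATUS_ERROR_NOT_INITIALIZED;
  if (ptr_ && m) {
    status = REAL(hsa_amd_pointer_info)(ptr, info, alloc,
                                        num_agents_accessible, accessible);
    if (status == HSA_STATUS_SUCCESS && info) {
      // rebase agentBaseAddress/hostBaseAddress past the redzone page and
      // report m->UsedSize() as sizeInBytes
    }
  }
  return status;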
--- compiler-rt/lib/asan/asan_allocator.cpp | 461 ++++++++++++------------ 1 file changed, 229 insertions(+), 232 deletions(-) diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp index 7b6bc9268f54e..250e82f14e1ec 100644 --- a/compiler-rt/lib/asan/asan_allocator.cpp +++ b/compiler-rt/lib/asan/asan_allocator.cpp @@ -52,9 +52,9 @@ static u32 RZSize2Log(u32 rz_size) { return res; } -static AsanAllocator &get_allocator(); +static AsanAllocator& get_allocator(); -static void AtomicContextStore(volatile atomic_uint64_t *atomic_context, +static void AtomicContextStore(volatile atomic_uint64_t* atomic_context, u32 tid, u32 stack) { u64 context = tid; context <<= 32; @@ -62,8 +62,8 @@ static void AtomicContextStore(volatile atomic_uint64_t *atomic_context, atomic_store(atomic_context, context, memory_order_relaxed); } -static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context, - u32 &tid, u32 &stack) { +static void AtomicContextLoad(const volatile atomic_uint64_t* atomic_context, + u32& tid, u32& stack) { u64 context = atomic_load(atomic_context, memory_order_relaxed); stack = context; context >>= 32; @@ -123,7 +123,7 @@ class ChunkHeader { AtomicContextStore(&alloc_context_id, tid, stack); } - void GetAllocContext(u32 &tid, u32 &stack) const { + void GetAllocContext(u32& tid, u32& stack) const { AtomicContextLoad(&alloc_context_id, tid, stack); } }; @@ -136,7 +136,7 @@ class ChunkBase : public ChunkHeader { AtomicContextStore(&free_context_id, tid, stack); } - void GetFreeContext(u32 &tid, u32 &stack) const { + void GetFreeContext(u32& tid, u32& stack) const { AtomicContextLoad(&free_context_id, tid, stack); } }; @@ -169,16 +169,16 @@ class LargeChunkHeader { static constexpr uptr kAllocBegMagic = FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL); atomic_uintptr_t magic; - AsanChunk *chunk_header; + AsanChunk* chunk_header; public: - AsanChunk *Get() const { + AsanChunk* Get() const { return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic ? chunk_header : nullptr; } - void Set(AsanChunk *p) { + void Set(AsanChunk* p) { if (p) { chunk_header = p; atomic_store(&magic, kAllocBegMagic, memory_order_release); @@ -193,9 +193,9 @@ class LargeChunkHeader { } }; -static void FillChunk(AsanChunk *m) { +static void FillChunk(AsanChunk* m) { // FIXME: Use ReleaseMemoryPagesToOS. - Flags &fl = *flags(); + Flags& fl = *flags(); if (fl.max_free_fill_size > 0) { // We have to skip the chunk header, it contains free_context_id. @@ -203,26 +203,24 @@ static void FillChunk(AsanChunk *m) { if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area. uptr size_to_fill = m->UsedSize() - kChunkHeader2Size; size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size); - REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill); + REAL(memset)((void*)scribble_start, fl.free_fill_byte, size_to_fill); } } } struct QuarantineCallback { - QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack) - : cache_(cache), - stack_(stack) { - } + QuarantineCallback(AllocatorCache* cache, BufferedStackTrace* stack) + : cache_(cache), stack_(stack) {} - void PreQuarantine(AsanChunk *m) const { + void PreQuarantine(AsanChunk* m) const { FillChunk(m); // Poison the region. 
PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY), kAsanHeapFreeMagic); } - void Recycle(AsanChunk *m) const { - void *p = get_allocator().GetBlockBegin(m); + void Recycle(AsanChunk* m) const { + void* p = get_allocator().GetBlockBegin(m); // The secondary will immediately unpoison and unmap the memory, so this // branch is unnecessary. @@ -230,7 +228,7 @@ struct QuarantineCallback { if (p != m) { // Clear the magic value, as allocator internals may overwrite the // contents of deallocated chunk, confusing GetAsanChunk lookup. - reinterpret_cast(p)->Set(nullptr); + reinterpret_cast(p)->Set(nullptr); } u8 old_chunk_state = CHUNK_QUARANTINE; @@ -245,14 +243,14 @@ struct QuarantineCallback { } // Statistics. - AsanStats &thread_stats = GetCurrentThreadStats(); + AsanStats& thread_stats = GetCurrentThreadStats(); thread_stats.real_frees++; thread_stats.really_freed += m->UsedSize(); get_allocator().Deallocate(cache_, p); } - void RecyclePassThrough(AsanChunk *m) const { + void RecyclePassThrough(AsanChunk* m) const { // Recycle for the secondary will immediately unpoison and unmap the // memory, so quarantine preparation is unnecessary. if (get_allocator().FromPrimary(m)) { @@ -262,15 +260,15 @@ struct QuarantineCallback { Recycle(m); } - void *Allocate(uptr size) const { - void *res = get_allocator().Allocate(cache_, size, 1); + void* Allocate(uptr size) const { + void* res = get_allocator().Allocate(cache_, size, 1); // TODO(alekseys): Consider making quarantine OOM-friendly. if (UNLIKELY(!res)) ReportOutOfMemory(size, stack_); return res; } - void Deallocate(void *p) const { get_allocator().Deallocate(cache_, p); } + void Deallocate(void* p) const { get_allocator().Deallocate(cache_, p); } private: AllocatorCache* const cache_; @@ -283,7 +281,7 @@ typedef AsanQuarantine::Cache QuarantineCache; void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const { PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic); // Statistics. - AsanStats &thread_stats = GetCurrentThreadStats(); + AsanStats& thread_stats = GetCurrentThreadStats(); thread_stats.mmaps++; thread_stats.mmaped += size; } @@ -298,7 +296,7 @@ void AsanMapUnmapCallback::OnMapSecondary(uptr p, uptr size, uptr user_begin, PoisonShadow(p, user_begin - p, kAsanHeapLeftRedzoneMagic); PoisonShadow(user_end, size - (user_end - p), kAsanHeapLeftRedzoneMagic); // Statistics. - AsanStats &thread_stats = GetCurrentThreadStats(); + AsanStats& thread_stats = GetCurrentThreadStats(); thread_stats.mmaps++; thread_stats.mmaped += size; } @@ -309,7 +307,7 @@ void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const { // Mark the corresponding shadow memory as not needed. FlushUnneededASanShadowMemory(p, size); // Statistics. - AsanStats &thread_stats = GetCurrentThreadStats(); + AsanStats& thread_stats = GetCurrentThreadStats(); thread_stats.munmaps++; thread_stats.munmaped += size; } @@ -317,18 +315,18 @@ void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const { // We can not use THREADLOCAL because it is not supported on some of the // platforms we care about (OSX 10.6, Android). 
// static THREADLOCAL AllocatorCache cache; -AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) { +AllocatorCache* GetAllocatorCache(AsanThreadLocalMallocStorage* ms) { CHECK(ms); return &ms->allocator_cache; } -QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) { +QuarantineCache* GetQuarantineCache(AsanThreadLocalMallocStorage* ms) { CHECK(ms); CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache)); - return reinterpret_cast(ms->quarantine_cache); + return reinterpret_cast(ms->quarantine_cache); } -void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) { +void AllocatorOptions::SetFrom(const Flags* f, const CommonFlags* cf) { quarantine_size_mb = f->quarantine_size_mb; thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb; min_redzone = f->redzone; @@ -338,7 +336,7 @@ void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) { release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms; } -void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) { +void AllocatorOptions::CopyTo(Flags* f, CommonFlags* cf) { f->quarantine_size_mb = quarantine_size_mb; f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb; f->redzone = min_redzone; @@ -370,7 +368,7 @@ struct Allocator { : quarantine(LINKER_INITIALIZED), fallback_quarantine_cache(LINKER_INITIALIZED) {} - void CheckOptions(const AllocatorOptions &options) const { + void CheckOptions(const AllocatorOptions& options) const { CHECK_GE(options.min_redzone, 16); CHECK_GE(options.max_redzone, options.min_redzone); CHECK_LE(options.max_redzone, 2048); @@ -378,7 +376,7 @@ struct Allocator { CHECK(IsPowerOfTwo(options.max_redzone)); } - void SharedInitCode(const AllocatorOptions &options) { + void SharedInitCode(const AllocatorOptions& options) { CheckOptions(options); quarantine.Init((uptr)options.quarantine_size_mb << 20, (uptr)options.thread_local_quarantine_size_kb << 10); @@ -388,7 +386,7 @@ struct Allocator { atomic_store(&max_redzone, options.max_redzone, memory_order_release); } - void InitLinkerInitialized(const AllocatorOptions &options) { + void InitLinkerInitialized(const AllocatorOptions& options) { SetAllocatorMayReturnNull(options.may_return_null); #if SANITIZER_AMDGPU allocator.InitLinkerInitialized(options.release_to_os_interval_ms, 0, true); @@ -405,8 +403,8 @@ struct Allocator { void RePoisonChunk(uptr chunk) { // This could be a user-facing chunk (with redzones), or some internal // housekeeping chunk, like TransferBatch. Start by assuming the former. - AsanChunk *ac = GetAsanChunk((void *)chunk); - uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk); + AsanChunk* ac = GetAsanChunk((void*)chunk); + uptr allocated_size = allocator.GetActuallyAllocatedSize((void*)chunk); if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) == CHUNK_ALLOCATED) { uptr beg = ac->Beg(); @@ -429,28 +427,28 @@ struct Allocator { } // Apply provided AllocatorOptions to an Allocator - void ApplyOptions(const AllocatorOptions &options) { + void ApplyOptions(const AllocatorOptions& options) { SetAllocatorMayReturnNull(options.may_return_null); allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms); SharedInitCode(options); } - void ReInitialize(const AllocatorOptions &options) { + void ReInitialize(const AllocatorOptions& options) { ApplyOptions(options); // Poison all existing allocation's redzones. 
if (CanPoisonMemory()) { allocator.ForceLock(); allocator.ForEachChunk( - [](uptr chunk, void *alloc) { - ((Allocator *)alloc)->RePoisonChunk(chunk); + [](uptr chunk, void* alloc) { + ((Allocator*)alloc)->RePoisonChunk(chunk); }, this); allocator.ForceUnlock(); } } - void GetOptions(AllocatorOptions *options) const { + void GetOptions(AllocatorOptions* options) const { options->quarantine_size_mb = quarantine.GetMaxSize() >> 20; options->thread_local_quarantine_size_kb = quarantine.GetMaxCacheSize() >> 10; @@ -493,8 +491,8 @@ struct Allocator { } // We have an address between two chunks, and we want to report just one. - AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk, - AsanChunk *right_chunk) { + AsanChunk* ChooseChunk(uptr addr, AsanChunk* left_chunk, + AsanChunk* right_chunk) { if (!left_chunk) return right_chunk; if (!right_chunk) @@ -523,21 +521,23 @@ struct Allocator { return right_chunk; } - bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) { - AsanChunk *m = GetAsanChunkByAddr(addr); - if (!m) return false; + bool UpdateAllocationStack(uptr addr, BufferedStackTrace* stack) { + AsanChunk* m = GetAsanChunkByAddr(addr); + if (!m) + return false; if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED) return false; - if (m->Beg() != addr) return false; - AsanThread *t = GetCurrentThread(); + if (m->Beg() != addr) + return false; + AsanThread* t = GetCurrentThread(); m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack)); return true; } // -------------------- Allocation/Deallocation routines --------------- - void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack, + void* Allocate(uptr size, uptr alignment, BufferedStackTrace* stack, AllocType alloc_type, bool can_fill, - DeviceAllocationInfo *da_info = nullptr) { + DeviceAllocationInfo* da_info = nullptr) { if (UNLIKELY(!AsanInited())) AsanInitFromRtl(); if (UNLIKELY(IsRssLimitExceeded())) { @@ -545,7 +545,7 @@ struct Allocator { return nullptr; ReportRssLimitExceeded(stack); } - Flags &fl = *flags(); + Flags& fl = *flags(); CHECK(stack); const uptr min_alignment = ASAN_SHADOW_GRANULARITY; const uptr user_requested_alignment_log = @@ -588,14 +588,14 @@ struct Allocator { ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack); } - AsanThread *t = GetCurrentThread(); - void *allocated; + AsanThread* t = GetCurrentThread(); + void* allocated; if (t) { - AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); + AllocatorCache* cache = GetAllocatorCache(&t->malloc_storage()); allocated = allocator.Allocate(cache, needed_size, 8, da_info); } else { SpinMutexLock l(&fallback_mutex); - AllocatorCache *cache = &fallback_allocator_cache; + AllocatorCache* cache = &fallback_allocator_cache; allocated = allocator.Allocate(cache, needed_size, 8, da_info); } if (UNLIKELY(!allocated)) { @@ -613,7 +613,7 @@ struct Allocator { uptr user_end = user_beg + size; CHECK_LE(user_end, alloc_end); uptr chunk_beg = user_beg - kChunkHeaderSize; - AsanChunk *m = reinterpret_cast(chunk_beg); + AsanChunk* m = reinterpret_cast(chunk_beg); m->alloc_type = alloc_type; CHECK(size); m->SetUsedSize(size); @@ -621,7 +621,7 @@ struct Allocator { m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack)); - if (!from_primary || *(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0) { + if (!from_primary || *(u8*)MEM_TO_SHADOW((uptr)allocated) == 0) { // The allocator provides an unpoisoned chunk. 
This is possible for the // secondary allocator, or if CanPoisonMemory() was false for some time, // for example, due to flags()->start_disabled. Anyway, poison left and @@ -639,8 +639,8 @@ struct Allocator { PoisonShadow(user_beg, size_rounded_down_to_granularity, 0); // Deal with the end of the region if size is not aligned to granularity. if (size != size_rounded_down_to_granularity && CanPoisonMemory()) { - u8 *shadow = - (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity); + u8* shadow = + (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity); *shadow = fl.poison_partial ? (size & (ASAN_SHADOW_GRANULARITY - 1)) : 0; } @@ -648,7 +648,7 @@ struct Allocator { PoisonShadow(user_beg, ASAN_SHADOW_GRANULARITY, kAsanHeapLeftRedzoneMagic); - AsanStats &thread_stats = GetCurrentThreadStats(); + AsanStats& thread_stats = GetCurrentThreadStats(); thread_stats.mallocs++; thread_stats.malloced += size; thread_stats.malloced_redzones += needed_size - size; @@ -657,7 +657,7 @@ struct Allocator { else thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++; - void *res = reinterpret_cast(user_beg); + void* res = reinterpret_cast(user_beg); if (can_fill && fl.max_malloc_fill_size) { uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size); REAL(memset)(res, fl.malloc_fill_byte, fill_size); @@ -670,7 +670,7 @@ struct Allocator { atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release); if (alloc_beg != chunk_beg) { CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg); - reinterpret_cast(alloc_beg)->Set(m); + reinterpret_cast(alloc_beg)->Set(m); } RunMallocHooks(res, size); return res; @@ -678,8 +678,8 @@ struct Allocator { // Set quarantine flag if chunk is allocated, issue ASan error report on // available and quarantined chunks. Return true on success, false otherwise. - bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr, - BufferedStackTrace *stack) { + bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk* m, void* ptr, + BufferedStackTrace* stack) { u8 old_chunk_state = CHUNK_ALLOCATED; // Flip the chunk_state atomically to avoid race on double-free. if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state, @@ -697,38 +697,38 @@ struct Allocator { // Expects the chunk to already be marked as quarantined by using // AtomicallySetQuarantineFlagIfAllocated. - void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) { + void QuarantineChunk(AsanChunk* m, void* ptr, BufferedStackTrace* stack) { CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed), CHUNK_QUARANTINE); - AsanThread *t = GetCurrentThread(); + AsanThread* t = GetCurrentThread(); m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack)); // Push into quarantine. 
if (t) { - AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); - AllocatorCache *ac = GetAllocatorCache(ms); + AsanThreadLocalMallocStorage* ms = &t->malloc_storage(); + AllocatorCache* ac = GetAllocatorCache(ms); quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m, m->UsedSize()); } else { SpinMutexLock l(&fallback_mutex); - AllocatorCache *ac = &fallback_allocator_cache; + AllocatorCache* ac = &fallback_allocator_cache; quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack), m, m->UsedSize()); } } - void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment, - BufferedStackTrace *stack, AllocType alloc_type) { + void Deallocate(void* ptr, uptr delete_size, uptr delete_alignment, + BufferedStackTrace* stack, AllocType alloc_type) { uptr p = reinterpret_cast(ptr); - if (p == 0) return; + if (p == 0) + return; uptr chunk_beg = p - kChunkHeaderSize; - AsanChunk *m = reinterpret_cast(chunk_beg); + AsanChunk* m = reinterpret_cast(chunk_beg); // On Windows, uninstrumented DLLs may allocate memory before ASan hooks // malloc. Don't report an invalid free in this case. - if (SANITIZER_WINDOWS && - !get_allocator().PointerIsMine(ptr)) { + if (SANITIZER_WINDOWS && !get_allocator().PointerIsMine(ptr)) { if (!IsSystemHeapAddress(p)) ReportFreeNotMalloced(p, stack); return; @@ -746,7 +746,8 @@ struct Allocator { // Must mark the chunk as quarantined before any changes to its metadata. // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag. - if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return; + if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) + return; if (m->alloc_type != alloc_type) { if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire) && @@ -764,24 +765,24 @@ struct Allocator { } } - AsanStats &thread_stats = GetCurrentThreadStats(); + AsanStats& thread_stats = GetCurrentThreadStats(); thread_stats.frees++; thread_stats.freed += m->UsedSize(); QuarantineChunk(m, ptr, stack); } - void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) { + void* Reallocate(void* old_ptr, uptr new_size, BufferedStackTrace* stack) { CHECK(old_ptr && new_size); uptr p = reinterpret_cast(old_ptr); uptr chunk_beg = p - kChunkHeaderSize; - AsanChunk *m = reinterpret_cast(chunk_beg); + AsanChunk* m = reinterpret_cast(chunk_beg); - AsanStats &thread_stats = GetCurrentThreadStats(); + AsanStats& thread_stats = GetCurrentThreadStats(); thread_stats.reallocs++; thread_stats.realloced += new_size; - void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true); + void* new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true); if (new_ptr) { u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire); if (chunk_state != CHUNK_ALLOCATED) @@ -796,13 +797,13 @@ struct Allocator { return new_ptr; } - void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { + void* Calloc(uptr nmemb, uptr size, BufferedStackTrace* stack) { if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { if (AllocatorMayReturnNull()) return nullptr; ReportCallocOverflow(nmemb, size, stack); } - void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false); + void* ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false); // If the memory comes from the secondary allocator no need to clear it // as it comes directly from mmap. 
if (ptr && allocator.FromPrimary(ptr)) @@ -810,15 +811,15 @@ struct Allocator { return ptr; } - void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) { + void ReportInvalidFree(void* ptr, u8 chunk_state, BufferedStackTrace* stack) { if (chunk_state == CHUNK_QUARANTINE) ReportDoubleFree((uptr)ptr, stack); else ReportFreeNotMalloced((uptr)ptr, stack); } - void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) { - AllocatorCache *ac = GetAllocatorCache(ms); + void CommitBack(AsanThreadLocalMallocStorage* ms, BufferedStackTrace* stack) { + AllocatorCache* ac = GetAllocatorCache(ms); quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack)); allocator.SwallowCache(ac); } @@ -829,14 +830,14 @@ struct Allocator { // Returns nullptr if AsanChunk is not yet initialized just after // get_allocator().Allocate(), or is being destroyed just before // get_allocator().Deallocate(). - AsanChunk *GetAsanChunk(void *alloc_beg) { + AsanChunk* GetAsanChunk(void* alloc_beg) { if (!alloc_beg) return nullptr; - AsanChunk *p = reinterpret_cast(alloc_beg)->Get(); + AsanChunk* p = reinterpret_cast(alloc_beg)->Get(); if (!p) { if (!allocator.FromPrimary(alloc_beg)) return nullptr; - p = reinterpret_cast(alloc_beg); + p = reinterpret_cast(alloc_beg); } u8 state = atomic_load(&p->chunk_state, memory_order_relaxed); // It does not guaranty that Chunk is initialized, but it's @@ -846,42 +847,45 @@ struct Allocator { return nullptr; } - AsanChunk *GetAsanChunkByAddr(uptr p) { - void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast(p)); + AsanChunk* GetAsanChunkByAddr(uptr p) { + void* alloc_beg = allocator.GetBlockBegin(reinterpret_cast(p)); return GetAsanChunk(alloc_beg); } // Allocator must be locked when this function is called. - AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) { - void *alloc_beg = - allocator.GetBlockBeginFastLocked(reinterpret_cast(p)); + AsanChunk* GetAsanChunkByAddrFastLocked(uptr p) { + void* alloc_beg = + allocator.GetBlockBeginFastLocked(reinterpret_cast(p)); return GetAsanChunk(alloc_beg); } uptr AllocationSize(uptr p) { - AsanChunk *m = GetAsanChunkByAddr(p); - if (!m) return 0; + AsanChunk* m = GetAsanChunkByAddr(p); + if (!m) + return 0; if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED) return 0; - if (m->Beg() != p) return 0; + if (m->Beg() != p) + return 0; return m->UsedSize(); } uptr AllocationSizeFast(uptr p) { - return reinterpret_cast(p - kChunkHeaderSize)->UsedSize(); + return reinterpret_cast(p - kChunkHeaderSize)->UsedSize(); } AsanChunkView FindHeapChunkByAddress(uptr addr) { - AsanChunk *m1 = GetAsanChunkByAddr(addr); + AsanChunk* m1 = GetAsanChunkByAddr(addr); sptr offset = 0; if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) { // The address is in the chunk's left redzone, so maybe it is actually // a right buffer overflow from the other chunk before. // Search a bit before to see if there is another chunk. - AsanChunk *m2 = nullptr; + AsanChunk* m2 = nullptr; for (uptr l = 1; l < GetPageSizeCached(); l++) { m2 = GetAsanChunkByAddr(addr - l); - if (m2 == m1) continue; // Still the same chunk. + if (m2 == m1) + continue; // Still the same chunk. 
break; } if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset)) @@ -890,19 +894,19 @@ struct Allocator { return AsanChunkView(m1); } - void Purge(BufferedStackTrace *stack) { - AsanThread *t = GetCurrentThread(); + void Purge(BufferedStackTrace* stack) { + AsanThread* t = GetCurrentThread(); if (t) { - AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); - quarantine.DrainAndRecycle(GetQuarantineCache(ms), - QuarantineCallback(GetAllocatorCache(ms), - stack)); + AsanThreadLocalMallocStorage* ms = &t->malloc_storage(); + quarantine.DrainAndRecycle( + GetQuarantineCache(ms), + QuarantineCallback(GetAllocatorCache(ms), stack)); } { SpinMutexLock l(&fallback_mutex); - quarantine.DrainAndRecycle(&fallback_quarantine_cache, - QuarantineCallback(&fallback_allocator_cache, - stack)); + quarantine.DrainAndRecycle( + &fallback_quarantine_cache, + QuarantineCallback(&fallback_allocator_cache, stack)); } allocator.ForceReleaseToOS(); @@ -926,9 +930,7 @@ struct Allocator { static Allocator instance(LINKER_INITIALIZED); -static AsanAllocator &get_allocator() { - return instance.allocator; -} +static AsanAllocator& get_allocator() { return instance.allocator; } bool AsanChunkView::IsValid() const { return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) != @@ -985,20 +987,20 @@ u32 AsanChunkView::GetFreeStackId() const { return stack; } -void InitializeAllocator(const AllocatorOptions &options) { +void InitializeAllocator(const AllocatorOptions& options) { instance.InitLinkerInitialized(options); } -void ReInitializeAllocator(const AllocatorOptions &options) { +void ReInitializeAllocator(const AllocatorOptions& options) { instance.ReInitialize(options); } // Apply provided AllocatorOptions to an Allocator -void ApplyAllocatorOptions(const AllocatorOptions &options) { +void ApplyAllocatorOptions(const AllocatorOptions& options) { instance.ApplyOptions(options); } -void GetAllocatorOptions(AllocatorOptions *options) { +void GetAllocatorOptions(AllocatorOptions* options) { instance.GetOptions(options); } @@ -1014,24 +1016,22 @@ void AsanThreadLocalMallocStorage::CommitBack() { instance.CommitBack(this, &stack); } -void PrintInternalAllocatorStats() { - instance.PrintStats(); -} +void PrintInternalAllocatorStats() { instance.PrintStats(); } -void asan_free(void *ptr, BufferedStackTrace *stack) { +void asan_free(void* ptr, BufferedStackTrace* stack) { instance.Deallocate(ptr, 0, 0, stack, FROM_MALLOC); } -void *asan_malloc(uptr size, BufferedStackTrace *stack) { +void* asan_malloc(uptr size, BufferedStackTrace* stack) { return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true)); } -void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { +void* asan_calloc(uptr nmemb, uptr size, BufferedStackTrace* stack) { return SetErrnoOnNull(instance.Calloc(nmemb, size, stack)); } -void *asan_reallocarray(void *p, uptr nmemb, uptr size, - BufferedStackTrace *stack) { +void* asan_reallocarray(void* p, uptr nmemb, uptr size, + BufferedStackTrace* stack) { if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { errno = errno_ENOMEM; if (AllocatorMayReturnNull()) @@ -1041,7 +1041,7 @@ void *asan_reallocarray(void *p, uptr nmemb, uptr size, return asan_realloc(p, nmemb * size, stack); } -void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) { +void* asan_realloc(void* p, uptr size, BufferedStackTrace* stack) { if (!p) return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true)); if (size == 0) { @@ -1055,12 +1055,12 @@ void *asan_realloc(void *p, 
uptr size, BufferedStackTrace *stack) { return SetErrnoOnNull(instance.Reallocate(p, size, stack)); } -void *asan_valloc(uptr size, BufferedStackTrace *stack) { +void* asan_valloc(uptr size, BufferedStackTrace* stack) { return SetErrnoOnNull( instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true)); } -void *asan_pvalloc(uptr size, BufferedStackTrace *stack) { +void* asan_pvalloc(uptr size, BufferedStackTrace* stack) { uptr PageSize = GetPageSizeCached(); if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) { errno = errno_ENOMEM; @@ -1074,7 +1074,7 @@ void *asan_pvalloc(uptr size, BufferedStackTrace *stack) { instance.Allocate(size, PageSize, stack, FROM_MALLOC, true)); } -void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack) { +void* asan_memalign(uptr alignment, uptr size, BufferedStackTrace* stack) { if (UNLIKELY(!IsPowerOfTwo(alignment))) { errno = errno_EINVAL; if (AllocatorMayReturnNull()) @@ -1085,7 +1085,7 @@ void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack) { instance.Allocate(size, alignment, stack, FROM_MALLOC, true)); } -void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) { +void* asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace* stack) { if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) { errno = errno_EINVAL; if (AllocatorMayReturnNull()) @@ -1096,14 +1096,14 @@ void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) { instance.Allocate(size, alignment, stack, FROM_MALLOC, true)); } -int asan_posix_memalign(void **memptr, uptr alignment, uptr size, - BufferedStackTrace *stack) { +int asan_posix_memalign(void** memptr, uptr alignment, uptr size, + BufferedStackTrace* stack) { if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) { if (AllocatorMayReturnNull()) return errno_EINVAL; ReportInvalidPosixMemalignAlignment(alignment, stack); } - void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true); + void* ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true); if (UNLIKELY(!ptr)) // OOM error is already taken care of by Allocate. return errno_ENOMEM; @@ -1112,8 +1112,9 @@ int asan_posix_memalign(void **memptr, uptr alignment, uptr size, return 0; } -uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) { - if (!ptr) return 0; +uptr asan_malloc_usable_size(const void* ptr, uptr pc, uptr bp) { + if (!ptr) + return 0; uptr usable_size = instance.AllocationSize(reinterpret_cast(ptr)); if (flags()->check_malloc_usable_size && (usable_size == 0)) { GET_STACK_TRACE_FATAL(pc, bp); @@ -1124,12 +1125,12 @@ uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) { namespace { -void *asan_new(uptr size, BufferedStackTrace *stack, bool array) { +void* asan_new(uptr size, BufferedStackTrace* stack, bool array) { return SetErrnoOnNull( instance.Allocate(size, 0, stack, array ? FROM_NEW_BR : FROM_NEW, true)); } -void *asan_new_aligned(uptr size, uptr alignment, BufferedStackTrace *stack, +void* asan_new_aligned(uptr size, uptr alignment, BufferedStackTrace* stack, bool array) { if (UNLIKELY(alignment == 0 || !IsPowerOfTwo(alignment))) { errno = errno_EINVAL; @@ -1141,81 +1142,81 @@ void *asan_new_aligned(uptr size, uptr alignment, BufferedStackTrace *stack, size, alignment, stack, array ? FROM_NEW_BR : FROM_NEW, true)); } -void asan_delete(void *ptr, BufferedStackTrace *stack, bool array) { +void asan_delete(void* ptr, BufferedStackTrace* stack, bool array) { instance.Deallocate(ptr, 0, 0, stack, array ? 
FROM_NEW_BR : FROM_NEW); } -void asan_delete_aligned(void *ptr, uptr alignment, BufferedStackTrace *stack, +void asan_delete_aligned(void* ptr, uptr alignment, BufferedStackTrace* stack, bool array) { instance.Deallocate(ptr, 0, alignment, stack, array ? FROM_NEW_BR : FROM_NEW); } -void asan_delete_sized(void *ptr, uptr size, BufferedStackTrace *stack, +void asan_delete_sized(void* ptr, uptr size, BufferedStackTrace* stack, bool array) { instance.Deallocate(ptr, size, 0, stack, array ? FROM_NEW_BR : FROM_NEW); } -void asan_delete_sized_aligned(void *ptr, uptr size, uptr alignment, - BufferedStackTrace *stack, bool array) { +void asan_delete_sized_aligned(void* ptr, uptr size, uptr alignment, + BufferedStackTrace* stack, bool array) { instance.Deallocate(ptr, size, alignment, stack, array ? FROM_NEW_BR : FROM_NEW); } } // namespace -void *asan_new(uptr size, BufferedStackTrace *stack) { +void* asan_new(uptr size, BufferedStackTrace* stack) { return asan_new(size, stack, /*array=*/false); } -void *asan_new_aligned(uptr size, uptr alignment, BufferedStackTrace *stack) { +void* asan_new_aligned(uptr size, uptr alignment, BufferedStackTrace* stack) { return asan_new_aligned(size, alignment, stack, /*array=*/false); } -void *asan_new_array(uptr size, BufferedStackTrace *stack) { +void* asan_new_array(uptr size, BufferedStackTrace* stack) { return asan_new(size, stack, /*array=*/true); } -void *asan_new_array_aligned(uptr size, uptr alignment, - BufferedStackTrace *stack) { +void* asan_new_array_aligned(uptr size, uptr alignment, + BufferedStackTrace* stack) { return asan_new_aligned(size, alignment, stack, /*array=*/true); } -void asan_delete(void *ptr, BufferedStackTrace *stack) { +void asan_delete(void* ptr, BufferedStackTrace* stack) { asan_delete(ptr, stack, /*array=*/false); } -void asan_delete_aligned(void *ptr, uptr alignment, BufferedStackTrace *stack) { +void asan_delete_aligned(void* ptr, uptr alignment, BufferedStackTrace* stack) { asan_delete_aligned(ptr, alignment, stack, /*array=*/false); } -void asan_delete_sized(void *ptr, uptr size, BufferedStackTrace *stack) { +void asan_delete_sized(void* ptr, uptr size, BufferedStackTrace* stack) { asan_delete_sized(ptr, size, stack, /*array=*/false); } -void asan_delete_sized_aligned(void *ptr, uptr size, uptr alignment, - BufferedStackTrace *stack) { +void asan_delete_sized_aligned(void* ptr, uptr size, uptr alignment, + BufferedStackTrace* stack) { asan_delete_sized_aligned(ptr, size, alignment, stack, /*array=*/false); } -void asan_delete_array(void *ptr, BufferedStackTrace *stack) { +void asan_delete_array(void* ptr, BufferedStackTrace* stack) { asan_delete(ptr, stack, /*array=*/true); } -void asan_delete_array_aligned(void *ptr, uptr alignment, - BufferedStackTrace *stack) { +void asan_delete_array_aligned(void* ptr, uptr alignment, + BufferedStackTrace* stack) { asan_delete_aligned(ptr, alignment, stack, /*array=*/true); } -void asan_delete_array_sized(void *ptr, uptr size, BufferedStackTrace *stack) { +void asan_delete_array_sized(void* ptr, uptr size, BufferedStackTrace* stack) { asan_delete_sized(ptr, size, stack, /*array=*/true); } -void asan_delete_array_sized_aligned(void *ptr, uptr size, uptr alignment, - BufferedStackTrace *stack) { +void asan_delete_array_sized_aligned(void* ptr, uptr size, uptr alignment, + BufferedStackTrace* stack) { asan_delete_sized_aligned(ptr, size, alignment, stack, /*array=*/true); } -uptr asan_mz_size(const void *ptr) { +uptr asan_mz_size(const void* ptr) { return 
instance.AllocationSize(reinterpret_cast(ptr)); } @@ -1231,22 +1232,18 @@ void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { // --- Implementation of LSan-specific functions --- {{{1 namespace __lsan { -void LockAllocator() { - __asan::get_allocator().ForceLock(); -} +void LockAllocator() { __asan::get_allocator().ForceLock(); } -void UnlockAllocator() { - __asan::get_allocator().ForceUnlock(); -} +void UnlockAllocator() { __asan::get_allocator().ForceUnlock(); } -void GetAllocatorGlobalRange(uptr *begin, uptr *end) { +void GetAllocatorGlobalRange(uptr* begin, uptr* end) { *begin = (uptr)&__asan::get_allocator(); *end = *begin + sizeof(__asan::get_allocator()); } -uptr PointsIntoChunk(void *p) { +uptr PointsIntoChunk(void* p) { uptr addr = reinterpret_cast(p); - __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr); + __asan::AsanChunk* m = __asan::instance.GetAsanChunkByAddrFastLocked(addr); if (!m || atomic_load(&m->chunk_state, memory_order_acquire) != __asan::CHUNK_ALLOCATED) return 0; @@ -1261,57 +1258,55 @@ uptr PointsIntoChunk(void *p) { uptr GetUserBegin(uptr chunk) { // FIXME: All usecases provide chunk address, GetAsanChunkByAddrFastLocked is // not needed. - __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk); + __asan::AsanChunk* m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk); return m ? m->Beg() : 0; } -uptr GetUserAddr(uptr chunk) { - return chunk; -} +uptr GetUserAddr(uptr chunk) { return chunk; } LsanMetadata::LsanMetadata(uptr chunk) { - metadata_ = chunk ? reinterpret_cast(chunk - __asan::kChunkHeaderSize) + metadata_ = chunk ? reinterpret_cast(chunk - __asan::kChunkHeaderSize) : nullptr; } bool LsanMetadata::allocated() const { if (!metadata_) return false; - __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + __asan::AsanChunk* m = reinterpret_cast<__asan::AsanChunk*>(metadata_); return atomic_load(&m->chunk_state, memory_order_relaxed) == __asan::CHUNK_ALLOCATED; } ChunkTag LsanMetadata::tag() const { - __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + __asan::AsanChunk* m = reinterpret_cast<__asan::AsanChunk*>(metadata_); return static_cast(m->lsan_tag); } void LsanMetadata::set_tag(ChunkTag value) { - __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + __asan::AsanChunk* m = reinterpret_cast<__asan::AsanChunk*>(metadata_); m->lsan_tag = value; } uptr LsanMetadata::requested_size() const { - __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + __asan::AsanChunk* m = reinterpret_cast<__asan::AsanChunk*>(metadata_); return m->UsedSize(); } u32 LsanMetadata::stack_trace_id() const { - __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + __asan::AsanChunk* m = reinterpret_cast<__asan::AsanChunk*>(metadata_); u32 tid = 0; u32 stack = 0; m->GetAllocContext(tid, stack); return stack; } -void ForEachChunk(ForEachChunkCallback callback, void *arg) { +void ForEachChunk(ForEachChunkCallback callback, void* arg) { __asan::get_allocator().ForEachChunk(callback, arg); } -IgnoreObjectResult IgnoreObject(const void *p) { +IgnoreObjectResult IgnoreObject(const void* p) { uptr addr = reinterpret_cast(p); - __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr); + __asan::AsanChunk* m = __asan::instance.GetAsanChunkByAddr(addr); if (!m || (atomic_load(&m->chunk_state, memory_order_acquire) != __asan::CHUNK_ALLOCATED) || @@ -1329,30 +1324,29 @@ IgnoreObjectResult IgnoreObject(const void *p) { // 
---------------------- Interface ---------------- {{{1 using namespace __asan; -static const void *AllocationBegin(const void *p) { - AsanChunk *m = __asan::instance.GetAsanChunkByAddr((uptr)p); +static const void* AllocationBegin(const void* p) { + AsanChunk* m = __asan::instance.GetAsanChunkByAddr((uptr)p); if (!m) return nullptr; if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED) return nullptr; if (m->UsedSize() == 0) return nullptr; - return (const void *)(m->Beg()); + return (const void*)(m->Beg()); } // ASan allocator doesn't reserve extra bytes, so normally we would // just return "size". We don't want to expose our redzone sizes, etc here. -uptr __sanitizer_get_estimated_allocated_size(uptr size) { - return size; -} +uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; } -int __sanitizer_get_ownership(const void *p) { +int __sanitizer_get_ownership(const void* p) { uptr ptr = reinterpret_cast(p); return instance.AllocationSize(ptr) > 0; } -uptr __sanitizer_get_allocated_size(const void *p) { - if (!p) return 0; +uptr __sanitizer_get_allocated_size(const void* p) { + if (!p) + return 0; uptr ptr = reinterpret_cast(p); uptr allocated_size = instance.AllocationSize(ptr); // Die if p is not malloced or if it is already freed. @@ -1363,14 +1357,14 @@ uptr __sanitizer_get_allocated_size(const void *p) { return allocated_size; } -uptr __sanitizer_get_allocated_size_fast(const void *p) { +uptr __sanitizer_get_allocated_size_fast(const void* p) { DCHECK_EQ(p, __sanitizer_get_allocated_begin(p)); uptr ret = instance.AllocationSizeFast(reinterpret_cast(p)); DCHECK_EQ(ret, __sanitizer_get_allocated_size(p)); return ret; } -const void *__sanitizer_get_allocated_begin(const void *p) { +const void* __sanitizer_get_allocated_begin(const void* p) { return AllocationBegin(p); } @@ -1386,17 +1380,18 @@ int __asan_update_allocation_context(void* addr) { #if SANITIZER_AMDGPU DECLARE_REAL(hsa_status_t, hsa_amd_agents_allow_access, uint32_t num_agents, - const hsa_agent_t *agents, const uint32_t *flags, const void *ptr) + const hsa_agent_t* agents, const uint32_t* flags, const void* ptr) DECLARE_REAL(hsa_status_t, hsa_amd_memory_pool_allocate, - hsa_amd_memory_pool_t memory_pool, size_t size, uint32_t flags, - void **ptr) -DECLARE_REAL(hsa_status_t, hsa_amd_memory_pool_free, void *ptr) -DECLARE_REAL(hsa_status_t, hsa_amd_ipc_memory_create, void *ptr, size_t len, - hsa_amd_ipc_memory_t *handle) + hsa_amd_memory_pool_t memory_pool, size_t size, uint32_t flags, + void** ptr) +DECLARE_REAL(hsa_status_t, hsa_amd_memory_pool_free, void* ptr) +DECLARE_REAL(hsa_status_t, hsa_amd_ipc_memory_create, void* ptr, size_t len, + hsa_amd_ipc_memory_t* handle) DECLARE_REAL(hsa_status_t, hsa_amd_ipc_memory_attach, - const hsa_amd_ipc_memory_t *handle, size_t len, uint32_t num_agents, - const hsa_agent_t *mapping_agents, void **mapped_ptr) -DECLARE_REAL(hsa_status_t, hsa_amd_ipc_memory_detach, void *mapped_ptr) + const hsa_amd_ipc_memory_t* handle, size_t len, + uint32_t num_agents, const hsa_agent_t* mapping_agents, + void** mapped_ptr) +DECLARE_REAL(hsa_status_t, hsa_amd_ipc_memory_detach, void* mapped_ptr) DECLARE_REAL(hsa_status_t, hsa_amd_vmem_address_reserve_align, void** ptr, size_t size, uint64_t address, uint64_t alignment, uint64_t flags) DECLARE_REAL(hsa_status_t, hsa_amd_vmem_address_free, void* ptr, size_t size) @@ -1410,24 +1405,23 @@ namespace __asan { static const size_t kPageSize_ = 4096; hsa_status_t asan_hsa_amd_memory_pool_allocate( - hsa_amd_memory_pool_t 
memory_pool, size_t size, uint32_t flags, void **ptr, - BufferedStackTrace *stack) { + hsa_amd_memory_pool_t memory_pool, size_t size, uint32_t flags, void** ptr, + BufferedStackTrace* stack) { AmdgpuAllocationInfo aa_info; aa_info.alloc_func = - reinterpret_cast(asan_hsa_amd_memory_pool_allocate); + reinterpret_cast(asan_hsa_amd_memory_pool_allocate); aa_info.memory_pool = memory_pool; aa_info.size = size; aa_info.flags = flags; aa_info.ptr = nullptr; - SetErrnoOnNull(*ptr = instance.Allocate(size, kPageSize_, stack, - FROM_MALLOC, false, &aa_info)); + SetErrnoOnNull(*ptr = instance.Allocate(size, kPageSize_, stack, FROM_MALLOC, + false, &aa_info)); return aa_info.status; } -hsa_status_t asan_hsa_amd_memory_pool_free( - void *ptr, - BufferedStackTrace *stack) { - void *p = get_allocator().GetBlockBegin(ptr); +hsa_status_t asan_hsa_amd_memory_pool_free(void* ptr, + BufferedStackTrace* stack) { + void* p = get_allocator().GetBlockBegin(ptr); if (p) { instance.Deallocate(ptr, 0, 0, stack, FROM_MALLOC); return HSA_STATUS_SUCCESS; @@ -1435,11 +1429,12 @@ hsa_status_t asan_hsa_amd_memory_pool_free( return REAL(hsa_amd_memory_pool_free)(ptr); } -hsa_status_t asan_hsa_amd_agents_allow_access( - uint32_t num_agents, const hsa_agent_t *agents, const uint32_t *flags, - const void *ptr, - BufferedStackTrace *stack) { - void *p = get_allocator().GetBlockBegin(ptr); +hsa_status_t asan_hsa_amd_agents_allow_access(uint32_t num_agents, + const hsa_agent_t* agents, + const uint32_t* flags, + const void* ptr, + BufferedStackTrace* stack) { + void* p = get_allocator().GetBlockBegin(ptr); return REAL(hsa_amd_agents_allow_access)(num_agents, agents, flags, p ? p : ptr); } @@ -1449,11 +1444,11 @@ hsa_status_t asan_hsa_amd_agents_allow_access( // is always one kPageSize_ // IPC calls use static_assert to make sure kMetadataSize = 0 // -#if SANITIZER_CAN_USE_ALLOCATOR64 +# if SANITIZER_CAN_USE_ALLOCATOR64 static struct AP64 AP_; -#else +# else static struct AP32 AP_; -#endif +# endif hsa_status_t asan_hsa_amd_ipc_memory_create(void* ptr, size_t len, hsa_amd_ipc_memory_t* handle) { @@ -1473,24 +1468,25 @@ hsa_status_t asan_hsa_amd_ipc_memory_create(void* ptr, size_t len, return REAL(hsa_amd_ipc_memory_create)(ptr, len, handle); } -hsa_status_t asan_hsa_amd_ipc_memory_attach(const hsa_amd_ipc_memory_t *handle, - size_t len, uint32_t num_agents, const hsa_agent_t *mapping_agents, - void **mapped_ptr) { +hsa_status_t asan_hsa_amd_ipc_memory_attach(const hsa_amd_ipc_memory_t* handle, + size_t len, uint32_t num_agents, + const hsa_agent_t* mapping_agents, + void** mapped_ptr) { static_assert(AP_.kMetadataSize == 0, "Expression below requires this"); size_t len_ = len + kPageSize_; hsa_status_t status = REAL(hsa_amd_ipc_memory_attach)( - handle, len_, num_agents, mapping_agents, mapped_ptr); + handle, len_, num_agents, mapping_agents, mapped_ptr); if (status == HSA_STATUS_SUCCESS && mapped_ptr) { - *mapped_ptr = reinterpret_cast(reinterpret_cast(*mapped_ptr) + - kPageSize_); + *mapped_ptr = reinterpret_cast(reinterpret_cast(*mapped_ptr) + + kPageSize_); } return status; } -hsa_status_t asan_hsa_amd_ipc_memory_detach(void *mapped_ptr) { +hsa_status_t asan_hsa_amd_ipc_memory_detach(void* mapped_ptr) { static_assert(AP_.kMetadataSize == 0, "Expression below requires this"); - void *mapped_ptr_ = - reinterpret_cast(reinterpret_cast(mapped_ptr) - kPageSize_); + void* mapped_ptr_ = + reinterpret_cast(reinterpret_cast(mapped_ptr) - kPageSize_); return REAL(hsa_amd_ipc_memory_detach)(mapped_ptr_); } @@ -1555,11 +1551,11 @@ 
hsa_status_t asan_hsa_amd_pointer_info(const void* ptr, hsa_agent_t** accessible) { void* ptr_ = get_allocator().GetBlockBegin(ptr); AsanChunk* m = instance.GetAsanChunkByAddr(reinterpret_cast(ptr_)); - hsa_status_t status; - if (ptr_ && m) + hsa_status_t status = HSA_STATUS_ERROR_NOT_INITIALIZED; + if (ptr_ && m){ status = REAL(hsa_amd_pointer_info)(ptr, info, alloc, num_agents_accessible, accessible); - if (status == HSA_STATUS_SUCCESS && info && ptr_ && m) { + if (status == HSA_STATUS_SUCCESS && info) { static_assert(AP_.kMetadataSize == 0, "Expression below requires this"); info->agentBaseAddress = reinterpret_cast( reinterpret_cast(info->agentBaseAddress) + kPageSize_); @@ -1567,6 +1563,7 @@ hsa_status_t asan_hsa_amd_pointer_info(const void* ptr, reinterpret_cast(info->hostBaseAddress) + kPageSize_); info->sizeInBytes = m->UsedSize(); } +} return status; } From 20bc0db1ba9377bc7d1c6b754e5133deb9cbaadd Mon Sep 17 00:00:00 2001 From: Amit Kumar Pandey Date: Fri, 14 Nov 2025 10:43:34 +0530 Subject: [PATCH 6/8] Revert "Improve code for edge cases testing." This reverts commit 00898ca25fba0c25f3f1320f7cde6ab07d49434b. --- compiler-rt/lib/asan/asan_allocator.cpp | 461 ++++++++++++------------ 1 file changed, 232 insertions(+), 229 deletions(-) diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp index 250e82f14e1ec..7b6bc9268f54e 100644 --- a/compiler-rt/lib/asan/asan_allocator.cpp +++ b/compiler-rt/lib/asan/asan_allocator.cpp @@ -52,9 +52,9 @@ static u32 RZSize2Log(u32 rz_size) { return res; } -static AsanAllocator& get_allocator(); +static AsanAllocator &get_allocator(); -static void AtomicContextStore(volatile atomic_uint64_t* atomic_context, +static void AtomicContextStore(volatile atomic_uint64_t *atomic_context, u32 tid, u32 stack) { u64 context = tid; context <<= 32; @@ -62,8 +62,8 @@ static void AtomicContextStore(volatile atomic_uint64_t* atomic_context, atomic_store(atomic_context, context, memory_order_relaxed); } -static void AtomicContextLoad(const volatile atomic_uint64_t* atomic_context, - u32& tid, u32& stack) { +static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context, + u32 &tid, u32 &stack) { u64 context = atomic_load(atomic_context, memory_order_relaxed); stack = context; context >>= 32; @@ -123,7 +123,7 @@ class ChunkHeader { AtomicContextStore(&alloc_context_id, tid, stack); } - void GetAllocContext(u32& tid, u32& stack) const { + void GetAllocContext(u32 &tid, u32 &stack) const { AtomicContextLoad(&alloc_context_id, tid, stack); } }; @@ -136,7 +136,7 @@ class ChunkBase : public ChunkHeader { AtomicContextStore(&free_context_id, tid, stack); } - void GetFreeContext(u32& tid, u32& stack) const { + void GetFreeContext(u32 &tid, u32 &stack) const { AtomicContextLoad(&free_context_id, tid, stack); } }; @@ -169,16 +169,16 @@ class LargeChunkHeader { static constexpr uptr kAllocBegMagic = FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL); atomic_uintptr_t magic; - AsanChunk* chunk_header; + AsanChunk *chunk_header; public: - AsanChunk* Get() const { + AsanChunk *Get() const { return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic ? chunk_header : nullptr; } - void Set(AsanChunk* p) { + void Set(AsanChunk *p) { if (p) { chunk_header = p; atomic_store(&magic, kAllocBegMagic, memory_order_release); @@ -193,9 +193,9 @@ class LargeChunkHeader { } }; -static void FillChunk(AsanChunk* m) { +static void FillChunk(AsanChunk *m) { // FIXME: Use ReleaseMemoryPagesToOS. 
- Flags& fl = *flags(); + Flags &fl = *flags(); if (fl.max_free_fill_size > 0) { // We have to skip the chunk header, it contains free_context_id. @@ -203,24 +203,26 @@ static void FillChunk(AsanChunk* m) { if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area. uptr size_to_fill = m->UsedSize() - kChunkHeader2Size; size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size); - REAL(memset)((void*)scribble_start, fl.free_fill_byte, size_to_fill); + REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill); } } } struct QuarantineCallback { - QuarantineCallback(AllocatorCache* cache, BufferedStackTrace* stack) - : cache_(cache), stack_(stack) {} + QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack) + : cache_(cache), + stack_(stack) { + } - void PreQuarantine(AsanChunk* m) const { + void PreQuarantine(AsanChunk *m) const { FillChunk(m); // Poison the region. PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY), kAsanHeapFreeMagic); } - void Recycle(AsanChunk* m) const { - void* p = get_allocator().GetBlockBegin(m); + void Recycle(AsanChunk *m) const { + void *p = get_allocator().GetBlockBegin(m); // The secondary will immediately unpoison and unmap the memory, so this // branch is unnecessary. @@ -228,7 +230,7 @@ struct QuarantineCallback { if (p != m) { // Clear the magic value, as allocator internals may overwrite the // contents of deallocated chunk, confusing GetAsanChunk lookup. - reinterpret_cast(p)->Set(nullptr); + reinterpret_cast(p)->Set(nullptr); } u8 old_chunk_state = CHUNK_QUARANTINE; @@ -243,14 +245,14 @@ struct QuarantineCallback { } // Statistics. - AsanStats& thread_stats = GetCurrentThreadStats(); + AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.real_frees++; thread_stats.really_freed += m->UsedSize(); get_allocator().Deallocate(cache_, p); } - void RecyclePassThrough(AsanChunk* m) const { + void RecyclePassThrough(AsanChunk *m) const { // Recycle for the secondary will immediately unpoison and unmap the // memory, so quarantine preparation is unnecessary. if (get_allocator().FromPrimary(m)) { @@ -260,15 +262,15 @@ struct QuarantineCallback { Recycle(m); } - void* Allocate(uptr size) const { - void* res = get_allocator().Allocate(cache_, size, 1); + void *Allocate(uptr size) const { + void *res = get_allocator().Allocate(cache_, size, 1); // TODO(alekseys): Consider making quarantine OOM-friendly. if (UNLIKELY(!res)) ReportOutOfMemory(size, stack_); return res; } - void Deallocate(void* p) const { get_allocator().Deallocate(cache_, p); } + void Deallocate(void *p) const { get_allocator().Deallocate(cache_, p); } private: AllocatorCache* const cache_; @@ -281,7 +283,7 @@ typedef AsanQuarantine::Cache QuarantineCache; void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const { PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic); // Statistics. - AsanStats& thread_stats = GetCurrentThreadStats(); + AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.mmaps++; thread_stats.mmaped += size; } @@ -296,7 +298,7 @@ void AsanMapUnmapCallback::OnMapSecondary(uptr p, uptr size, uptr user_begin, PoisonShadow(p, user_begin - p, kAsanHeapLeftRedzoneMagic); PoisonShadow(user_end, size - (user_end - p), kAsanHeapLeftRedzoneMagic); // Statistics. 
- AsanStats& thread_stats = GetCurrentThreadStats(); + AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.mmaps++; thread_stats.mmaped += size; } @@ -307,7 +309,7 @@ void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const { // Mark the corresponding shadow memory as not needed. FlushUnneededASanShadowMemory(p, size); // Statistics. - AsanStats& thread_stats = GetCurrentThreadStats(); + AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.munmaps++; thread_stats.munmaped += size; } @@ -315,18 +317,18 @@ void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const { // We can not use THREADLOCAL because it is not supported on some of the // platforms we care about (OSX 10.6, Android). // static THREADLOCAL AllocatorCache cache; -AllocatorCache* GetAllocatorCache(AsanThreadLocalMallocStorage* ms) { +AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) { CHECK(ms); return &ms->allocator_cache; } -QuarantineCache* GetQuarantineCache(AsanThreadLocalMallocStorage* ms) { +QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) { CHECK(ms); CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache)); - return reinterpret_cast(ms->quarantine_cache); + return reinterpret_cast(ms->quarantine_cache); } -void AllocatorOptions::SetFrom(const Flags* f, const CommonFlags* cf) { +void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) { quarantine_size_mb = f->quarantine_size_mb; thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb; min_redzone = f->redzone; @@ -336,7 +338,7 @@ void AllocatorOptions::SetFrom(const Flags* f, const CommonFlags* cf) { release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms; } -void AllocatorOptions::CopyTo(Flags* f, CommonFlags* cf) { +void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) { f->quarantine_size_mb = quarantine_size_mb; f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb; f->redzone = min_redzone; @@ -368,7 +370,7 @@ struct Allocator { : quarantine(LINKER_INITIALIZED), fallback_quarantine_cache(LINKER_INITIALIZED) {} - void CheckOptions(const AllocatorOptions& options) const { + void CheckOptions(const AllocatorOptions &options) const { CHECK_GE(options.min_redzone, 16); CHECK_GE(options.max_redzone, options.min_redzone); CHECK_LE(options.max_redzone, 2048); @@ -376,7 +378,7 @@ struct Allocator { CHECK(IsPowerOfTwo(options.max_redzone)); } - void SharedInitCode(const AllocatorOptions& options) { + void SharedInitCode(const AllocatorOptions &options) { CheckOptions(options); quarantine.Init((uptr)options.quarantine_size_mb << 20, (uptr)options.thread_local_quarantine_size_kb << 10); @@ -386,7 +388,7 @@ struct Allocator { atomic_store(&max_redzone, options.max_redzone, memory_order_release); } - void InitLinkerInitialized(const AllocatorOptions& options) { + void InitLinkerInitialized(const AllocatorOptions &options) { SetAllocatorMayReturnNull(options.may_return_null); #if SANITIZER_AMDGPU allocator.InitLinkerInitialized(options.release_to_os_interval_ms, 0, true); @@ -403,8 +405,8 @@ struct Allocator { void RePoisonChunk(uptr chunk) { // This could be a user-facing chunk (with redzones), or some internal // housekeeping chunk, like TransferBatch. Start by assuming the former. 
- AsanChunk* ac = GetAsanChunk((void*)chunk); - uptr allocated_size = allocator.GetActuallyAllocatedSize((void*)chunk); + AsanChunk *ac = GetAsanChunk((void *)chunk); + uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk); if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) == CHUNK_ALLOCATED) { uptr beg = ac->Beg(); @@ -427,28 +429,28 @@ struct Allocator { } // Apply provided AllocatorOptions to an Allocator - void ApplyOptions(const AllocatorOptions& options) { + void ApplyOptions(const AllocatorOptions &options) { SetAllocatorMayReturnNull(options.may_return_null); allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms); SharedInitCode(options); } - void ReInitialize(const AllocatorOptions& options) { + void ReInitialize(const AllocatorOptions &options) { ApplyOptions(options); // Poison all existing allocation's redzones. if (CanPoisonMemory()) { allocator.ForceLock(); allocator.ForEachChunk( - [](uptr chunk, void* alloc) { - ((Allocator*)alloc)->RePoisonChunk(chunk); + [](uptr chunk, void *alloc) { + ((Allocator *)alloc)->RePoisonChunk(chunk); }, this); allocator.ForceUnlock(); } } - void GetOptions(AllocatorOptions* options) const { + void GetOptions(AllocatorOptions *options) const { options->quarantine_size_mb = quarantine.GetMaxSize() >> 20; options->thread_local_quarantine_size_kb = quarantine.GetMaxCacheSize() >> 10; @@ -491,8 +493,8 @@ struct Allocator { } // We have an address between two chunks, and we want to report just one. - AsanChunk* ChooseChunk(uptr addr, AsanChunk* left_chunk, - AsanChunk* right_chunk) { + AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk, + AsanChunk *right_chunk) { if (!left_chunk) return right_chunk; if (!right_chunk) @@ -521,23 +523,21 @@ struct Allocator { return right_chunk; } - bool UpdateAllocationStack(uptr addr, BufferedStackTrace* stack) { - AsanChunk* m = GetAsanChunkByAddr(addr); - if (!m) - return false; + bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) { + AsanChunk *m = GetAsanChunkByAddr(addr); + if (!m) return false; if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED) return false; - if (m->Beg() != addr) - return false; - AsanThread* t = GetCurrentThread(); + if (m->Beg() != addr) return false; + AsanThread *t = GetCurrentThread(); m->SetAllocContext(t ? 
t->tid() : kMainTid, StackDepotPut(*stack)); return true; } // -------------------- Allocation/Deallocation routines --------------- - void* Allocate(uptr size, uptr alignment, BufferedStackTrace* stack, + void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack, AllocType alloc_type, bool can_fill, - DeviceAllocationInfo* da_info = nullptr) { + DeviceAllocationInfo *da_info = nullptr) { if (UNLIKELY(!AsanInited())) AsanInitFromRtl(); if (UNLIKELY(IsRssLimitExceeded())) { @@ -545,7 +545,7 @@ struct Allocator { return nullptr; ReportRssLimitExceeded(stack); } - Flags& fl = *flags(); + Flags &fl = *flags(); CHECK(stack); const uptr min_alignment = ASAN_SHADOW_GRANULARITY; const uptr user_requested_alignment_log = @@ -588,14 +588,14 @@ struct Allocator { ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack); } - AsanThread* t = GetCurrentThread(); - void* allocated; + AsanThread *t = GetCurrentThread(); + void *allocated; if (t) { - AllocatorCache* cache = GetAllocatorCache(&t->malloc_storage()); + AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); allocated = allocator.Allocate(cache, needed_size, 8, da_info); } else { SpinMutexLock l(&fallback_mutex); - AllocatorCache* cache = &fallback_allocator_cache; + AllocatorCache *cache = &fallback_allocator_cache; allocated = allocator.Allocate(cache, needed_size, 8, da_info); } if (UNLIKELY(!allocated)) { @@ -613,7 +613,7 @@ struct Allocator { uptr user_end = user_beg + size; CHECK_LE(user_end, alloc_end); uptr chunk_beg = user_beg - kChunkHeaderSize; - AsanChunk* m = reinterpret_cast(chunk_beg); + AsanChunk *m = reinterpret_cast(chunk_beg); m->alloc_type = alloc_type; CHECK(size); m->SetUsedSize(size); @@ -621,7 +621,7 @@ struct Allocator { m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack)); - if (!from_primary || *(u8*)MEM_TO_SHADOW((uptr)allocated) == 0) { + if (!from_primary || *(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0) { // The allocator provides an unpoisoned chunk. This is possible for the // secondary allocator, or if CanPoisonMemory() was false for some time, // for example, due to flags()->start_disabled. Anyway, poison left and @@ -639,8 +639,8 @@ struct Allocator { PoisonShadow(user_beg, size_rounded_down_to_granularity, 0); // Deal with the end of the region if size is not aligned to granularity. if (size != size_rounded_down_to_granularity && CanPoisonMemory()) { - u8* shadow = - (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity); + u8 *shadow = + (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity); *shadow = fl.poison_partial ? 
(size & (ASAN_SHADOW_GRANULARITY - 1)) : 0; } @@ -648,7 +648,7 @@ struct Allocator { PoisonShadow(user_beg, ASAN_SHADOW_GRANULARITY, kAsanHeapLeftRedzoneMagic); - AsanStats& thread_stats = GetCurrentThreadStats(); + AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.mallocs++; thread_stats.malloced += size; thread_stats.malloced_redzones += needed_size - size; @@ -657,7 +657,7 @@ struct Allocator { else thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++; - void* res = reinterpret_cast(user_beg); + void *res = reinterpret_cast(user_beg); if (can_fill && fl.max_malloc_fill_size) { uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size); REAL(memset)(res, fl.malloc_fill_byte, fill_size); @@ -670,7 +670,7 @@ struct Allocator { atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release); if (alloc_beg != chunk_beg) { CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg); - reinterpret_cast(alloc_beg)->Set(m); + reinterpret_cast(alloc_beg)->Set(m); } RunMallocHooks(res, size); return res; @@ -678,8 +678,8 @@ struct Allocator { // Set quarantine flag if chunk is allocated, issue ASan error report on // available and quarantined chunks. Return true on success, false otherwise. - bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk* m, void* ptr, - BufferedStackTrace* stack) { + bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr, + BufferedStackTrace *stack) { u8 old_chunk_state = CHUNK_ALLOCATED; // Flip the chunk_state atomically to avoid race on double-free. if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state, @@ -697,38 +697,38 @@ struct Allocator { // Expects the chunk to already be marked as quarantined by using // AtomicallySetQuarantineFlagIfAllocated. - void QuarantineChunk(AsanChunk* m, void* ptr, BufferedStackTrace* stack) { + void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) { CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed), CHUNK_QUARANTINE); - AsanThread* t = GetCurrentThread(); + AsanThread *t = GetCurrentThread(); m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack)); // Push into quarantine. if (t) { - AsanThreadLocalMallocStorage* ms = &t->malloc_storage(); - AllocatorCache* ac = GetAllocatorCache(ms); + AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); + AllocatorCache *ac = GetAllocatorCache(ms); quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m, m->UsedSize()); } else { SpinMutexLock l(&fallback_mutex); - AllocatorCache* ac = &fallback_allocator_cache; + AllocatorCache *ac = &fallback_allocator_cache; quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack), m, m->UsedSize()); } } - void Deallocate(void* ptr, uptr delete_size, uptr delete_alignment, - BufferedStackTrace* stack, AllocType alloc_type) { + void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment, + BufferedStackTrace *stack, AllocType alloc_type) { uptr p = reinterpret_cast(ptr); - if (p == 0) - return; + if (p == 0) return; uptr chunk_beg = p - kChunkHeaderSize; - AsanChunk* m = reinterpret_cast(chunk_beg); + AsanChunk *m = reinterpret_cast(chunk_beg); // On Windows, uninstrumented DLLs may allocate memory before ASan hooks // malloc. Don't report an invalid free in this case. 
- if (SANITIZER_WINDOWS && !get_allocator().PointerIsMine(ptr)) { + if (SANITIZER_WINDOWS && + !get_allocator().PointerIsMine(ptr)) { if (!IsSystemHeapAddress(p)) ReportFreeNotMalloced(p, stack); return; @@ -746,8 +746,7 @@ struct Allocator { // Must mark the chunk as quarantined before any changes to its metadata. // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag. - if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) - return; + if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return; if (m->alloc_type != alloc_type) { if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire) && @@ -765,24 +764,24 @@ struct Allocator { } } - AsanStats& thread_stats = GetCurrentThreadStats(); + AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.frees++; thread_stats.freed += m->UsedSize(); QuarantineChunk(m, ptr, stack); } - void* Reallocate(void* old_ptr, uptr new_size, BufferedStackTrace* stack) { + void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) { CHECK(old_ptr && new_size); uptr p = reinterpret_cast(old_ptr); uptr chunk_beg = p - kChunkHeaderSize; - AsanChunk* m = reinterpret_cast(chunk_beg); + AsanChunk *m = reinterpret_cast(chunk_beg); - AsanStats& thread_stats = GetCurrentThreadStats(); + AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.reallocs++; thread_stats.realloced += new_size; - void* new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true); + void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true); if (new_ptr) { u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire); if (chunk_state != CHUNK_ALLOCATED) @@ -797,13 +796,13 @@ struct Allocator { return new_ptr; } - void* Calloc(uptr nmemb, uptr size, BufferedStackTrace* stack) { + void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { if (AllocatorMayReturnNull()) return nullptr; ReportCallocOverflow(nmemb, size, stack); } - void* ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false); + void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false); // If the memory comes from the secondary allocator no need to clear it // as it comes directly from mmap. if (ptr && allocator.FromPrimary(ptr)) @@ -811,15 +810,15 @@ struct Allocator { return ptr; } - void ReportInvalidFree(void* ptr, u8 chunk_state, BufferedStackTrace* stack) { + void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) { if (chunk_state == CHUNK_QUARANTINE) ReportDoubleFree((uptr)ptr, stack); else ReportFreeNotMalloced((uptr)ptr, stack); } - void CommitBack(AsanThreadLocalMallocStorage* ms, BufferedStackTrace* stack) { - AllocatorCache* ac = GetAllocatorCache(ms); + void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) { + AllocatorCache *ac = GetAllocatorCache(ms); quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack)); allocator.SwallowCache(ac); } @@ -830,14 +829,14 @@ struct Allocator { // Returns nullptr if AsanChunk is not yet initialized just after // get_allocator().Allocate(), or is being destroyed just before // get_allocator().Deallocate(). 
- AsanChunk* GetAsanChunk(void* alloc_beg) { + AsanChunk *GetAsanChunk(void *alloc_beg) { if (!alloc_beg) return nullptr; - AsanChunk* p = reinterpret_cast(alloc_beg)->Get(); + AsanChunk *p = reinterpret_cast(alloc_beg)->Get(); if (!p) { if (!allocator.FromPrimary(alloc_beg)) return nullptr; - p = reinterpret_cast(alloc_beg); + p = reinterpret_cast(alloc_beg); } u8 state = atomic_load(&p->chunk_state, memory_order_relaxed); // It does not guaranty that Chunk is initialized, but it's @@ -847,45 +846,42 @@ struct Allocator { return nullptr; } - AsanChunk* GetAsanChunkByAddr(uptr p) { - void* alloc_beg = allocator.GetBlockBegin(reinterpret_cast(p)); + AsanChunk *GetAsanChunkByAddr(uptr p) { + void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast(p)); return GetAsanChunk(alloc_beg); } // Allocator must be locked when this function is called. - AsanChunk* GetAsanChunkByAddrFastLocked(uptr p) { - void* alloc_beg = - allocator.GetBlockBeginFastLocked(reinterpret_cast(p)); + AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) { + void *alloc_beg = + allocator.GetBlockBeginFastLocked(reinterpret_cast(p)); return GetAsanChunk(alloc_beg); } uptr AllocationSize(uptr p) { - AsanChunk* m = GetAsanChunkByAddr(p); - if (!m) - return 0; + AsanChunk *m = GetAsanChunkByAddr(p); + if (!m) return 0; if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED) return 0; - if (m->Beg() != p) - return 0; + if (m->Beg() != p) return 0; return m->UsedSize(); } uptr AllocationSizeFast(uptr p) { - return reinterpret_cast(p - kChunkHeaderSize)->UsedSize(); + return reinterpret_cast(p - kChunkHeaderSize)->UsedSize(); } AsanChunkView FindHeapChunkByAddress(uptr addr) { - AsanChunk* m1 = GetAsanChunkByAddr(addr); + AsanChunk *m1 = GetAsanChunkByAddr(addr); sptr offset = 0; if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) { // The address is in the chunk's left redzone, so maybe it is actually // a right buffer overflow from the other chunk before. // Search a bit before to see if there is another chunk. - AsanChunk* m2 = nullptr; + AsanChunk *m2 = nullptr; for (uptr l = 1; l < GetPageSizeCached(); l++) { m2 = GetAsanChunkByAddr(addr - l); - if (m2 == m1) - continue; // Still the same chunk. + if (m2 == m1) continue; // Still the same chunk. 
break; } if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset)) @@ -894,19 +890,19 @@ struct Allocator { return AsanChunkView(m1); } - void Purge(BufferedStackTrace* stack) { - AsanThread* t = GetCurrentThread(); + void Purge(BufferedStackTrace *stack) { + AsanThread *t = GetCurrentThread(); if (t) { - AsanThreadLocalMallocStorage* ms = &t->malloc_storage(); - quarantine.DrainAndRecycle( - GetQuarantineCache(ms), - QuarantineCallback(GetAllocatorCache(ms), stack)); + AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); + quarantine.DrainAndRecycle(GetQuarantineCache(ms), + QuarantineCallback(GetAllocatorCache(ms), + stack)); } { SpinMutexLock l(&fallback_mutex); - quarantine.DrainAndRecycle( - &fallback_quarantine_cache, - QuarantineCallback(&fallback_allocator_cache, stack)); + quarantine.DrainAndRecycle(&fallback_quarantine_cache, + QuarantineCallback(&fallback_allocator_cache, + stack)); } allocator.ForceReleaseToOS(); @@ -930,7 +926,9 @@ struct Allocator { static Allocator instance(LINKER_INITIALIZED); -static AsanAllocator& get_allocator() { return instance.allocator; } +static AsanAllocator &get_allocator() { + return instance.allocator; +} bool AsanChunkView::IsValid() const { return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) != @@ -987,20 +985,20 @@ u32 AsanChunkView::GetFreeStackId() const { return stack; } -void InitializeAllocator(const AllocatorOptions& options) { +void InitializeAllocator(const AllocatorOptions &options) { instance.InitLinkerInitialized(options); } -void ReInitializeAllocator(const AllocatorOptions& options) { +void ReInitializeAllocator(const AllocatorOptions &options) { instance.ReInitialize(options); } // Apply provided AllocatorOptions to an Allocator -void ApplyAllocatorOptions(const AllocatorOptions& options) { +void ApplyAllocatorOptions(const AllocatorOptions &options) { instance.ApplyOptions(options); } -void GetAllocatorOptions(AllocatorOptions* options) { +void GetAllocatorOptions(AllocatorOptions *options) { instance.GetOptions(options); } @@ -1016,22 +1014,24 @@ void AsanThreadLocalMallocStorage::CommitBack() { instance.CommitBack(this, &stack); } -void PrintInternalAllocatorStats() { instance.PrintStats(); } +void PrintInternalAllocatorStats() { + instance.PrintStats(); +} -void asan_free(void* ptr, BufferedStackTrace* stack) { +void asan_free(void *ptr, BufferedStackTrace *stack) { instance.Deallocate(ptr, 0, 0, stack, FROM_MALLOC); } -void* asan_malloc(uptr size, BufferedStackTrace* stack) { +void *asan_malloc(uptr size, BufferedStackTrace *stack) { return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true)); } -void* asan_calloc(uptr nmemb, uptr size, BufferedStackTrace* stack) { +void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { return SetErrnoOnNull(instance.Calloc(nmemb, size, stack)); } -void* asan_reallocarray(void* p, uptr nmemb, uptr size, - BufferedStackTrace* stack) { +void *asan_reallocarray(void *p, uptr nmemb, uptr size, + BufferedStackTrace *stack) { if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { errno = errno_ENOMEM; if (AllocatorMayReturnNull()) @@ -1041,7 +1041,7 @@ void* asan_reallocarray(void* p, uptr nmemb, uptr size, return asan_realloc(p, nmemb * size, stack); } -void* asan_realloc(void* p, uptr size, BufferedStackTrace* stack) { +void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) { if (!p) return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true)); if (size == 0) { @@ -1055,12 +1055,12 @@ void* asan_realloc(void* p, 
uptr size, BufferedStackTrace* stack) { return SetErrnoOnNull(instance.Reallocate(p, size, stack)); } -void* asan_valloc(uptr size, BufferedStackTrace* stack) { +void *asan_valloc(uptr size, BufferedStackTrace *stack) { return SetErrnoOnNull( instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true)); } -void* asan_pvalloc(uptr size, BufferedStackTrace* stack) { +void *asan_pvalloc(uptr size, BufferedStackTrace *stack) { uptr PageSize = GetPageSizeCached(); if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) { errno = errno_ENOMEM; @@ -1074,7 +1074,7 @@ void* asan_pvalloc(uptr size, BufferedStackTrace* stack) { instance.Allocate(size, PageSize, stack, FROM_MALLOC, true)); } -void* asan_memalign(uptr alignment, uptr size, BufferedStackTrace* stack) { +void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack) { if (UNLIKELY(!IsPowerOfTwo(alignment))) { errno = errno_EINVAL; if (AllocatorMayReturnNull()) @@ -1085,7 +1085,7 @@ void* asan_memalign(uptr alignment, uptr size, BufferedStackTrace* stack) { instance.Allocate(size, alignment, stack, FROM_MALLOC, true)); } -void* asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace* stack) { +void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) { if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) { errno = errno_EINVAL; if (AllocatorMayReturnNull()) @@ -1096,14 +1096,14 @@ void* asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace* stack) { instance.Allocate(size, alignment, stack, FROM_MALLOC, true)); } -int asan_posix_memalign(void** memptr, uptr alignment, uptr size, - BufferedStackTrace* stack) { +int asan_posix_memalign(void **memptr, uptr alignment, uptr size, + BufferedStackTrace *stack) { if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) { if (AllocatorMayReturnNull()) return errno_EINVAL; ReportInvalidPosixMemalignAlignment(alignment, stack); } - void* ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true); + void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true); if (UNLIKELY(!ptr)) // OOM error is already taken care of by Allocate. return errno_ENOMEM; @@ -1112,9 +1112,8 @@ int asan_posix_memalign(void** memptr, uptr alignment, uptr size, return 0; } -uptr asan_malloc_usable_size(const void* ptr, uptr pc, uptr bp) { - if (!ptr) - return 0; +uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) { + if (!ptr) return 0; uptr usable_size = instance.AllocationSize(reinterpret_cast(ptr)); if (flags()->check_malloc_usable_size && (usable_size == 0)) { GET_STACK_TRACE_FATAL(pc, bp); @@ -1125,12 +1124,12 @@ uptr asan_malloc_usable_size(const void* ptr, uptr pc, uptr bp) { namespace { -void* asan_new(uptr size, BufferedStackTrace* stack, bool array) { +void *asan_new(uptr size, BufferedStackTrace *stack, bool array) { return SetErrnoOnNull( instance.Allocate(size, 0, stack, array ? FROM_NEW_BR : FROM_NEW, true)); } -void* asan_new_aligned(uptr size, uptr alignment, BufferedStackTrace* stack, +void *asan_new_aligned(uptr size, uptr alignment, BufferedStackTrace *stack, bool array) { if (UNLIKELY(alignment == 0 || !IsPowerOfTwo(alignment))) { errno = errno_EINVAL; @@ -1142,81 +1141,81 @@ void* asan_new_aligned(uptr size, uptr alignment, BufferedStackTrace* stack, size, alignment, stack, array ? FROM_NEW_BR : FROM_NEW, true)); } -void asan_delete(void* ptr, BufferedStackTrace* stack, bool array) { +void asan_delete(void *ptr, BufferedStackTrace *stack, bool array) { instance.Deallocate(ptr, 0, 0, stack, array ? 
FROM_NEW_BR : FROM_NEW); } -void asan_delete_aligned(void* ptr, uptr alignment, BufferedStackTrace* stack, +void asan_delete_aligned(void *ptr, uptr alignment, BufferedStackTrace *stack, bool array) { instance.Deallocate(ptr, 0, alignment, stack, array ? FROM_NEW_BR : FROM_NEW); } -void asan_delete_sized(void* ptr, uptr size, BufferedStackTrace* stack, +void asan_delete_sized(void *ptr, uptr size, BufferedStackTrace *stack, bool array) { instance.Deallocate(ptr, size, 0, stack, array ? FROM_NEW_BR : FROM_NEW); } -void asan_delete_sized_aligned(void* ptr, uptr size, uptr alignment, - BufferedStackTrace* stack, bool array) { +void asan_delete_sized_aligned(void *ptr, uptr size, uptr alignment, + BufferedStackTrace *stack, bool array) { instance.Deallocate(ptr, size, alignment, stack, array ? FROM_NEW_BR : FROM_NEW); } } // namespace -void* asan_new(uptr size, BufferedStackTrace* stack) { +void *asan_new(uptr size, BufferedStackTrace *stack) { return asan_new(size, stack, /*array=*/false); } -void* asan_new_aligned(uptr size, uptr alignment, BufferedStackTrace* stack) { +void *asan_new_aligned(uptr size, uptr alignment, BufferedStackTrace *stack) { return asan_new_aligned(size, alignment, stack, /*array=*/false); } -void* asan_new_array(uptr size, BufferedStackTrace* stack) { +void *asan_new_array(uptr size, BufferedStackTrace *stack) { return asan_new(size, stack, /*array=*/true); } -void* asan_new_array_aligned(uptr size, uptr alignment, - BufferedStackTrace* stack) { +void *asan_new_array_aligned(uptr size, uptr alignment, + BufferedStackTrace *stack) { return asan_new_aligned(size, alignment, stack, /*array=*/true); } -void asan_delete(void* ptr, BufferedStackTrace* stack) { +void asan_delete(void *ptr, BufferedStackTrace *stack) { asan_delete(ptr, stack, /*array=*/false); } -void asan_delete_aligned(void* ptr, uptr alignment, BufferedStackTrace* stack) { +void asan_delete_aligned(void *ptr, uptr alignment, BufferedStackTrace *stack) { asan_delete_aligned(ptr, alignment, stack, /*array=*/false); } -void asan_delete_sized(void* ptr, uptr size, BufferedStackTrace* stack) { +void asan_delete_sized(void *ptr, uptr size, BufferedStackTrace *stack) { asan_delete_sized(ptr, size, stack, /*array=*/false); } -void asan_delete_sized_aligned(void* ptr, uptr size, uptr alignment, - BufferedStackTrace* stack) { +void asan_delete_sized_aligned(void *ptr, uptr size, uptr alignment, + BufferedStackTrace *stack) { asan_delete_sized_aligned(ptr, size, alignment, stack, /*array=*/false); } -void asan_delete_array(void* ptr, BufferedStackTrace* stack) { +void asan_delete_array(void *ptr, BufferedStackTrace *stack) { asan_delete(ptr, stack, /*array=*/true); } -void asan_delete_array_aligned(void* ptr, uptr alignment, - BufferedStackTrace* stack) { +void asan_delete_array_aligned(void *ptr, uptr alignment, + BufferedStackTrace *stack) { asan_delete_aligned(ptr, alignment, stack, /*array=*/true); } -void asan_delete_array_sized(void* ptr, uptr size, BufferedStackTrace* stack) { +void asan_delete_array_sized(void *ptr, uptr size, BufferedStackTrace *stack) { asan_delete_sized(ptr, size, stack, /*array=*/true); } -void asan_delete_array_sized_aligned(void* ptr, uptr size, uptr alignment, - BufferedStackTrace* stack) { +void asan_delete_array_sized_aligned(void *ptr, uptr size, uptr alignment, + BufferedStackTrace *stack) { asan_delete_sized_aligned(ptr, size, alignment, stack, /*array=*/true); } -uptr asan_mz_size(const void* ptr) { +uptr asan_mz_size(const void *ptr) { return 
instance.AllocationSize(reinterpret_cast(ptr)); } @@ -1232,18 +1231,22 @@ void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { // --- Implementation of LSan-specific functions --- {{{1 namespace __lsan { -void LockAllocator() { __asan::get_allocator().ForceLock(); } +void LockAllocator() { + __asan::get_allocator().ForceLock(); +} -void UnlockAllocator() { __asan::get_allocator().ForceUnlock(); } +void UnlockAllocator() { + __asan::get_allocator().ForceUnlock(); +} -void GetAllocatorGlobalRange(uptr* begin, uptr* end) { +void GetAllocatorGlobalRange(uptr *begin, uptr *end) { *begin = (uptr)&__asan::get_allocator(); *end = *begin + sizeof(__asan::get_allocator()); } -uptr PointsIntoChunk(void* p) { +uptr PointsIntoChunk(void *p) { uptr addr = reinterpret_cast(p); - __asan::AsanChunk* m = __asan::instance.GetAsanChunkByAddrFastLocked(addr); + __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr); if (!m || atomic_load(&m->chunk_state, memory_order_acquire) != __asan::CHUNK_ALLOCATED) return 0; @@ -1258,55 +1261,57 @@ uptr PointsIntoChunk(void* p) { uptr GetUserBegin(uptr chunk) { // FIXME: All usecases provide chunk address, GetAsanChunkByAddrFastLocked is // not needed. - __asan::AsanChunk* m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk); + __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk); return m ? m->Beg() : 0; } -uptr GetUserAddr(uptr chunk) { return chunk; } +uptr GetUserAddr(uptr chunk) { + return chunk; +} LsanMetadata::LsanMetadata(uptr chunk) { - metadata_ = chunk ? reinterpret_cast(chunk - __asan::kChunkHeaderSize) + metadata_ = chunk ? reinterpret_cast(chunk - __asan::kChunkHeaderSize) : nullptr; } bool LsanMetadata::allocated() const { if (!metadata_) return false; - __asan::AsanChunk* m = reinterpret_cast<__asan::AsanChunk*>(metadata_); + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); return atomic_load(&m->chunk_state, memory_order_relaxed) == __asan::CHUNK_ALLOCATED; } ChunkTag LsanMetadata::tag() const { - __asan::AsanChunk* m = reinterpret_cast<__asan::AsanChunk*>(metadata_); + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); return static_cast(m->lsan_tag); } void LsanMetadata::set_tag(ChunkTag value) { - __asan::AsanChunk* m = reinterpret_cast<__asan::AsanChunk*>(metadata_); + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); m->lsan_tag = value; } uptr LsanMetadata::requested_size() const { - __asan::AsanChunk* m = reinterpret_cast<__asan::AsanChunk*>(metadata_); + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); return m->UsedSize(); } u32 LsanMetadata::stack_trace_id() const { - __asan::AsanChunk* m = reinterpret_cast<__asan::AsanChunk*>(metadata_); + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); u32 tid = 0; u32 stack = 0; m->GetAllocContext(tid, stack); return stack; } -void ForEachChunk(ForEachChunkCallback callback, void* arg) { +void ForEachChunk(ForEachChunkCallback callback, void *arg) { __asan::get_allocator().ForEachChunk(callback, arg); } -IgnoreObjectResult IgnoreObject(const void* p) { +IgnoreObjectResult IgnoreObject(const void *p) { uptr addr = reinterpret_cast(p); - __asan::AsanChunk* m = __asan::instance.GetAsanChunkByAddr(addr); + __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr); if (!m || (atomic_load(&m->chunk_state, memory_order_acquire) != __asan::CHUNK_ALLOCATED) || @@ -1324,29 +1329,30 @@ IgnoreObjectResult IgnoreObject(const void* p) { // 
---------------------- Interface ---------------- {{{1 using namespace __asan; -static const void* AllocationBegin(const void* p) { - AsanChunk* m = __asan::instance.GetAsanChunkByAddr((uptr)p); +static const void *AllocationBegin(const void *p) { + AsanChunk *m = __asan::instance.GetAsanChunkByAddr((uptr)p); if (!m) return nullptr; if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED) return nullptr; if (m->UsedSize() == 0) return nullptr; - return (const void*)(m->Beg()); + return (const void *)(m->Beg()); } // ASan allocator doesn't reserve extra bytes, so normally we would // just return "size". We don't want to expose our redzone sizes, etc here. -uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; } +uptr __sanitizer_get_estimated_allocated_size(uptr size) { + return size; +} -int __sanitizer_get_ownership(const void* p) { +int __sanitizer_get_ownership(const void *p) { uptr ptr = reinterpret_cast(p); return instance.AllocationSize(ptr) > 0; } -uptr __sanitizer_get_allocated_size(const void* p) { - if (!p) - return 0; +uptr __sanitizer_get_allocated_size(const void *p) { + if (!p) return 0; uptr ptr = reinterpret_cast(p); uptr allocated_size = instance.AllocationSize(ptr); // Die if p is not malloced or if it is already freed. @@ -1357,14 +1363,14 @@ uptr __sanitizer_get_allocated_size(const void* p) { return allocated_size; } -uptr __sanitizer_get_allocated_size_fast(const void* p) { +uptr __sanitizer_get_allocated_size_fast(const void *p) { DCHECK_EQ(p, __sanitizer_get_allocated_begin(p)); uptr ret = instance.AllocationSizeFast(reinterpret_cast(p)); DCHECK_EQ(ret, __sanitizer_get_allocated_size(p)); return ret; } -const void* __sanitizer_get_allocated_begin(const void* p) { +const void *__sanitizer_get_allocated_begin(const void *p) { return AllocationBegin(p); } @@ -1380,18 +1386,17 @@ int __asan_update_allocation_context(void* addr) { #if SANITIZER_AMDGPU DECLARE_REAL(hsa_status_t, hsa_amd_agents_allow_access, uint32_t num_agents, - const hsa_agent_t* agents, const uint32_t* flags, const void* ptr) + const hsa_agent_t *agents, const uint32_t *flags, const void *ptr) DECLARE_REAL(hsa_status_t, hsa_amd_memory_pool_allocate, - hsa_amd_memory_pool_t memory_pool, size_t size, uint32_t flags, - void** ptr) -DECLARE_REAL(hsa_status_t, hsa_amd_memory_pool_free, void* ptr) -DECLARE_REAL(hsa_status_t, hsa_amd_ipc_memory_create, void* ptr, size_t len, - hsa_amd_ipc_memory_t* handle) + hsa_amd_memory_pool_t memory_pool, size_t size, uint32_t flags, + void **ptr) +DECLARE_REAL(hsa_status_t, hsa_amd_memory_pool_free, void *ptr) +DECLARE_REAL(hsa_status_t, hsa_amd_ipc_memory_create, void *ptr, size_t len, + hsa_amd_ipc_memory_t *handle) DECLARE_REAL(hsa_status_t, hsa_amd_ipc_memory_attach, - const hsa_amd_ipc_memory_t* handle, size_t len, - uint32_t num_agents, const hsa_agent_t* mapping_agents, - void** mapped_ptr) -DECLARE_REAL(hsa_status_t, hsa_amd_ipc_memory_detach, void* mapped_ptr) + const hsa_amd_ipc_memory_t *handle, size_t len, uint32_t num_agents, + const hsa_agent_t *mapping_agents, void **mapped_ptr) +DECLARE_REAL(hsa_status_t, hsa_amd_ipc_memory_detach, void *mapped_ptr) DECLARE_REAL(hsa_status_t, hsa_amd_vmem_address_reserve_align, void** ptr, size_t size, uint64_t address, uint64_t alignment, uint64_t flags) DECLARE_REAL(hsa_status_t, hsa_amd_vmem_address_free, void* ptr, size_t size) @@ -1405,23 +1410,24 @@ namespace __asan { static const size_t kPageSize_ = 4096; hsa_status_t asan_hsa_amd_memory_pool_allocate( - hsa_amd_memory_pool_t 
memory_pool, size_t size, uint32_t flags, void** ptr, - BufferedStackTrace* stack) { + hsa_amd_memory_pool_t memory_pool, size_t size, uint32_t flags, void **ptr, + BufferedStackTrace *stack) { AmdgpuAllocationInfo aa_info; aa_info.alloc_func = - reinterpret_cast(asan_hsa_amd_memory_pool_allocate); + reinterpret_cast(asan_hsa_amd_memory_pool_allocate); aa_info.memory_pool = memory_pool; aa_info.size = size; aa_info.flags = flags; aa_info.ptr = nullptr; - SetErrnoOnNull(*ptr = instance.Allocate(size, kPageSize_, stack, FROM_MALLOC, - false, &aa_info)); + SetErrnoOnNull(*ptr = instance.Allocate(size, kPageSize_, stack, + FROM_MALLOC, false, &aa_info)); return aa_info.status; } -hsa_status_t asan_hsa_amd_memory_pool_free(void* ptr, - BufferedStackTrace* stack) { - void* p = get_allocator().GetBlockBegin(ptr); +hsa_status_t asan_hsa_amd_memory_pool_free( + void *ptr, + BufferedStackTrace *stack) { + void *p = get_allocator().GetBlockBegin(ptr); if (p) { instance.Deallocate(ptr, 0, 0, stack, FROM_MALLOC); return HSA_STATUS_SUCCESS; @@ -1429,12 +1435,11 @@ hsa_status_t asan_hsa_amd_memory_pool_free(void* ptr, return REAL(hsa_amd_memory_pool_free)(ptr); } -hsa_status_t asan_hsa_amd_agents_allow_access(uint32_t num_agents, - const hsa_agent_t* agents, - const uint32_t* flags, - const void* ptr, - BufferedStackTrace* stack) { - void* p = get_allocator().GetBlockBegin(ptr); +hsa_status_t asan_hsa_amd_agents_allow_access( + uint32_t num_agents, const hsa_agent_t *agents, const uint32_t *flags, + const void *ptr, + BufferedStackTrace *stack) { + void *p = get_allocator().GetBlockBegin(ptr); return REAL(hsa_amd_agents_allow_access)(num_agents, agents, flags, p ? p : ptr); } @@ -1444,11 +1449,11 @@ hsa_status_t asan_hsa_amd_agents_allow_access(uint32_t num_agents, // is always one kPageSize_ // IPC calls use static_assert to make sure kMetadataSize = 0 // -# if SANITIZER_CAN_USE_ALLOCATOR64 +#if SANITIZER_CAN_USE_ALLOCATOR64 static struct AP64 AP_; -# else +#else static struct AP32 AP_; -# endif +#endif hsa_status_t asan_hsa_amd_ipc_memory_create(void* ptr, size_t len, hsa_amd_ipc_memory_t* handle) { @@ -1468,25 +1473,24 @@ hsa_status_t asan_hsa_amd_ipc_memory_create(void* ptr, size_t len, return REAL(hsa_amd_ipc_memory_create)(ptr, len, handle); } -hsa_status_t asan_hsa_amd_ipc_memory_attach(const hsa_amd_ipc_memory_t* handle, - size_t len, uint32_t num_agents, - const hsa_agent_t* mapping_agents, - void** mapped_ptr) { +hsa_status_t asan_hsa_amd_ipc_memory_attach(const hsa_amd_ipc_memory_t *handle, + size_t len, uint32_t num_agents, const hsa_agent_t *mapping_agents, + void **mapped_ptr) { static_assert(AP_.kMetadataSize == 0, "Expression below requires this"); size_t len_ = len + kPageSize_; hsa_status_t status = REAL(hsa_amd_ipc_memory_attach)( - handle, len_, num_agents, mapping_agents, mapped_ptr); + handle, len_, num_agents, mapping_agents, mapped_ptr); if (status == HSA_STATUS_SUCCESS && mapped_ptr) { - *mapped_ptr = reinterpret_cast(reinterpret_cast(*mapped_ptr) + - kPageSize_); + *mapped_ptr = reinterpret_cast(reinterpret_cast(*mapped_ptr) + + kPageSize_); } return status; } -hsa_status_t asan_hsa_amd_ipc_memory_detach(void* mapped_ptr) { +hsa_status_t asan_hsa_amd_ipc_memory_detach(void *mapped_ptr) { static_assert(AP_.kMetadataSize == 0, "Expression below requires this"); - void* mapped_ptr_ = - reinterpret_cast(reinterpret_cast(mapped_ptr) - kPageSize_); + void *mapped_ptr_ = + reinterpret_cast(reinterpret_cast(mapped_ptr) - kPageSize_); return REAL(hsa_amd_ipc_memory_detach)(mapped_ptr_); 
 }

@@ -1551,11 +1555,11 @@ hsa_status_t asan_hsa_amd_pointer_info(const void* ptr,
                                        hsa_agent_t** accessible) {
   void* ptr_ = get_allocator().GetBlockBegin(ptr);
   AsanChunk* m = instance.GetAsanChunkByAddr(reinterpret_cast<uptr>(ptr_));
-  hsa_status_t status = HSA_STATUS_ERROR_NOT_INITIALIZED;
-  if (ptr_ && m){
+  hsa_status_t status;
+  if (ptr_ && m)
     status = REAL(hsa_amd_pointer_info)(ptr, info, alloc, num_agents_accessible,
                                         accessible);
-  if (status == HSA_STATUS_SUCCESS && info) {
+  if (status == HSA_STATUS_SUCCESS && info && ptr_ && m) {
     static_assert(AP_.kMetadataSize == 0, "Expression below requires this");
     info->agentBaseAddress = reinterpret_cast<void*>(
         reinterpret_cast<uptr>(info->agentBaseAddress) + kPageSize_);
@@ -1563,7 +1567,6 @@ hsa_status_t asan_hsa_amd_pointer_info(const void* ptr,
         reinterpret_cast<uptr>(info->hostBaseAddress) + kPageSize_);
     info->sizeInBytes = m->UsedSize();
   }
-}
   return status;
 }

From 601f45363530dd8e079fcf05f5463f1c23b5f35f Mon Sep 17 00:00:00 2001
From: Amit Kumar Pandey
Date: Fri, 14 Nov 2025 11:04:12 +0530
Subject: [PATCH 7/8] Improve code for edge cases testing.

- Changes to 'asan_hsa_amd_pointer_info':
  - Initialize status with the default value 'HSA_STATUS_ERROR_NOT_INITIALIZED'.
  - Add a ternary condition to ensure ptr_ is not nullptr before calling
    GetAsanChunkByAddr.
---
 compiler-rt/lib/asan/asan_allocator.cpp | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp
index 7b6bc9268f54e..4a7b750dcbff6 100644
--- a/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/compiler-rt/lib/asan/asan_allocator.cpp
@@ -1554,18 +1554,21 @@ hsa_status_t asan_hsa_amd_pointer_info(const void* ptr,
                                        uint32_t* num_agents_accessible,
                                        hsa_agent_t** accessible) {
   void* ptr_ = get_allocator().GetBlockBegin(ptr);
-  AsanChunk* m = instance.GetAsanChunkByAddr(reinterpret_cast<uptr>(ptr_));
-  hsa_status_t status;
-  if (ptr_ && m)
+  AsanChunk* m = ptr_
+                     ? instance.GetAsanChunkByAddr(reinterpret_cast<uptr>(ptr_))
+                     : nullptr;
+  hsa_status_t status = HSA_STATUS_ERROR_NOT_INITIALIZED;
+  if (ptr_ && m) {
     status = REAL(hsa_amd_pointer_info)(ptr, info, alloc, num_agents_accessible,
                                         accessible);
-  if (status == HSA_STATUS_SUCCESS && info && ptr_ && m) {
-    static_assert(AP_.kMetadataSize == 0, "Expression below requires this");
-    info->agentBaseAddress = reinterpret_cast<void*>(
-        reinterpret_cast<uptr>(info->agentBaseAddress) + kPageSize_);
-    info->hostBaseAddress = reinterpret_cast<void*>(
-        reinterpret_cast<uptr>(info->hostBaseAddress) + kPageSize_);
-    info->sizeInBytes = m->UsedSize();
+    if (status == HSA_STATUS_SUCCESS && info) {
+      static_assert(AP_.kMetadataSize == 0, "Expression below requires this");
+      info->agentBaseAddress = reinterpret_cast<void*>(
+          reinterpret_cast<uptr>(info->agentBaseAddress) + kPageSize_);
+      info->hostBaseAddress = reinterpret_cast<void*>(
+          reinterpret_cast<uptr>(info->hostBaseAddress) + kPageSize_);
+      info->sizeInBytes = m->UsedSize();
+    }
   }
   return status;
 }

From eed8503dbd5e38d140903f1625a8deaf063364c5 Mon Sep 17 00:00:00 2001
From: Amit Kumar Pandey
Date: Thu, 20 Nov 2025 14:58:10 +0530
Subject: [PATCH 8/8] Address comments of @b-sumner. Fix Typo.

---
 compiler-rt/lib/asan/asan_allocator.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp
index 4a7b750dcbff6..4520d732d5062 100644
--- a/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/compiler-rt/lib/asan/asan_allocator.cpp
@@ -1466,7 +1466,7 @@ hsa_status_t asan_hsa_amd_ipc_memory_create(void* ptr, size_t len,
     uptr p = reinterpret_cast<uptr>(ptr);
     uptr p_ = reinterpret_cast<uptr>(ptr_);
     if (p == p_ + kPageSize_ && len == m->UsedSize()) {
-      size_t len_ = get_allocator().GetActuallyAllocatedSize(ptr);
+      size_t len_ = get_allocator().GetActuallyAllocatedSize(ptr_);
       return REAL(hsa_amd_ipc_memory_create)(ptr_, len_, handle);
     }
   }
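
The application-visible effect of the last two patches can be pictured with a short host-side check. This is a minimal sketch and not part of the patch series: the pool handle `pool`, the helper name `check_pointer_info`, and the ROCm header paths are assumptions; HSA runtime initialization and pool discovery are omitted, and the optional accessible-agents arguments of hsa_amd_pointer_info are left null.

// Hedged sketch only: assumes an initialized HSA runtime and a valid
// device memory pool supplied by the caller; error handling is reduced
// to asserts.
#include <cassert>
#include <cstddef>

#include <hsa/hsa.h>
#include <hsa/hsa_ext_amd.h>

void check_pointer_info(hsa_amd_memory_pool_t pool, size_t len) {
  void* ptr = nullptr;
  hsa_status_t st = hsa_amd_memory_pool_allocate(pool, len, /*flags=*/0, &ptr);
  assert(st == HSA_STATUS_SUCCESS && ptr != nullptr);

  hsa_amd_pointer_info_t info;
  info.size = sizeof(info);  // the runtime expects the struct size to be set
  // The accessible-agents query is not needed here, so the callback and its
  // output arguments are left null.
  st = hsa_amd_pointer_info(ptr, &info, /*alloc=*/nullptr,
                            /*num_agents_accessible=*/nullptr,
                            /*accessible=*/nullptr);
  assert(st == HSA_STATUS_SUCCESS);

  // With the interceptor in place, the reported base and size describe the
  // pointer the application owns, not the redzone-padded block behind it.
  assert(info.agentBaseAddress == ptr);
  assert(info.sizeInBytes == len);

  st = hsa_amd_memory_pool_free(ptr);
  assert(st == HSA_STATUS_SUCCESS);
}

The two asserts on info hold because the interceptor forwards the query on the allocator's block begin, then rebases agentBaseAddress/hostBaseAddress by one kPageSize_ and replaces sizeInBytes with the chunk's UsedSize(), matching the pointer and length the application originally received.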