author     Alexey Samsonov <vonosmas@gmail.com>    2014-12-16 01:23:03 +0000
committer  Alexey Samsonov <vonosmas@gmail.com>    2014-12-16 01:23:03 +0000
commit     5f21f50ac45c09c9aebbb0ceaf207d38f2b85873 (patch)
tree       7bba973ccd3f2420edc28f571687298c0b6cb38d
parent     37215cf706ab74ad5f59cd53e0eff26aaab047cb (diff)
[ASan] Allow malloc_context_size to be modified atomically at runtime.
Summary:
Introduce a __asan::malloc_context_size atomic that is used to determine the
required malloc/free stack trace size. It is initialized from the
common_flags()->malloc_context_size flag, but can later be overwritten at
runtime (e.g. when ASan is activated / deactivated).

Test Plan: regression test suite

Reviewers: kcc, eugenis

Subscribers: llvm-commits

Differential Revision: http://reviews.llvm.org/D6645

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@224305 91177308-0d34-0410-b5e6-96231b3b80d8
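For readers who want the shape of the change in isolation, the sketch below mirrors the release-store / acquire-load pattern the patch uses, but as a standalone program built on std::atomic<uint32_t> instead of the sanitizer-internal atomic_uint32_t; the default of 30 matches kDefaultMallocContextSize. It is an illustration of the pattern, not the patched runtime code itself.

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Process-wide malloc/free stack-trace depth, adjustable at runtime.
    // The default of 30 mirrors kDefaultMallocContextSize in asan_stack.h.
    static std::atomic<uint32_t> malloc_context_size{30};

    static void SetMallocContextSize(uint32_t size) {
      // Release store pairs with the acquire load below, so a reader that
      // observes the new size also observes state written before the update.
      malloc_context_size.store(size, std::memory_order_release);
    }

    static uint32_t GetMallocContextSize() {
      return malloc_context_size.load(std::memory_order_acquire);
    }

    int main() {
      std::printf("depth at startup: %u\n", GetMallocContextSize());
      SetMallocContextSize(2);   // e.g. shrink traces while ASan is deactivated
      std::printf("depth after update: %u\n", GetMallocContextSize());
      return 0;
    }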
-rw-r--r--  lib/asan/asan_activation.cc                   |  6
-rw-r--r--  lib/asan/asan_rtl.cc                          |  8
-rw-r--r--  lib/asan/asan_stack.cc                        | 15
-rw-r--r--  lib/asan/asan_stack.h                         | 10
-rw-r--r--  lib/sanitizer_common/sanitizer_stacktrace.cc  |  2
5 files changed, 30 insertions(+), 11 deletions(-)
diff --git a/lib/asan/asan_activation.cc b/lib/asan/asan_activation.cc
index c4733a337..02e091192 100644
--- a/lib/asan/asan_activation.cc
+++ b/lib/asan/asan_activation.cc
@@ -16,6 +16,7 @@
#include "asan_allocator.h"
#include "asan_flags.h"
#include "asan_internal.h"
+#include "asan_stack.h"
#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
@@ -62,13 +63,12 @@ void AsanActivate() {
// FIXME: this is not atomic, and there may be other threads alive.
flags()->max_redzone = asan_deactivated_flags.max_redzone;
flags()->poison_heap = asan_deactivated_flags.poison_heap;
- common_flags()->malloc_context_size =
- asan_deactivated_flags.malloc_context_size;
flags()->alloc_dealloc_mismatch =
asan_deactivated_flags.alloc_dealloc_mismatch;
ParseExtraActivationFlags();
+ SetMallocContextSize(asan_deactivated_flags.malloc_context_size);
ReInitializeAllocator(asan_deactivated_flags.allocator_may_return_null,
asan_deactivated_flags.quarantine_size);
@@ -77,7 +77,7 @@ void AsanActivate() {
"malloc_context_size %d, alloc_dealloc_mismatch %d, "
"allocator_may_return_null %d\n",
asan_deactivated_flags.quarantine_size, flags()->max_redzone,
- flags()->poison_heap, common_flags()->malloc_context_size,
+ flags()->poison_heap, asan_deactivated_flags.malloc_context_size,
flags()->alloc_dealloc_mismatch,
asan_deactivated_flags.allocator_may_return_null);
}
diff --git a/lib/asan/asan_rtl.cc b/lib/asan/asan_rtl.cc
index bb27cca74..79f40346d 100644
--- a/lib/asan/asan_rtl.cc
+++ b/lib/asan/asan_rtl.cc
@@ -71,8 +71,6 @@ static void AsanCheckFailed(const char *file, int line, const char *cond,
}
// -------------------------- Flags ------------------------- {{{1
-static const int kDefaultMallocContextSize = 30;
-
Flags asan_flags_dont_use_directly; // use via flags().
static const char *MaybeCallAsanDefaultOptions() {
@@ -93,7 +91,6 @@ static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
static void ParseFlagsFromString(Flags *f, const char *str) {
CommonFlags *cf = common_flags();
ParseCommonFlagsFromString(cf, str);
- CHECK((uptr)cf->malloc_context_size <= kStackTraceMax);
// Please write meaningful flag descriptions when adding new flags.
ParseFlag(str, &f->quarantine_size, "quarantine_size",
"Size (in bytes) of quarantine used to detect use-after-free "
@@ -322,7 +319,8 @@ void InitializeFlags(Flags *f, const char *env) {
if (f->strict_init_order) {
f->check_initialization_order = true;
}
- CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
+ CHECK_LE((uptr)cf->malloc_context_size, kStackTraceMax);
+ CHECK_LE(f->min_uar_stack_size_log, f->max_uar_stack_size_log);
}
// Parse flags that may change between startup and activation.
@@ -572,6 +570,8 @@ static void AsanInitInternal() {
const char *options = GetEnv("ASAN_OPTIONS");
InitializeFlags(flags(), options);
+ SetMallocContextSize(common_flags()->malloc_context_size);
+
InitializeHighMemEnd();
// Make sure we are not statically linked.
diff --git a/lib/asan/asan_stack.cc b/lib/asan/asan_stack.cc
index 8188f3b5b..cf7a587fa 100644
--- a/lib/asan/asan_stack.cc
+++ b/lib/asan/asan_stack.cc
@@ -13,6 +13,21 @@
//===----------------------------------------------------------------------===//
#include "asan_internal.h"
#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+
+namespace __asan {
+
+static atomic_uint32_t malloc_context_size;
+
+void SetMallocContextSize(u32 size) {
+ atomic_store(&malloc_context_size, size, memory_order_release);
+}
+
+u32 GetMallocContextSize() {
+ return atomic_load(&malloc_context_size, memory_order_acquire);
+}
+
+} // namespace __asan
// ------------------ Interface -------------- {{{1
diff --git a/lib/asan/asan_stack.h b/lib/asan/asan_stack.h
index a99525621..122967a15 100644
--- a/lib/asan/asan_stack.h
+++ b/lib/asan/asan_stack.h
@@ -21,6 +21,11 @@
namespace __asan {
+static const u32 kDefaultMallocContextSize = 30;
+
+void SetMallocContextSize(u32 size);
+u32 GetMallocContextSize();
+
// Get the stack trace with the given pc and bp.
// The pc will be in the position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame.
@@ -93,9 +98,8 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
#define GET_STACK_TRACE_THREAD \
GET_STACK_TRACE(kStackTraceMax, true)
-#define GET_STACK_TRACE_MALLOC \
- GET_STACK_TRACE(common_flags()->malloc_context_size, \
- common_flags()->fast_unwind_on_malloc)
+#define GET_STACK_TRACE_MALLOC \
+ GET_STACK_TRACE(GetMallocContextSize(), common_flags()->fast_unwind_on_malloc)
#define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC
diff --git a/lib/sanitizer_common/sanitizer_stacktrace.cc b/lib/sanitizer_common/sanitizer_stacktrace.cc
index cf061fb8c..13549c4f8 100644
--- a/lib/sanitizer_common/sanitizer_stacktrace.cc
+++ b/lib/sanitizer_common/sanitizer_stacktrace.cc
@@ -120,7 +120,7 @@ void BufferedStackTrace::PopStackFrames(uptr count) {
uptr BufferedStackTrace::LocatePcInTrace(uptr pc) {
// Use threshold to find PC in stack trace, as PC we want to unwind from may
// slightly differ from return address in the actual unwinded stack trace.
- const int kPcThreshold = 288;
+ const int kPcThreshold = 304;
for (uptr i = 0; i < size; ++i) {
if (MatchPc(pc, trace[i], kPcThreshold))
return i;