With Clang version 16+, -fsanitize=thread will turn memcpy/memset/memmove calls in instrumented functions into __tsan_memcpy/__tsan_memset/__tsan_memmove calls respectively.
Add these functions to the core KCSAN runtime, so that we (a) catch data races with mem* functions, and (b) won't run into linker errors with such newer compilers.
Cc: stable@vger.kernel.org # v5.10+ Signed-off-by: Marco Elver elver@google.com --- v2: * Fix for architectures which do not provide their own memcpy/memset/memmove and instead use the generic versions in lib/string. In this case we'll just alias the __tsan_ variants. --- kernel/kcsan/core.c | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+)
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index fe12dfe254ec..4015f2a3e7f6 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -18,6 +18,7 @@ #include <linux/percpu.h> #include <linux/preempt.h> #include <linux/sched.h> +#include <linux/string.h> #include <linux/uaccess.h>
#include "encoding.h" @@ -1308,3 +1309,41 @@ noinline void __tsan_atomic_signal_fence(int memorder) } } EXPORT_SYMBOL(__tsan_atomic_signal_fence); + +#ifdef __HAVE_ARCH_MEMSET +void *__tsan_memset(void *s, int c, size_t count); +noinline void *__tsan_memset(void *s, int c, size_t count) +{ + check_access(s, count, KCSAN_ACCESS_WRITE, _RET_IP_); + return __memset(s, c, count); +} +#else +void *__tsan_memset(void *s, int c, size_t count) __alias(memset); +#endif +EXPORT_SYMBOL(__tsan_memset); + +#ifdef __HAVE_ARCH_MEMMOVE +void *__tsan_memmove(void *dst, const void *src, size_t len); +noinline void *__tsan_memmove(void *dst, const void *src, size_t len) +{ + check_access(dst, len, KCSAN_ACCESS_WRITE, _RET_IP_); + check_access(src, len, 0, _RET_IP_); + return __memmove(dst, src, len); +} +#else +void *__tsan_memmove(void *dst, const void *src, size_t len) __alias(memmove); +#endif +EXPORT_SYMBOL(__tsan_memmove); + +#ifdef __HAVE_ARCH_MEMCPY +void *__tsan_memcpy(void *dst, const void *src, size_t len); +noinline void *__tsan_memcpy(void *dst, const void *src, size_t len) +{ + check_access(dst, len, KCSAN_ACCESS_WRITE, _RET_IP_); + check_access(src, len, 0, _RET_IP_); + return __memcpy(dst, src, len); +} +#else +void *__tsan_memcpy(void *dst, const void *src, size_t len) __alias(memcpy); +#endif +EXPORT_SYMBOL(__tsan_memcpy);
On Fri, 9 Sept 2022 at 09:38, Marco Elver elver@google.com wrote:
With Clang version 16+, -fsanitize=thread will turn memcpy/memset/memmove calls in instrumented functions into __tsan_memcpy/__tsan_memset/__tsan_memmove calls respectively.
Add these functions to the core KCSAN runtime, so that we (a) catch data races with mem* functions, and (b) won't run into linker errors with such newer compilers.
Cc: stable@vger.kernel.org # v5.10+ Signed-off-by: Marco Elver elver@google.com
v2:
- Fix for architectures which do not provide their own memcpy/memset/memmove and instead use the generic versions in lib/string. In this case we'll just alias the __tsan_ variants.
kernel/kcsan/core.c | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+)
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index fe12dfe254ec..4015f2a3e7f6 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -18,6 +18,7 @@ #include <linux/percpu.h> #include <linux/preempt.h> #include <linux/sched.h> +#include <linux/string.h> #include <linux/uaccess.h>
#include "encoding.h" @@ -1308,3 +1309,41 @@ noinline void __tsan_atomic_signal_fence(int memorder) } } EXPORT_SYMBOL(__tsan_atomic_signal_fence);
+#ifdef __HAVE_ARCH_MEMSET +void *__tsan_memset(void *s, int c, size_t count); +noinline void *__tsan_memset(void *s, int c, size_t count) +{
check_access(s, count, KCSAN_ACCESS_WRITE, _RET_IP_);
These can use large sizes; does it make sense to truncate the checked size to MAX_ENCODABLE_SIZE?
return __memset(s, c, count);
+} +#else +void *__tsan_memset(void *s, int c, size_t count) __alias(memset); +#endif +EXPORT_SYMBOL(__tsan_memset);
+#ifdef __HAVE_ARCH_MEMMOVE +void *__tsan_memmove(void *dst, const void *src, size_t len); +noinline void *__tsan_memmove(void *dst, const void *src, size_t len) +{
check_access(dst, len, KCSAN_ACCESS_WRITE, _RET_IP_);
check_access(src, len, 0, _RET_IP_);
return __memmove(dst, src, len);
+} +#else +void *__tsan_memmove(void *dst, const void *src, size_t len) __alias(memmove); +#endif +EXPORT_SYMBOL(__tsan_memmove);
+#ifdef __HAVE_ARCH_MEMCPY +void *__tsan_memcpy(void *dst, const void *src, size_t len); +noinline void *__tsan_memcpy(void *dst, const void *src, size_t len) +{
check_access(dst, len, KCSAN_ACCESS_WRITE, _RET_IP_);
check_access(src, len, 0, _RET_IP_);
return __memcpy(dst, src, len);
+} +#else +void *__tsan_memcpy(void *dst, const void *src, size_t len) __alias(memcpy); +#endif
+EXPORT_SYMBOL(__tsan_memcpy);
2.37.2.789.g6183377224-goog
On Fri, 9 Sept 2022 at 10:38, Dmitry Vyukov dvyukov@google.com wrote:
On Fri, 9 Sept 2022 at 09:38, Marco Elver elver@google.com wrote:
With Clang version 16+, -fsanitize=thread will turn memcpy/memset/memmove calls in instrumented functions into __tsan_memcpy/__tsan_memset/__tsan_memmove calls respectively.
Add these functions to the core KCSAN runtime, so that we (a) catch data races with mem* functions, and (b) won't run into linker errors with such newer compilers.
Cc: stable@vger.kernel.org # v5.10+ Signed-off-by: Marco Elver elver@google.com
v2:
- Fix for architectures which do not provide their own memcpy/memset/memmove and instead use the generic versions in lib/string. In this case we'll just alias the __tsan_ variants.
kernel/kcsan/core.c | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+)
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index fe12dfe254ec..4015f2a3e7f6 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -18,6 +18,7 @@ #include <linux/percpu.h> #include <linux/preempt.h> #include <linux/sched.h> +#include <linux/string.h> #include <linux/uaccess.h>
#include "encoding.h" @@ -1308,3 +1309,41 @@ noinline void __tsan_atomic_signal_fence(int memorder) } } EXPORT_SYMBOL(__tsan_atomic_signal_fence);
+#ifdef __HAVE_ARCH_MEMSET +void *__tsan_memset(void *s, int c, size_t count); +noinline void *__tsan_memset(void *s, int c, size_t count) +{
check_access(s, count, KCSAN_ACCESS_WRITE, _RET_IP_);
These can use large sizes; does it make sense to truncate the checked size to MAX_ENCODABLE_SIZE?
Hmm, good point — that way KCSAN can still set up watchpoints on these accesses. I'll do a v3.
linux-stable-mirror@lists.linaro.org