With Clang version 16+, -fsanitize=thread will turn memcpy/memset/memmove calls in instrumented functions into __tsan_memcpy/__tsan_memset/__tsan_memmove calls respectively.
Add these functions to the core KCSAN runtime, so that we (a) catch data races with mem* functions, and (b) won't run into linker errors with such newer compilers.
Cc: <stable@vger.kernel.org> # v5.10+
Signed-off-by: Marco Elver <elver@google.com>
---
 kernel/kcsan/core.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index fe12dfe254ec..66ef48aa86e0 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -18,6 +18,7 @@
 #include <linux/percpu.h>
 #include <linux/preempt.h>
 #include <linux/sched.h>
+#include <linux/string.h>
 #include <linux/uaccess.h>
 
 #include "encoding.h"
@@ -1308,3 +1309,29 @@ noinline void __tsan_atomic_signal_fence(int memorder)
 	}
 }
 EXPORT_SYMBOL(__tsan_atomic_signal_fence);
+
+void *__tsan_memset(void *s, int c, size_t count);
+noinline void *__tsan_memset(void *s, int c, size_t count)
+{
+	check_access(s, count, KCSAN_ACCESS_WRITE, _RET_IP_);
+	return __memset(s, c, count);
+}
+EXPORT_SYMBOL(__tsan_memset);
+
+void *__tsan_memmove(void *dst, const void *src, size_t len);
+noinline void *__tsan_memmove(void *dst, const void *src, size_t len)
+{
+	check_access(dst, len, KCSAN_ACCESS_WRITE, _RET_IP_);
+	check_access(src, len, 0, _RET_IP_);
+	return __memmove(dst, src, len);
+}
+EXPORT_SYMBOL(__tsan_memmove);
+
+void *__tsan_memcpy(void *dst, const void *src, size_t len);
+noinline void *__tsan_memcpy(void *dst, const void *src, size_t len)
+{
+	check_access(dst, len, KCSAN_ACCESS_WRITE, _RET_IP_);
+	check_access(src, len, 0, _RET_IP_);
+	return __memcpy(dst, src, len);
+}
+EXPORT_SYMBOL(__tsan_memcpy);
On Wed, Sep 07, 2022 at 07:39:02PM +0200, Marco Elver wrote:
> With Clang version 16+, -fsanitize=thread will turn memcpy/memset/memmove calls in instrumented functions into __tsan_memcpy/__tsan_memset/__tsan_memmove calls respectively.
>
> Add these functions to the core KCSAN runtime, so that we (a) catch data races with mem* functions, and (b) won't run into linker errors with such newer compilers.
>
> Cc: <stable@vger.kernel.org> # v5.10+
For (b) I think this is OK, but for (a), what is the atomicity guarantee of our mem* functions? Per-byte atomic, or something more complicated (for example, providing best-effort atomicity if a memory location in the range is naturally aligned to a machine word)?

If it's per-byte atomicity, then maybe another KCSAN_ACCESS_* flag is needed; otherwise memset(0x8, 0, 0x2) is considered atomic if ASSUME_PLAIN_WRITES_ATOMIC=y. Unless I'm missing something.
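To make the concern concrete, a minimal sketch (hypothetical code, not from the patch; `flags`, writer_a() and writer_b() are made up, assuming a KCSAN-enabled kernel):

	/*
	 * With ASSUME_PLAIN_WRITES_ATOMIC=y, the 2-byte write done via
	 * memset() below is checked like a plain aligned sub-word write,
	 * i.e. "assumed atomic", even though memset() makes no such
	 * guarantee.
	 */
	short flags;	/* naturally aligned, sizeof(flags) == 2 */

	void writer_a(void)
	{
		memset(&flags, 0, sizeof(flags));	/* checked as 2-byte write */
	}

	void writer_b(void)
	{
		flags = 1;	/* plain aligned write, also assumed atomic */
	}

	/* Both writes assumed atomic => no data race reported. */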
Anyway, this may be worth another patch and some discussion/doc, because it only improves the accuracy of the tool. In other words, this patch and the "stable" tag look good to me.
Regards,
Boqun
> Signed-off-by: Marco Elver <elver@google.com>
> ---
>  kernel/kcsan/core.c | 27 +++++++++++++++++++++++++++
>  1 file changed, 27 insertions(+)
>
> diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
> index fe12dfe254ec..66ef48aa86e0 100644
> --- a/kernel/kcsan/core.c
> +++ b/kernel/kcsan/core.c
> @@ -18,6 +18,7 @@
>  #include <linux/percpu.h>
>  #include <linux/preempt.h>
>  #include <linux/sched.h>
> +#include <linux/string.h>
>  #include <linux/uaccess.h>
>
>  #include "encoding.h"
> @@ -1308,3 +1309,29 @@ noinline void __tsan_atomic_signal_fence(int memorder)
>  	}
>  }
>  EXPORT_SYMBOL(__tsan_atomic_signal_fence);
> +
> +void *__tsan_memset(void *s, int c, size_t count);
> +noinline void *__tsan_memset(void *s, int c, size_t count)
> +{
> +	check_access(s, count, KCSAN_ACCESS_WRITE, _RET_IP_);
> +	return __memset(s, c, count);
> +}
> +EXPORT_SYMBOL(__tsan_memset);
> +
> +void *__tsan_memmove(void *dst, const void *src, size_t len);
> +noinline void *__tsan_memmove(void *dst, const void *src, size_t len)
> +{
> +	check_access(dst, len, KCSAN_ACCESS_WRITE, _RET_IP_);
> +	check_access(src, len, 0, _RET_IP_);
> +	return __memmove(dst, src, len);
> +}
> +EXPORT_SYMBOL(__tsan_memmove);
> +
> +void *__tsan_memcpy(void *dst, const void *src, size_t len);
> +noinline void *__tsan_memcpy(void *dst, const void *src, size_t len)
> +{
> +	check_access(dst, len, KCSAN_ACCESS_WRITE, _RET_IP_);
> +	check_access(src, len, 0, _RET_IP_);
> +	return __memcpy(dst, src, len);
> +}
> +EXPORT_SYMBOL(__tsan_memcpy);
> --
> 2.37.2.789.g6183377224-goog
On Wed, 7 Sept 2022 at 20:17, Boqun Feng <boqun.feng@gmail.com> wrote:
> On Wed, Sep 07, 2022 at 07:39:02PM +0200, Marco Elver wrote:
> > With Clang version 16+, -fsanitize=thread will turn memcpy/memset/memmove calls in instrumented functions into __tsan_memcpy/__tsan_memset/__tsan_memmove calls respectively.
> >
> > Add these functions to the core KCSAN runtime, so that we (a) catch data races with mem* functions, and (b) won't run into linker errors with such newer compilers.
> >
> > Cc: <stable@vger.kernel.org> # v5.10+
>
> For (b) I think this is OK, but for (a), what is the atomicity guarantee of our mem* functions? Per-byte atomic, or something more complicated (for example, providing best-effort atomicity if a memory location in the range is naturally aligned to a machine word)?
There should be no atomicity guarantee for mem*() functions; anything else would never be safe, given compilers love to optimize all of them (replacing the calls with inline versions, etc.).
> If it's per-byte atomicity, then maybe another KCSAN_ACCESS_* flag is needed; otherwise memset(0x8, 0, 0x2) is considered atomic if ASSUME_PLAIN_WRITES_ATOMIC=y. Unless I'm missing something.
>
> Anyway, this may be worth another patch and some discussion/doc, because it only improves the accuracy of the tool. In other words, this patch and the "stable" tag look good to me.
Right, this will treat write accesses done by mem*() functions with a size less than or equal to word size as atomic if that option is on. However, I feel the more interesting cases will be memcpy/memset/memmove with much larger sizes. That being said, note that even though we pretend smaller-than-word-size writes might be atomic, for no data race to be detected, both accesses need to be atomic.
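For example (an illustrative sketch, not from the patch; `buf`, writer_a() and writer_b() are made up):

	char buf[64];

	/* A 64-byte range write: far larger than a machine word, so it
	 * can never be assumed atomic, regardless of the
	 * ASSUME_PLAIN_WRITES_ATOMIC setting. */
	void writer_a(void) { memset(buf, 0, sizeof(buf)); }
	void writer_b(void) { memset(buf, 1, sizeof(buf)); }

	/* If writer_a() and writer_b() race, neither access is atomic,
	 * so KCSAN reports a data race. */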
Whether that behaviour should be changed for mem*() functions in the default non-strict config is, like you say, something to ponder. In general, I find ASSUME_PLAIN_WRITES_ATOMIC=y a pretty bad default, and I'd rather just change that default. But unfortunately, I think the kernel isn't ready for that, given opinions on this still diverge.
Thanks,
-- Marco
On Wed, 7 Sept 2022 at 19:39, Marco Elver <elver@google.com> wrote:
> With Clang version 16+, -fsanitize=thread will turn memcpy/memset/memmove calls in instrumented functions into __tsan_memcpy/__tsan_memset/__tsan_memmove calls respectively.
>
> Add these functions to the core KCSAN runtime, so that we (a) catch data races with mem* functions, and (b) won't run into linker errors with such newer compilers.
>
> Cc: <stable@vger.kernel.org> # v5.10+
> Signed-off-by: Marco Elver <elver@google.com>
> ---
>  kernel/kcsan/core.c | 27 +++++++++++++++++++++++++++
>  1 file changed, 27 insertions(+)
>
> diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
> index fe12dfe254ec..66ef48aa86e0 100644
> --- a/kernel/kcsan/core.c
> +++ b/kernel/kcsan/core.c
> @@ -18,6 +18,7 @@
>  #include <linux/percpu.h>
>  #include <linux/preempt.h>
>  #include <linux/sched.h>
> +#include <linux/string.h>
>  #include <linux/uaccess.h>
>
>  #include "encoding.h"
> @@ -1308,3 +1309,29 @@ noinline void __tsan_atomic_signal_fence(int memorder)
>  	}
>  }
>  EXPORT_SYMBOL(__tsan_atomic_signal_fence);
> +
> +void *__tsan_memset(void *s, int c, size_t count);
> +noinline void *__tsan_memset(void *s, int c, size_t count)
> +{
> +	check_access(s, count, KCSAN_ACCESS_WRITE, _RET_IP_);
> +	return __memset(s, c, count);
> +}
> +EXPORT_SYMBOL(__tsan_memset);
> +
> +void *__tsan_memmove(void *dst, const void *src, size_t len);
> +noinline void *__tsan_memmove(void *dst, const void *src, size_t len)
> +{
> +	check_access(dst, len, KCSAN_ACCESS_WRITE, _RET_IP_);
> +	check_access(src, len, 0, _RET_IP_);
> +	return __memmove(dst, src, len);
> +}
> +EXPORT_SYMBOL(__tsan_memmove);
> +
> +void *__tsan_memcpy(void *dst, const void *src, size_t len);
> +noinline void *__tsan_memcpy(void *dst, const void *src, size_t len)
> +{
> +	check_access(dst, len, KCSAN_ACCESS_WRITE, _RET_IP_);
> +	check_access(src, len, 0, _RET_IP_);
> +	return __memcpy(dst, src, len);
> +}
> +EXPORT_SYMBOL(__tsan_memcpy);
I missed that s390 doesn't have arch mem*() variants, so this fails:

  kernel/kcsan/core.c:1316:16: error: implicit declaration of function '__memset'; did you mean '__memset64'? [-Werror=implicit-function-declaration]

I'll send a v2 where __tsan_mem*() is aliased to the generic versions if the arch doesn't provide its own mem*() functions.
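Rough sketch of that idea (untested; the actual v2 may differ — this assumes the arch-override macro __HAVE_ARCH_MEMSET from the arch's asm/string.h and the kernel's __alias() attribute wrapper, shown for memset only):

	#ifdef __HAVE_ARCH_MEMSET
	void *__tsan_memset(void *s, int c, size_t count);
	noinline void *__tsan_memset(void *s, int c, size_t count)
	{
		check_access(s, count, KCSAN_ACCESS_WRITE, _RET_IP_);
		return __memset(s, c, count);
	}
	#else
	/* No arch __memset: the generic memset() is already instrumented,
	 * so alias to it and avoid the implicit-declaration error. */
	void *__tsan_memset(void *s, int c, size_t count) __alias(memset);
	#endif
	EXPORT_SYMBOL(__tsan_memset);

	/* ... and analogously for __tsan_memcpy() and __tsan_memmove(). */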
> --
> 2.37.2.789.g6183377224-goog