Kees Cook <kees@kernel.org> writes:
For both stringify functions, snprintf is potentially unsafe. In the spirit of recent string API discussions, please switch to using a seq_buf:
static void stringify_free_seq(struct kunit *test, int *seq, struct seq_buf *buf)
{
	unsigned int i;

	for (i = 0; i < BUFFER_NUM; i++)
		seq_buf_printf(buf, "[%d]", seq[i]);
	KUNIT_EXPECT_FALSE(test, seq_buf_has_overflowed(buf));
}
...
	DECLARE_SEQ_BUF(freeseq_buf, FREESEQ_BUFLEN);
...
	stringify_free_seq(test, tc->free_sequence, &freeseq_buf);
Thanks for calling attention to this! Will be fixed for v4!
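For reference, the call site in permute_frees() would then end up looking roughly like this (only a sketch reusing the names from this patch; seq_buf_str() returns the NUL-terminated contents of the seq_buf, and the exact wiring in v4 may differ):

	if (index == BUFFER_NUM) {
		/* On-stack buffer wrapped by a seq_buf. */
		DECLARE_SEQ_BUF(freeseq_buf, FREESEQ_BUFLEN);

		case_failed = binder_alloc_test_alloc_free(test, alloc, tc, end);
		*runs += 1;
		*failures += case_failed;

		if (case_failed || PRINT_ALL_CASES) {
			stringify_free_seq(test, tc->free_sequence, &freeseq_buf);
			kunit_err(test, "case %lu: [%s] | %s - %s - %s", *runs,
				  case_failed ? "FAILED" : "PASSED",
				  tc->front_pages ? "front" : "back ",
				  tc->alignments, seq_buf_str(&freeseq_buf));
		}

		return;
	}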
@@ -124,28 +164,30 @@ static bool check_buffer_pages_allocated(struct kunit *test,
 	return true;
 }
 
-static void binder_alloc_test_alloc_buf(struct kunit *test,
-					struct binder_alloc *alloc,
-					struct binder_buffer *buffers[],
-					size_t *sizes, int *seq)
+static unsigned long binder_alloc_test_alloc_buf(struct kunit *test,
+						 struct binder_alloc *alloc,
+						 struct binder_buffer *buffers[],
+						 size_t *sizes, int *seq)
 {
+	unsigned long failures = 0;
 	int i;
 
 	for (i = 0; i < BUFFER_NUM; i++) {
 		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
 		if (IS_ERR(buffers[i]) ||
-		    !check_buffer_pages_allocated(test, alloc, buffers[i], sizes[i])) {
-			pr_err_size_seq(test, sizes, seq);
-			binder_alloc_test_failures++;
-		}
+		    !check_buffer_pages_allocated(test, alloc, buffers[i], sizes[i]))
+			failures++;
 	}
+
+	return failures;
 }
-static void binder_alloc_test_free_buf(struct kunit *test,
-				       struct binder_alloc *alloc,
-				       struct binder_buffer *buffers[],
-				       size_t *sizes, int *seq, size_t end)
+static unsigned long binder_alloc_test_free_buf(struct kunit *test,
+						struct binder_alloc *alloc,
+						struct binder_buffer *buffers[],
+						size_t *sizes, int *seq, size_t end)
 {
+	unsigned long failures = 0;
 	int i;
 
 	for (i = 0; i < BUFFER_NUM; i++)
@@ -153,17 +195,19 @@ static void binder_alloc_test_free_buf(struct kunit *test,
 	for (i = 0; i <= (end - 1) / PAGE_SIZE; i++) {
 		if (list_empty(page_to_lru(alloc->pages[i]))) {
-			pr_err_size_seq(test, sizes, seq);
 			kunit_err(test, "expect lru but is %s at page index %d\n",
 				  alloc->pages[i] ? "alloc" : "free", i);
-			binder_alloc_test_failures++;
+			failures++;
 		}
 	}
+
+	return failures;
 }
-static void binder_alloc_test_free_page(struct kunit *test,
-					struct binder_alloc *alloc)
+static unsigned long binder_alloc_test_free_page(struct kunit *test,
+						 struct binder_alloc *alloc)
 {
+	unsigned long failures = 0;
 	unsigned long count;
 	int i;
 
@@ -177,27 +221,70 @@ static void binder_alloc_test_free_page(struct kunit *test,
 			kunit_err(test, "expect free but is %s at page index %d\n",
 				  list_empty(page_to_lru(alloc->pages[i])) ? "alloc" : "lru", i);
-			binder_alloc_test_failures++;
+			failures++;
 		}
 	}
+
+	return failures;
 }
-static void binder_alloc_test_alloc_free(struct kunit *test,
+/* Executes one full test run for the given test case. */
+static bool binder_alloc_test_alloc_free(struct kunit *test,
 					 struct binder_alloc *alloc,
-					 size_t *sizes, int *seq, size_t end)
+					 struct binder_alloc_test_case_info *tc,
+					 size_t end)
 {
+	unsigned long pages = PAGE_ALIGN(end) / PAGE_SIZE;
 	struct binder_buffer *buffers[BUFFER_NUM];
-
-	binder_alloc_test_alloc_buf(test, alloc, buffers, sizes, seq);
-	binder_alloc_test_free_buf(test, alloc, buffers, sizes, seq, end);
+	unsigned long failures;
+	bool failed = false;
+
+	failures = binder_alloc_test_alloc_buf(test, alloc, buffers,
+					       tc->buffer_sizes,
+					       tc->free_sequence);
+	failed = failed || failures;
+	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+			    "Initial allocation failed: %lu/%u buffers with errors",
+			    failures, BUFFER_NUM);
+
+	failures = binder_alloc_test_free_buf(test, alloc, buffers,
+					      tc->buffer_sizes,
+					      tc->free_sequence, end);
+	failed = failed || failures;
+	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+			    "Initial buffers not freed correctly: %lu/%lu pages not on lru list",
+			    failures, pages);
 
 	/* Allocate from lru. */
-	binder_alloc_test_alloc_buf(test, alloc, buffers, sizes, seq);
-	if (list_lru_count(alloc->freelist))
-		kunit_err(test, "lru list should be empty but is not\n");
-
-	binder_alloc_test_free_buf(test, alloc, buffers, sizes, seq, end);
-	binder_alloc_test_free_page(test, alloc);
+	failures = binder_alloc_test_alloc_buf(test, alloc, buffers,
+					       tc->buffer_sizes,
+					       tc->free_sequence);
+	failed = failed || failures;
+	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+			    "Reallocation failed: %lu/%u buffers with errors",
+			    failures, BUFFER_NUM);
+
+	failures = list_lru_count(alloc->freelist);
+	failed = failed || failures;
+	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+			    "lru list should be empty after reallocation but still has %lu pages",
+			    failures);
+
+	failures = binder_alloc_test_free_buf(test, alloc, buffers,
+					      tc->buffer_sizes,
+					      tc->free_sequence, end);
+	failed = failed || failures;
+	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+			    "Reallocated buffers not freed correctly: %lu/%lu pages not on lru list",
+			    failures, pages);
+
+	failures = binder_alloc_test_free_page(test, alloc);
+	failed = failed || failures;
+	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+			    "Failed to clean up allocated pages: %lu/%lu pages still installed",
+			    failures, (alloc->buffer_size / PAGE_SIZE));
+
+	return failed;
 }
 static bool is_dup(int *seq, int index, int val)
@@ -213,24 +300,44 @@ static bool is_dup(int *seq, int index, int val)
 /* Generate BUFFER_NUM factorial free orders. */
 static void permute_frees(struct kunit *test, struct binder_alloc *alloc,
-			  size_t *sizes, int *seq, int index, size_t end)
+			  struct binder_alloc_test_case_info *tc,
+			  unsigned long *runs, unsigned long *failures,
+			  int index, size_t end)
 {
+	bool case_failed;
 	int i;
 
 	if (index == BUFFER_NUM) {
-		binder_alloc_test_alloc_free(test, alloc, sizes, seq, end);
+		char freeseq_buf[FREESEQ_BUFLEN];
+
+		case_failed = binder_alloc_test_alloc_free(test, alloc, tc, end);
+		*runs += 1;
+		*failures += case_failed;
+
+		if (case_failed || PRINT_ALL_CASES) {
+			stringify_free_seq(test, tc->free_sequence, freeseq_buf,
+					   FREESEQ_BUFLEN);
+			kunit_err(test, "case %lu: [%s] | %s - %s - %s", *runs,
+				  case_failed ? "FAILED" : "PASSED",
+				  tc->front_pages ? "front" : "back ",
+				  tc->alignments, freeseq_buf);
+		}
+
 		return;
 	}
 	for (i = 0; i < BUFFER_NUM; i++) {
-		if (is_dup(seq, index, i))
+		if (is_dup(tc->free_sequence, index, i))
 			continue;
-		seq[index] = i;
-		permute_frees(test, alloc, sizes, seq, index + 1, end);
+		tc->free_sequence[index] = i;
+		permute_frees(test, alloc, tc, runs, failures, index + 1, end);
 	}
 }
@@ -238,31 +345,45 @@ static void gen_buf_sizes(struct kunit *test, struct binder_alloc *alloc,
-static void gen_buf_sizes(struct kunit *test, struct binder_alloc *alloc,
-			  size_t *end_offset)
+static void gen_buf_sizes(struct kunit *test,
+			  struct binder_alloc *alloc,
+			  struct binder_alloc_test_case_info *tc,
+			  size_t *end_offset, unsigned long *runs,
+			  unsigned long *failures)
 {
 	size_t last_offset, offset = 0;
 	size_t front_sizes[BUFFER_NUM];
 	size_t back_sizes[BUFFER_NUM];
 	int seq[BUFFER_NUM] = {0};
 	int i;
 
+	tc->free_sequence = seq;
 	for (i = 0; i < BUFFER_NUM; i++) {
 		last_offset = offset;
 		offset = end_offset[i];
 		front_sizes[i] = offset - last_offset;
 		back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
 	}
+	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
+
 	/*
 	 * Buffers share the first or last few pages.
 	 * Only BUFFER_NUM - 1 buffer sizes are adjustable since
 	 * we need one giant buffer before getting to the last page.
 	 */
-	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
-	permute_frees(test, alloc, front_sizes, seq, 0,
-		      end_offset[BUFFER_NUM - 1]);
-	permute_frees(test, alloc, back_sizes, seq, 0, alloc->buffer_size);
+	tc->front_pages = true;
+	tc->buffer_sizes = front_sizes;
+	permute_frees(test, alloc, tc, runs, failures, 0,
+		      end_offset[BUFFER_NUM - 1]);
+
+	tc->front_pages = false;
+	tc->buffer_sizes = back_sizes;
+	permute_frees(test, alloc, tc, runs, failures, 0, alloc->buffer_size);
 }
 static void gen_buf_offsets(struct kunit *test, struct binder_alloc *alloc,
-			    size_t *end_offset, int index)
+			    size_t *end_offset, int *alignments,
+			    unsigned long *runs, unsigned long *failures,
+			    int index)
 {
 	size_t end, prev;
 	int align;
 
 	if (index == BUFFER_NUM) {
-		gen_buf_sizes(test, alloc, end_offset);
+		struct binder_alloc_test_case_info tc = {0};
+
+		stringify_alignments(test, alignments, tc.alignments,
+				     ALIGNMENTS_BUFLEN);
+
+		gen_buf_sizes(test, alloc, &tc, end_offset, runs, failures);
 		return;
 	}
 	prev = index == 0 ? 0 : end_offset[index - 1];
@@ -276,7 +397,9 @@ static void gen_buf_offsets(struct kunit *test, struct binder_alloc *alloc,
 		else
 			end += BUFFER_MIN_SIZE;
 		end_offset[index] = end;
-		gen_buf_offsets(test, alloc, end_offset, index + 1);
+		alignments[index] = align;
+		gen_buf_offsets(test, alloc, end_offset, alignments, runs,
+				failures, index + 1);
 	}
 }
@@ -328,10 +451,15 @@ static void binder_alloc_exhaustive_test(struct kunit *test)
 {
 	struct binder_alloc_test *priv = test->priv;
 	size_t end_offset[BUFFER_NUM];
+	int alignments[BUFFER_NUM];
+	unsigned long failures = 0;
+	unsigned long runs = 0;
 
-	gen_buf_offsets(test, &priv->alloc, end_offset, 0);
+	gen_buf_offsets(test, &priv->alloc, end_offset, alignments, &runs,
+			&failures, 0);
 
-	KUNIT_EXPECT_EQ(test, binder_alloc_test_failures, 0);
+	KUNIT_EXPECT_EQ(test, runs, TOTAL_EXHAUSTIVE_CASES);
+	KUNIT_EXPECT_EQ(test, failures, 0);
 }
/* ===== End test cases ===== */
2.50.0.727.gbf7dc18ff4-goog
Otherwise looks good to me.