public inbox for [email protected]
* [PATCH liburing] tests: remove -EBUSY on CQE backlog tests
@ 2021-04-20 22:01 Pavel Begunkov
  2021-04-21 17:03 ` Jens Axboe
  0 siblings, 1 reply; 2+ messages in thread
From: Pavel Begunkov @ 2021-04-20 22:01 UTC (permalink / raw)
  To: Jens Axboe, io-uring

From 5.13 the kernel no longer limits submissions when there is a CQE
backlog; such submissions previously failed with -EBUSY. Remove the
related tests.
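
For reference, a minimal sketch of the submission pattern the removed
tests exercised (the helper name submit_nop_tolerant() is hypothetical
and a ring with IORING_FEAT_NODROP is assumed); on 5.13+ the -EBUSY
branch is simply never taken:

#include <errno.h>
#include "liburing.h"

/*
 * Queue one NOP and submit it, coping with the pre-5.13 behaviour
 * where io_uring_submit() could return -EBUSY while CQEs sat in the
 * overflow backlog.
 */
static int submit_nop_tolerant(struct io_uring *ring)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EAGAIN;	/* SQ ring full, caller reaps and retries */
	io_uring_prep_nop(sqe);

	ret = io_uring_submit(ring);
	/*
	 * Pre-5.13: -EBUSY means the CQ ring plus overflow backlog is
	 * full, so drain CQEs until the kernel accepts the submission
	 * again; the unsubmitted SQE stays in the SQ ring meanwhile.
	 */
	while (ret == -EBUSY) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret)
			return ret;
		io_uring_cqe_seen(ring, cqe);
		ret = io_uring_submit(ring);
	}
	return ret;
}

On a 5.13+ kernel the drain loop never runs, which is why the -EBUSY
assertions in the removed tests no longer hold.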

Signed-off-by: Pavel Begunkov <[email protected]>
---
 test/Makefile           |   2 -
 test/cq-overflow-peek.c |  88 -----------------
 test/cq-overflow.c      | 204 ----------------------------------------
 3 files changed, 294 deletions(-)
 delete mode 100644 test/cq-overflow-peek.c

diff --git a/test/Makefile b/test/Makefile
index 210571c..4572564 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -34,7 +34,6 @@ test_targets += \
 	connect \
 	cq-full \
 	cq-overflow \
-	cq-overflow-peek \
 	cq-peek-batch \
 	cq-ready \
 	cq-size \
@@ -169,7 +168,6 @@ test_srcs := \
 	close-opath.c \
 	connect.c \
 	cq-full.c \
-	cq-overflow-peek.c \
 	cq-overflow.c \
 	cq-peek-batch.c \
 	cq-ready.c\
diff --git a/test/cq-overflow-peek.c b/test/cq-overflow-peek.c
deleted file mode 100644
index 353c6f3..0000000
--- a/test/cq-overflow-peek.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Check if the kernel sets IORING_SQ_CQ_OVERFLOW so that peeking events
- * still enter the kernel to flush events, if the CQ side is overflown.
- */
-#include <stdio.h>
-#include <string.h>
-#include <assert.h>
-#include "liburing.h"
-
-static int test_cq_overflow(struct io_uring *ring)
-{
-	struct io_uring_cqe *cqe;
-	struct io_uring_sqe *sqe;
-	unsigned flags;
-	int issued = 0;
-	int ret = 0;
-
-	do {
-		sqe = io_uring_get_sqe(ring);
-		if (!sqe) {
-			fprintf(stderr, "get sqe failed\n");
-			goto err;
-		}
-		ret = io_uring_submit(ring);
-		if (ret <= 0) {
-			if (ret != -EBUSY)
-				fprintf(stderr, "sqe submit failed: %d\n", ret);
-			break;
-		}
-		issued++;
-	} while (ret > 0);
-
-	assert(ret == -EBUSY);
-
-	flags = IO_URING_READ_ONCE(*ring->sq.kflags);
-	if (!(flags & IORING_SQ_CQ_OVERFLOW)) {
-		fprintf(stdout, "OVERFLOW not set on -EBUSY, skipping\n");
-		goto done;
-	}
-
-	while (issued) {
-		ret = io_uring_peek_cqe(ring, &cqe);
-		if (ret) {
-			if (ret != -EAGAIN) {
-				fprintf(stderr, "peek completion failed: %s\n",
-					strerror(ret));
-				break;
-			}
-			continue;
-		}
-		io_uring_cqe_seen(ring, cqe);
-		issued--;
-	}
-
-done:
-	return 0;
-err:
-	return 1;
-}
-
-int main(int argc, char *argv[])
-{
-	int ret;
-	struct io_uring ring;
-	struct io_uring_params p = { };
-
-	if (argc > 1)
-		return 0;
-
-	ret = io_uring_queue_init_params(16, &ring, &p);
-	if (ret) {
-		fprintf(stderr, "ring setup failed: %d\n", ret);
-		return 1;
-	}
-
-	if (!(p.features & IORING_FEAT_NODROP)) {
-		fprintf(stdout, "No overflow protection, skipped\n");
-		return 0;
-	}
-
-	ret = test_cq_overflow(&ring);
-	if (ret) {
-		fprintf(stderr, "test_cq_overflow failed\n");
-		return 1;
-	}
-
-	return 0;
-}
diff --git a/test/cq-overflow.c b/test/cq-overflow.c
index 274a815..945dc93 100644
--- a/test/cq-overflow.c
+++ b/test/cq-overflow.c
@@ -177,98 +177,6 @@ static int reap_events(struct io_uring *ring, unsigned nr_events, int do_wait)
 	return i ? i : ret;
 }
 
-/*
- * Setup ring with CQ_NODROP and check we get -EBUSY on trying to submit new IO
- * on an overflown ring, and that we get all the events (even overflows) when
- * we finally reap them.
- */
-static int test_overflow_nodrop(void)
-{
-	struct __kernel_timespec ts;
-	struct io_uring_sqe *sqe;
-	struct io_uring_params p;
-	struct io_uring ring;
-	unsigned pending;
-	int ret, i, j;
-
-	memset(&p, 0, sizeof(p));
-	ret = io_uring_queue_init_params(4, &ring, &p);
-	if (ret) {
-		fprintf(stderr, "io_uring_queue_init failed %d\n", ret);
-		return 1;
-	}
-	if (!(p.features & IORING_FEAT_NODROP)) {
-		fprintf(stdout, "FEAT_NODROP not supported, skipped\n");
-		return 0;
-	}
-
-	ts.tv_sec = 0;
-	ts.tv_nsec = 10000000;
-
-	/* submit 4x4 SQEs, should overflow the ring by 8 */
-	pending = 0;
-	for (i = 0; i < 4; i++) {
-		for (j = 0; j < 4; j++) {
-			sqe = io_uring_get_sqe(&ring);
-			if (!sqe) {
-				fprintf(stderr, "get sqe failed\n");
-				goto err;
-			}
-
-			io_uring_prep_timeout(sqe, &ts, -1U, 0);
-			sqe->user_data = (i * 4) + j;
-		}
-
-		ret = io_uring_submit(&ring);
-		if (ret <= 0) {
-			if (ret == -EBUSY)
-				break;
-			fprintf(stderr, "sqe submit failed: %d, %d\n", ret, pending);
-			goto err;
-		}
-		pending += ret;
-	}
-
-	/* wait for timers to fire */
-	usleep(2 * 10000);
-
-	/*
-	 * We should have 16 pending CQEs now, 8 of them in the overflow list. Any
-	 * attempt to queue more IO should return -EBUSY
-	 */
-	sqe = io_uring_get_sqe(&ring);
-	if (!sqe) {
-		fprintf(stderr, "get sqe failed\n");
-		goto err;
-	}
-
-	io_uring_prep_nop(sqe);
-	ret = io_uring_submit(&ring);
-	if (ret != -EBUSY) {
-		fprintf(stderr, "expected sqe submit busy: %d\n", ret);
-		goto err;
-	}
-
-	/* reap the events we should have available */
-	ret = reap_events(&ring, pending, 1);
-	if (ret < 0) {
-		fprintf(stderr, "ret=%d\n", ret);
-		goto err;
-	}
-
-	if (*ring.cq.koverflow) {
-		fprintf(stderr, "cq ring overflow %d, expected 0\n",
-				*ring.cq.koverflow);
-		goto err;
-	}
-
-	io_uring_queue_exit(&ring);
-	return 0;
-err:
-	io_uring_queue_exit(&ring);
-	return 1;
-}
-
 /*
  * Submit some NOPs and watch if the overflow is correct
  */
@@ -333,106 +241,6 @@ err:
 	return 1;
 }
 
-/*
- * Test attempted submit with overflown cq ring that can't get flushed
- */
-static int test_overflow_nodrop_submit_ebusy(void)
-{
-	struct __kernel_timespec ts;
-	struct io_uring_sqe *sqe;
-	struct io_uring_params p;
-	struct io_uring ring;
-	unsigned pending;
-	int ret, i, j;
-
-	memset(&p, 0, sizeof(p));
-	ret = io_uring_queue_init_params(4, &ring, &p);
-	if (ret) {
-		fprintf(stderr, "io_uring_queue_init failed %d\n", ret);
-		return 1;
-	}
-	if (!(p.features & IORING_FEAT_NODROP)) {
-		fprintf(stdout, "FEAT_NODROP not supported, skipped\n");
-		return 0;
-	}
-
-	ts.tv_sec = 1;
-	ts.tv_nsec = 0;
-
-	/* submit 4x4 SQEs, should overflow the ring by 8 */
-	pending = 0;
-	for (i = 0; i < 4; i++) {
-		for (j = 0; j < 4; j++) {
-			sqe = io_uring_get_sqe(&ring);
-			if (!sqe) {
-				fprintf(stderr, "get sqe failed\n");
-				goto err;
-			}
-
-			io_uring_prep_timeout(sqe, &ts, -1U, 0);
-			sqe->user_data = (i * 4) + j;
-		}
-
-		ret = io_uring_submit(&ring);
-		if (ret <= 0) {
-			fprintf(stderr, "sqe submit failed: %d, %d\n", ret, pending);
-			goto err;
-		}
-		pending += ret;
-	}
-
-	/* wait for timers to fire */
-	usleep(1100000);
-
-	/*
-	 * We should have 16 pending CQEs now, 8 of them in the overflow list. Any
-	 * attempt to queue more IO should return -EBUSY
-	 */
-	sqe = io_uring_get_sqe(&ring);
-	if (!sqe) {
-		fprintf(stderr, "get sqe failed\n");
-		goto err;
-	}
-
-	io_uring_prep_nop(sqe);
-	ret = io_uring_submit(&ring);
-	if (ret != -EBUSY) {
-		fprintf(stderr, "expected sqe submit busy: %d\n", ret);
-		goto err;
-	}
-
-	/*
-	 * Now peek existing events so the CQ ring is empty, apart from the
-	 * backlog
-	 */
-	ret = reap_events(&ring, pending, 0);
-	if (ret < 0) {
-		fprintf(stderr, "ret=%d\n", ret);
-		goto err;
-	} else if (ret < 8) {
-		fprintf(stderr, "only found %d events, expected 8\n", ret);
-		goto err;
-	}
-
-	/*
-	 * We should now be able to submit our previous nop that's still
-	 * in the sq ring, as the kernel can flush the existing backlog
-	 * to the now empty CQ ring.
-	 */
-	ret = io_uring_submit(&ring);
-	if (ret != 1) {
-		fprintf(stderr, "submit got %d, expected 1\n", ret);
-		goto err;
-	}
-
-	io_uring_queue_exit(&ring);
-	return 0;
-err:
-	io_uring_queue_exit(&ring);
-	return 1;
-}
-
-
 int main(int argc, char *argv[])
 {
 	unsigned iters, drops;
@@ -448,18 +256,6 @@ int main(int argc, char *argv[])
 		return ret;
 	}
 
-	ret = test_overflow_nodrop();
-	if (ret) {
-		printf("test_overflow_nodrop failed\n");
-		return ret;
-	}
-
-	ret = test_overflow_nodrop_submit_ebusy();
-	if (ret) {
-		fprintf(stderr, "test_overflow_npdrop_submit_ebusy failed\n");
-		return ret;
-	}
-
 	t_create_file(".basic-rw", FILE_SIZE);
 
 	vecs = t_create_buffers(BUFFERS, BS);
-- 
2.31.1


* Re: [PATCH liburing] tests: remove -EBUSY on CQE backlog tests
  2021-04-20 22:01 [PATCH liburing] tests: remove -EBUSY on CQE backlog tests Pavel Begunkov
@ 2021-04-21 17:03 ` Jens Axboe
  0 siblings, 0 replies; 2+ messages in thread
From: Jens Axboe @ 2021-04-21 17:03 UTC (permalink / raw)
  To: Pavel Begunkov, io-uring

On 4/20/21 4:01 PM, Pavel Begunkov wrote:
> From 5.13 the kernel no longer limits submissions when there is a CQE
> backlog; such submissions previously failed with -EBUSY. Remove the
> related tests.

Applied, thanks.

-- 
Jens Axboe

