
Commit ebdfefc

io_uring/sqpoll: fix io-wq affinity when IORING_SETUP_SQPOLL is used
If we set up the ring with SQPOLL, then that polling thread has its own
io-wq setup. This means that if the application uses
IORING_REGISTER_IOWQ_AFF to set the io-wq affinity, we should not be
setting it for the invoking task, but rather for the sqpoll task. Add an
sqpoll helper that parks the thread and updates the affinity, and use
that one if we're using SQPOLL.

Fixes: fe76421 ("io_uring: allow user configurable IO thread CPU affinity")
Cc: [email protected] # 5.10+
Link: axboe/liburing#884
Signed-off-by: Jens Axboe <[email protected]>
1 parent d246c75 commit ebdfefc
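For context, here is a minimal userspace sketch of the path this commit fixes, assuming liburing's io_uring_register_iowq_aff()/io_uring_unregister_iowq_aff() helpers; it is illustrative only and not part of the commit. It sets up an SQPOLL ring and registers an io-wq CPU affinity mask, which with this fix is applied to the sqpoll task's io-wq rather than the submitting task's.

/*
 * Illustrative sketch only, not part of this commit. Assumes a liburing
 * version that provides io_uring_register_iowq_aff(). Error handling is
 * kept minimal.
 */
#define _GNU_SOURCE
#include <liburing.h>
#include <sched.h>
#include <stdio.h>

int main(void)
{
        struct io_uring_params p = { .flags = IORING_SETUP_SQPOLL };
        struct io_uring ring;
        cpu_set_t mask;
        int ret;

        ret = io_uring_queue_init_params(8, &ring, &p);
        if (ret < 0) {
                fprintf(stderr, "queue_init: %d\n", ret);
                return 1;
        }

        /* Pin io-wq workers to CPU 0. With IORING_SETUP_SQPOLL, the kernel
         * now applies this to the sqpoll task's io-wq, not the caller's. */
        CPU_ZERO(&mask);
        CPU_SET(0, &mask);
        ret = io_uring_register_iowq_aff(&ring, sizeof(mask), &mask);
        if (ret < 0)
                fprintf(stderr, "register_iowq_aff: %d\n", ret);

        io_uring_unregister_iowq_aff(&ring);
        io_uring_queue_exit(&ring);
        return 0;
}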

File tree: 5 files changed (+41 −15 lines)

io_uring/io-wq.c (+6 −3)

@@ -1306,13 +1306,16 @@ static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
 	return __io_wq_cpu_online(wq, cpu, false);
 }
 
-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
+int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)
 {
+	if (!tctx || !tctx->io_wq)
+		return -EINVAL;
+
 	rcu_read_lock();
 	if (mask)
-		cpumask_copy(wq->cpu_mask, mask);
+		cpumask_copy(tctx->io_wq->cpu_mask, mask);
 	else
-		cpumask_copy(wq->cpu_mask, cpu_possible_mask);
+		cpumask_copy(tctx->io_wq->cpu_mask, cpu_possible_mask);
 	rcu_read_unlock();
 
 	return 0;

io_uring/io-wq.h (+1 −1)

@@ -50,7 +50,7 @@ void io_wq_put_and_exit(struct io_wq *wq);
 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
 void io_wq_hash_work(struct io_wq_work *work, void *val);
 
-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
+int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
 int io_wq_max_workers(struct io_wq *wq, int *new_count);
 
 static inline bool io_wq_is_hashed(struct io_wq_work *work)

io_uring/io_uring.c (+18 −11)

@@ -4183,16 +4183,28 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
 	return 0;
 }
 
+static __cold int __io_register_iowq_aff(struct io_ring_ctx *ctx,
+					 cpumask_var_t new_mask)
+{
+	int ret;
+
+	if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
+		ret = io_wq_cpu_affinity(current->io_uring, new_mask);
+	} else {
+		mutex_unlock(&ctx->uring_lock);
+		ret = io_sqpoll_wq_cpu_affinity(ctx, new_mask);
+		mutex_lock(&ctx->uring_lock);
+	}
+
+	return ret;
+}
+
 static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
 				       void __user *arg, unsigned len)
 {
-	struct io_uring_task *tctx = current->io_uring;
 	cpumask_var_t new_mask;
 	int ret;
 
-	if (!tctx || !tctx->io_wq)
-		return -EINVAL;
-
 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
 		return -ENOMEM;
 
@@ -4213,19 +4225,14 @@ static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
 		return -EFAULT;
 	}
 
-	ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
+	ret = __io_register_iowq_aff(ctx, new_mask);
 	free_cpumask_var(new_mask);
 	return ret;
 }
 
 static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
 {
-	struct io_uring_task *tctx = current->io_uring;
-
-	if (!tctx || !tctx->io_wq)
-		return -EINVAL;
-
-	return io_wq_cpu_affinity(tctx->io_wq, NULL);
+	return __io_register_iowq_aff(ctx, NULL);
 }
 
 static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,

io_uring/sqpoll.c (+15)

@@ -421,3 +421,18 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
 	io_sq_thread_finish(ctx);
 	return ret;
 }
+
+__cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
+				     cpumask_var_t mask)
+{
+	struct io_sq_data *sqd = ctx->sq_data;
+	int ret = -EINVAL;
+
+	if (sqd) {
+		io_sq_thread_park(sqd);
+		ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
+		io_sq_thread_unpark(sqd);
+	}
+
+	return ret;
+}

io_uring/sqpoll.h (+1)

@@ -27,3 +27,4 @@ void io_sq_thread_park(struct io_sq_data *sqd);
 void io_sq_thread_unpark(struct io_sq_data *sqd);
 void io_put_sq_data(struct io_sq_data *sqd);
 void io_sqpoll_wait_sq(struct io_ring_ctx *ctx);
+int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask);
