
io_uring-6.19-20260102

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmlX7O8QHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpkzbD/0SoEnTZ+jlbJojq6eAFtYAU3ial6sRdKC9
 15+WqlsMN5MHoV/xLMqHGxofpxCyXMmZZSPholWaUIiGJDcf4Q4olFFDTAgZPZYk
 XxpN9KeE4/n17eFXe+TE/D172MVM0gt9QbJFoV+TLyayrGiB5QyocH6Vg4FoWvjr
 YvyicIRE3SLiBQ8zdfPC4SR28VBE3LKZxjZJxr2HQjJQw4O4/+gKkYz7upACc4Xk
 qN3JioIayuM3hrqcBSm7P0t4tlTCYHZvcGr7WI26CV6hcHD7j7N9jOVPZb4ce8et
 GIYwASYx4FTPrzAebQXXNL39RjoSeaRa/ppcdFHbT9ZZkI9yY9g3umg3kEml8RkF
 DFFwmPxlz2RuRLs+KdZ4UjLRf14W5qYlcThN7bgpTH4H0XUeDzT7HI9BiXBC7gjl
 p0Z1Y3NPAzMxil48ZPpopJxmQGcBIC8fMnDT0KVpvuILrN3ME0TMg82lQ2X/eTwf
 S/oPLebqqWy4N8Ff5x+GYmWxZvFEOxmO0AoSSiN3nlZ1skNqRlpMISTsFJXy+luq
 V31d0cLBfrWL9MNTE+yjLNT/5pc1l+HgVLxdoxCioEKWXXdB27YEDlh0CVNtjZ9j
 /ZVMJcZhzRBUvWLUvzQrtY65m0I8h6XYJAr7TXbbsL70yFAsgQmBUZPklqe6eijy
 HFYYO4vnJg==
 =KgoB
 -----END PGP SIGNATURE-----

Merge tag 'io_uring-6.19-20260102' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull io_uring fixes from Jens Axboe:

 - Remove the dead length argument from io_uring_validate_mmap_request()

 - Use GFP_NOWAIT for overflow CQEs on legacy ring setups rather than
   GFP_ATOMIC, which makes it play nicer with memcg limits

 - Fix a potential circular locking issue with tctx node removal and
   exec-based cancelations

* tag 'io_uring-6.19-20260102' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
  io_uring/memmap: drop unused sz param in io_uring_validate_mmap_request()
  io_uring/tctx: add separate lock for list of tctx's in ctx
  io_uring: use GFP_NOWAIT for overflow CQEs on legacy rings
Linus Torvalds 2026-01-02 12:07:55 -08:00
commit 509b5b1152
6 changed files with 28 additions and 11 deletions

include/linux/io_uring_types.h

@@ -424,11 +424,17 @@ struct io_ring_ctx {
 	struct user_struct		*user;
 	struct mm_struct		*mm_account;
 
+	/*
+	 * List of tctx nodes for this ctx, protected by tctx_lock. For
+	 * cancelation purposes, nests under uring_lock.
+	 */
+	struct list_head		tctx_list;
+	struct mutex			tctx_lock;
+
 	/* ctx exit and cancelation */
 	struct llist_head		fallback_llist;
 	struct delayed_work		fallback_work;
 	struct work_struct		exit_work;
-	struct list_head		tctx_list;
 	struct completion		ref_comp;
 
 	/* io-wq management, e.g. thread count */
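The new comment documents the lock order: tctx_lock is a leaf lock that nests under uring_lock. To illustrate why giving the list its own narrow lock removes the old circular dependency, here is a toy userspace model (C with pthreads; every name is illustrative, none of this is kernel code): plain add/remove takes only the leaf lock, while the cancelation-style walker takes both locks in the fixed big-then-leaf order, so no path ever acquires them in the opposite order.

	/*
	 * Toy model of the tctx_lock pattern: a shared list moves from the
	 * "big" lock to its own leaf lock. List add/remove no longer needs
	 * the big lock, so it can no longer take part in a lock-order cycle.
	 */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t big_lock  = PTHREAD_MUTEX_INITIALIZER; /* ~uring_lock */
	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER; /* ~tctx_lock  */

	struct node { struct node *next; int id; };
	static struct node *list_head;

	/* Fast path: add/remove only needs the leaf lock. */
	static void list_add_node(struct node *n)
	{
		pthread_mutex_lock(&list_lock);
		n->next = list_head;
		list_head = n;
		pthread_mutex_unlock(&list_lock);
	}

	/*
	 * Cancelation path: walks the list with both locks held, always in
	 * the same big -> leaf order, which keeps the ordering acyclic.
	 */
	static void cancel_all(void)
	{
		pthread_mutex_lock(&big_lock);
		pthread_mutex_lock(&list_lock);
		for (struct node *n = list_head; n; n = n->next)
			printf("cancel node %d\n", n->id);
		pthread_mutex_unlock(&list_lock);
		pthread_mutex_unlock(&big_lock);
	}

	int main(void)
	{
		struct node a = { .id = 1, .next = NULL };
		list_add_node(&a);
		cancel_all();
		return 0;
	}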

io_uring/cancel.c

@@ -184,7 +184,9 @@ static int __io_async_cancel(struct io_cancel_data *cd,
 	} while (1);
 
 	/* slow path, try all io-wq's */
+	__set_current_state(TASK_RUNNING);
 	io_ring_submit_lock(ctx, issue_flags);
+	mutex_lock(&ctx->tctx_lock);
 	ret = -ENOENT;
 	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
 		ret = io_async_cancel_one(node->task->io_uring, cd);
@@ -194,6 +196,7 @@ static int __io_async_cancel(struct io_cancel_data *cd,
 			nr++;
 		}
 	}
+	mutex_unlock(&ctx->tctx_lock);
 	io_ring_submit_unlock(ctx, issue_flags);
 	return all ? nr : ret;
 }
@@ -484,6 +487,7 @@ static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
 	bool ret = false;
 
 	mutex_lock(&ctx->uring_lock);
+	mutex_lock(&ctx->tctx_lock);
 	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
 		struct io_uring_task *tctx = node->task->io_uring;
 
@@ -496,6 +500,7 @@ static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
 		cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
 		ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
 	}
+	mutex_unlock(&ctx->tctx_lock);
 	mutex_unlock(&ctx->uring_lock);
 
 	return ret;

io_uring/io_uring.c

@@ -340,6 +340,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_LIST_HEAD(&ctx->ltimeout_list);
 	init_llist_head(&ctx->work_llist);
 	INIT_LIST_HEAD(&ctx->tctx_list);
+	mutex_init(&ctx->tctx_lock);
 	ctx->submit_state.free_list.next = NULL;
 	INIT_HLIST_HEAD(&ctx->waitid_list);
 	xa_init_flags(&ctx->zcrx_ctxs, XA_FLAGS_ALLOC);
@@ -864,7 +865,7 @@ static __cold bool io_cqe_overflow_locked(struct io_ring_ctx *ctx,
 {
 	struct io_overflow_cqe *ocqe;
 
-	ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_ATOMIC);
+	ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_NOWAIT);
 	return io_cqring_add_overflow(ctx, ocqe);
 }
@@ -3045,6 +3046,7 @@ static __cold void io_ring_exit_work(struct work_struct *work)
 	exit.ctx = ctx;
 
 	mutex_lock(&ctx->uring_lock);
+	mutex_lock(&ctx->tctx_lock);
 	while (!list_empty(&ctx->tctx_list)) {
 		WARN_ON_ONCE(time_after(jiffies, timeout));
 
@@ -3056,6 +3058,7 @@ static __cold void io_ring_exit_work(struct work_struct *work)
 		if (WARN_ON_ONCE(ret))
 			continue;
 
+		mutex_unlock(&ctx->tctx_lock);
 		mutex_unlock(&ctx->uring_lock);
 		/*
 		 * See comment above for
@@ -3064,7 +3067,9 @@ static __cold void io_ring_exit_work(struct work_struct *work)
 		 */
 		wait_for_completion_interruptible(&exit.completion);
 		mutex_lock(&ctx->uring_lock);
+		mutex_lock(&ctx->tctx_lock);
 	}
+	mutex_unlock(&ctx->tctx_lock);
 	mutex_unlock(&ctx->uring_lock);
 	spin_lock(&ctx->completion_lock);
 	spin_unlock(&ctx->completion_lock);
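The exit-work hunks follow the usual drop-locks-around-wait discipline: both mutexes are released before blocking on the completion, then retaken in the same uring_lock-then-tctx_lock order before the list is rechecked. A minimal compilable sketch of that shape (userspace C with pthreads; a condition variable stands in for the kernel completion, and all names are illustrative):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t big_lock  = PTHREAD_MUTEX_INITIALIZER; /* ~uring_lock */
	static pthread_mutex_t leaf_lock = PTHREAD_MUTEX_INITIALIZER; /* ~tctx_lock  */
	static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  done_cv   = PTHREAD_COND_INITIALIZER;  /* ~completion */
	static bool done = true; /* pre-signaled so the sketch runs to completion */

	static void exit_work_sketch(bool (*list_empty)(void))
	{
		pthread_mutex_lock(&big_lock);
		pthread_mutex_lock(&leaf_lock);
		while (!list_empty()) {
			/* Never block on the completion with either mutex held. */
			pthread_mutex_unlock(&leaf_lock);
			pthread_mutex_unlock(&big_lock);

			pthread_mutex_lock(&done_lock);
			while (!done)
				pthread_cond_wait(&done_cv, &done_lock);
			pthread_mutex_unlock(&done_lock);

			/* Retake in the fixed big -> leaf order before rechecking. */
			pthread_mutex_lock(&big_lock);
			pthread_mutex_lock(&leaf_lock);
		}
		pthread_mutex_unlock(&leaf_lock);
		pthread_mutex_unlock(&big_lock);
	}

	static bool list_empty_stub(void) { return true; }

	int main(void)
	{
		exit_work_sketch(list_empty_stub);
		return 0;
	}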

io_uring/memmap.c

@@ -268,8 +268,7 @@ static void *io_region_validate_mmap(struct io_ring_ctx *ctx,
 	return io_region_get_ptr(mr);
 }
 
-static void *io_uring_validate_mmap_request(struct file *file, loff_t pgoff,
-					    size_t sz)
+static void *io_uring_validate_mmap_request(struct file *file, loff_t pgoff)
 {
 	struct io_ring_ctx *ctx = file->private_data;
 	struct io_mapped_region *region;
@@ -304,7 +303,7 @@ __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
 
 	guard(mutex)(&ctx->mmap_lock);
 
-	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
+	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff);
 	if (IS_ERR(ptr))
 		return PTR_ERR(ptr);
@@ -336,7 +335,7 @@ unsigned long io_uring_get_unmapped_area(struct file *filp, unsigned long addr,
 
 	guard(mutex)(&ctx->mmap_lock);
 
-	ptr = io_uring_validate_mmap_request(filp, pgoff, len);
+	ptr = io_uring_validate_mmap_request(filp, pgoff);
 	if (IS_ERR(ptr))
 		return -ENOMEM;
@@ -386,7 +385,7 @@ unsigned long io_uring_get_unmapped_area(struct file *file, unsigned long addr,
 
 	guard(mutex)(&ctx->mmap_lock);
 
-	ptr = io_uring_validate_mmap_request(file, pgoff, len);
+	ptr = io_uring_validate_mmap_request(file, pgoff);
 	if (IS_ERR(ptr))
 		return PTR_ERR(ptr);

io_uring/register.c

@@ -320,6 +320,7 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
 		return 0;
 
 	/* now propagate the restriction to all registered users */
+	mutex_lock(&ctx->tctx_lock);
 	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
 		tctx = node->task->io_uring;
 		if (WARN_ON_ONCE(!tctx->io_wq))
@@ -330,6 +331,7 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
 		/* ignore errors, it always returns zero anyway */
 		(void)io_wq_max_workers(tctx->io_wq, new_count);
 	}
+	mutex_unlock(&ctx->tctx_lock);
 	return 0;
 err:
 	if (sqd) {

io_uring/tctx.c

@@ -136,9 +136,9 @@ int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
 			return ret;
 		}
 
-		mutex_lock(&ctx->uring_lock);
+		mutex_lock(&ctx->tctx_lock);
 		list_add(&node->ctx_node, &ctx->tctx_list);
-		mutex_unlock(&ctx->uring_lock);
+		mutex_unlock(&ctx->tctx_lock);
 	}
 	return 0;
 }
@@ -176,9 +176,9 @@ __cold void io_uring_del_tctx_node(unsigned long index)
 	WARN_ON_ONCE(current != node->task);
 	WARN_ON_ONCE(list_empty(&node->ctx_node));
 
-	mutex_lock(&node->ctx->uring_lock);
+	mutex_lock(&node->ctx->tctx_lock);
 	list_del(&node->ctx_node);
-	mutex_unlock(&node->ctx->uring_lock);
+	mutex_unlock(&node->ctx->tctx_lock);
 
 	if (tctx->last == node->ctx)
 		tctx->last = NULL;