io_uring/kbuf: always use READ_ONCE() to read ring provided buffer lengths
author Jens Axboe <axboe@kernel.dk>
Wed, 27 Aug 2025 21:27:30 +0000 (15:27 -0600)
committer Jens Axboe <axboe@kernel.dk>
Thu, 28 Aug 2025 11:48:34 +0000 (05:48 -0600)
Since the buffers are mapped from userspace, it is prudent to use
READ_ONCE() to read the length into a local variable, and to use that
local for any further checks and updates. A single stable read of the
buffer length means there is no risk of it changing between the check
and the use, and no need to read it multiple times.
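
As a rough userspace sketch of that pattern (the struct and helper names
here are hypothetical, and a volatile read stands in for the kernel's
READ_ONCE()), the shared length is read exactly once into a local, and
every later check uses only that local:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical shared descriptor; in the kernel this is struct
 * io_uring_buf, mapped from and writable by userspace at any time. */
struct shared_buf {
        uint64_t addr;
        uint32_t len;
};

static uint32_t clamp_to_buffer(struct shared_buf *buf, uint32_t want)
{
        /* Single stable read; a plain buf->len could be re-read by the
         * compiler and change value between the check and the use. */
        uint32_t buf_len = *(volatile uint32_t *)&buf->len;

        if (!want || want > buf_len)
                want = buf_len;         /* validated against the local copy */
        return want;                    /* buf->len is never read again */
}

int main(void)
{
        struct shared_buf b = { .addr = 0, .len = 4096 };

        printf("%u\n", (unsigned)clamp_to_buffer(&b, 0));   /* 4096: take the whole buffer */
        printf("%u\n", (unsigned)clamp_to_buffer(&b, 512)); /* 512: request already fits */
        return 0;
}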

Similarly, the buffer may change between being picked and being
committed. Ensure that the incremental ring buffer commit loop stops if
it hits a zero-sized buffer, as no further progress can be made at that
point.
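
A minimal userspace mock of the commit loop (hypothetical names; not the
kernel implementation) shows how the new check turns a zero-sized entry
into a stopping point instead of letting the walk keep advancing the
head:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the ring entry; the real type is struct
 * io_uring_buf in io_uring/kbuf. */
struct mock_buf {
        uint64_t addr;
        uint32_t len;
};

/* Sketch of the fixed loop: mirrors the "if (buf_len || !this_len)"
 * check from the patch, so a 0-sized buffer ends the walk because no
 * further progress is possible. */
static int mock_inc_commit(struct mock_buf *ring, uint16_t *head, uint32_t len)
{
        while (len) {
                struct mock_buf *buf = &ring[*head];
                uint32_t buf_len = buf->len;    /* one snapshot per pass */
                uint32_t this_len = len < buf_len ? len : buf_len;

                buf_len -= this_len;
                if (buf_len || !this_len) {     /* partial use, or 0-sized entry */
                        buf->addr += this_len;
                        buf->len = buf_len;
                        return 0;
                }
                buf->len = 0;
                (*head)++;
                len -= this_len;
        }
        return 1;
}

int main(void)
{
        struct mock_buf ring[4] = { { 0, 64 }, { 0, 0 }, { 0, 64 }, { 0, 64 } };
        uint16_t head = 0;

        /* The zero-sized second entry stops the walk with head left at 1. */
        printf("fully committed: %d, head: %u\n",
               mock_inc_commit(ring, &head, 256), (unsigned)head);
        return 0;
}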

Fixes: ae98dbf43d75 ("io_uring/kbuf: add support for incremental buffer consumption")
Link: https://lore.kernel.org/io-uring/tencent_000C02641F6250C856D0C26228DE29A3D30A@qq.com/
Reported-by: Qingyue Zhang <chunzhennn@qq.com>
Reported-by: Suoxing Zhang <aftern00n@qq.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/kbuf.c

index 81a13338dfab3ee243552b0f009b37127e2d6d34..19a8bde5e1e1c3cf87932ac7d8f2bba79b120773 100644 (file)
@@ -36,15 +36,19 @@ static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len)
 {
        while (len) {
                struct io_uring_buf *buf;
-               u32 this_len;
+               u32 buf_len, this_len;
 
                buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
-               this_len = min_t(u32, len, buf->len);
-               buf->len -= this_len;
-               if (buf->len) {
+               buf_len = READ_ONCE(buf->len);
+               this_len = min_t(u32, len, buf_len);
+               buf_len -= this_len;
+               /* Stop looping for invalid buffer length of 0 */
+               if (buf_len || !this_len) {
                        buf->addr += this_len;
+                       buf->len = buf_len;
                        return false;
                }
+               buf->len = 0;
                bl->head++;
                len -= this_len;
        }
@@ -159,6 +163,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
        __u16 tail, head = bl->head;
        struct io_uring_buf *buf;
        void __user *ret;
+       u32 buf_len;
 
        tail = smp_load_acquire(&br->tail);
        if (unlikely(tail == head))
@@ -168,8 +173,9 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
                req->flags |= REQ_F_BL_EMPTY;
 
        buf = io_ring_head_to_buf(br, head, bl->mask);
-       if (*len == 0 || *len > buf->len)
-               *len = buf->len;
+       buf_len = READ_ONCE(buf->len);
+       if (*len == 0 || *len > buf_len)
+               *len = buf_len;
        req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
        req->buf_list = bl;
        req->buf_index = buf->bid;
@@ -265,7 +271,7 @@ static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
 
        req->buf_index = buf->bid;
        do {
-               u32 len = buf->len;
+               u32 len = READ_ONCE(buf->len);
 
                /* truncate end piece, if needed, for non partial buffers */
                if (len > arg->max_len) {