]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-client.git/commitdiff
ceph: cleanup in __ceph_do_pending_vmtruncate() method
authorViacheslav Dubeyko <Slava.Dubeyko@ibm.com>
Wed, 30 Jul 2025 18:54:11 +0000 (11:54 -0700)
committerIlya Dryomov <idryomov@gmail.com>
Wed, 11 Feb 2026 18:19:16 +0000 (19:19 +0100)
The Coverity Scan service has detected an unchecked return
value in __ceph_do_pending_vmtruncate() method [1].

The CID 114041 contains explanation: " Calling
filemap_write_and_wait_range without checking return value.
If the function returns an error value, the error value
may be mistaken for a normal value. Value returned from
a function is not checked for errors before being used.
(CWE-252)".

The patch adds a check of the value returned by
filemap_write_and_wait_range() and reports an error
message if something goes wrong during the call.
It reworks the logic by removing the jump to the retry
label, because that jump could cause a potential
infinite loop if an error condition occurs during
the filemap_write_and_wait_range() call. The check of
to == ci->i_truncate_pagecache_size has been removed
because the to variable is assigned from
ci->i_truncate_pagecache_size in the code above. The
unnecessary finish variable has been removed because
the to variable always holds the
ci->i_truncate_pagecache_size value. Now, if the condition
ci->i_truncate_pending == 0 is true, the logic jumps
to the end of the function and wake_up_all(&ci->i_cap_wq)
is called.

[1] https://scan5.scan.coverity.com/#/project-view/64304/10063?selectedIssue=114041

cc: Alex Markuze <amarkuze@redhat.com>
cc: Ilya Dryomov <idryomov@gmail.com>
cc: Ceph Development <ceph-devel@vger.kernel.org>
Signed-off-by: Viacheslav Dubeyko <Slava.Dubeyko@ibm.com>
fs/ceph/inode.c

index 5f56c5d242454a189ccd932d3ee56275363b28cc..ac78d90a0bac92996d1534093f6ac54bd734c602 100644 (file)
@@ -2273,17 +2273,17 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
        struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 to;
-       int wrbuffer_refs, finish = 0;
+       int wrbuffer_refs;
+       int err;
 
        mutex_lock(&ci->i_truncate_mutex);
-retry:
        spin_lock(&ci->i_ceph_lock);
+
+       wrbuffer_refs = ci->i_wrbuffer_ref;
        if (ci->i_truncate_pending == 0) {
                doutc(cl, "%p %llx.%llx none pending\n", inode,
                      ceph_vinop(inode));
-               spin_unlock(&ci->i_ceph_lock);
-               mutex_unlock(&ci->i_truncate_mutex);
-               return;
+               goto out_unlock;
        }
 
        /*
@@ -2294,9 +2294,14 @@ retry:
                spin_unlock(&ci->i_ceph_lock);
                doutc(cl, "%p %llx.%llx flushing snaps first\n", inode,
                      ceph_vinop(inode));
-               filemap_write_and_wait_range(&inode->i_data, 0,
-                                            inode->i_sb->s_maxbytes);
-               goto retry;
+               err = filemap_write_and_wait_range(&inode->i_data, 0,
+                                                  inode->i_sb->s_maxbytes);
+               spin_lock(&ci->i_ceph_lock);
+
+               if (unlikely(err)) {
+                       pr_err_client(cl, "failed of flushing snaps: err %d\n",
+                                       err);
+               }
        }
 
        /* there should be no reader or writer */
@@ -2312,20 +2317,17 @@ retry:
        truncate_pagecache(inode, to);
 
        spin_lock(&ci->i_ceph_lock);
-       if (to == ci->i_truncate_pagecache_size) {
-               ci->i_truncate_pending = 0;
-               finish = 1;
-       }
-       spin_unlock(&ci->i_ceph_lock);
-       if (!finish)
-               goto retry;
+       ci->i_truncate_pending = 0;
 
+out_unlock:
+       spin_unlock(&ci->i_ceph_lock);
        mutex_unlock(&ci->i_truncate_mutex);
 
        if (wrbuffer_refs == 0)
                ceph_check_caps(ci, 0);
 
        wake_up_all(&ci->i_cap_wq);
+       return;
 }
 
 static void ceph_inode_work(struct work_struct *work)