git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-client.git / commitdiff
ceph: fscrypt support for writepages
author: Jeff Layton <jlayton@kernel.org>
Mon, 10 Jan 2022 17:48:23 +0000 (12:48 -0500)
committer: Jeff Layton <jlayton@kernel.org>
Tue, 31 May 2022 15:50:01 +0000 (11:50 -0400)
Add the appropriate machinery to write back dirty data with encryption.

Reviewed-by: Xiubo Li <xiubli@redhat.com>
Signed-off-by: Jeff Layton <jlayton@kernel.org>
fs/ceph/addr.c
fs/ceph/crypto.h

index ec2d4071b110938ecda46a2b77520fdfe390569b..79724b9c9bd752e8e9c2627f9504d5967716ef28 100644 (file)
@@ -547,10 +547,12 @@ static u64 get_writepages_data_length(struct inode *inode,
                                      struct page *page, u64 start)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_snap_context *snapc = page_snap_context(page);
+       struct ceph_snap_context *snapc;
        struct ceph_cap_snap *capsnap = NULL;
        u64 end = i_size_read(inode);
+       u64 ret;
 
+       snapc = page_snap_context(ceph_fscrypt_pagecache_page(page));
        if (snapc != ci->i_head_snapc) {
                bool found = false;
                spin_lock(&ci->i_ceph_lock);
@@ -565,9 +567,12 @@ static u64 get_writepages_data_length(struct inode *inode,
                spin_unlock(&ci->i_ceph_lock);
                WARN_ON(!found);
        }
-       if (end > page_offset(page) + thp_size(page))
-               end = page_offset(page) + thp_size(page);
-       return end > start ? end - start : 0;
+       if (end > ceph_fscrypt_page_offset(page) + thp_size(page))
+               end = ceph_fscrypt_page_offset(page) + thp_size(page);
+       ret = end > start ? end - start : 0;
+       if (ret && fscrypt_is_bounce_page(page))
+               ret = round_up(ret, CEPH_FSCRYPT_BLOCK_SIZE);
+       return ret;
 }
 
 /*
@@ -796,6 +801,11 @@ static void writepages_finish(struct ceph_osd_request *req)
                total_pages += num_pages;
                for (j = 0; j < num_pages; j++) {
                        page = osd_data->pages[j];
+                       if (fscrypt_is_bounce_page(page)) {
+                               page = fscrypt_pagecache_page(page);
+                               fscrypt_free_bounce_page(osd_data->pages[j]);
+                               osd_data->pages[j] = page;
+                       }
                        BUG_ON(!page);
                        WARN_ON(!PageUptodate(page));
 
@@ -1057,9 +1067,28 @@ get_more_pages:
                                    fsc->mount_options->congestion_kb))
                                fsc->write_congested = true;
 
-                       pages[locked_pages++] = page;
-                       pvec.pages[i] = NULL;
+                       if (IS_ENCRYPTED(inode)) {
+                               pages[locked_pages] =
+                                       fscrypt_encrypt_pagecache_blocks(page,
+                                               PAGE_SIZE, 0,
+                                               locked_pages ? GFP_NOWAIT : GFP_NOFS);
+                               if (IS_ERR(pages[locked_pages])) {
+                                       if (PTR_ERR(pages[locked_pages]) == -EINVAL)
+                                               pr_err("%s: inode->i_blkbits=%hhu\n",
+                                                       __func__, inode->i_blkbits);
+                                       /* better not fail on first page! */
+                                       BUG_ON(locked_pages == 0);
+                                       pages[locked_pages] = NULL;
+                                       redirty_page_for_writepage(wbc, page);
+                                       unlock_page(page);
+                                       break;
+                               }
+                               ++locked_pages;
+                       } else {
+                               pages[locked_pages++] = page;
+                       }
 
+                       pvec.pages[i] = NULL;
                        len += thp_size(page);
                }
 
@@ -1087,7 +1116,7 @@ get_more_pages:
                }
 
 new_request:
-               offset = page_offset(pages[0]);
+               offset = ceph_fscrypt_page_offset(pages[0]);
                len = wsize;
 
                req = ceph_osdc_new_request(&fsc->client->osdc,
@@ -1108,8 +1137,8 @@ new_request:
                                                ceph_wbc.truncate_size, true);
                        BUG_ON(IS_ERR(req));
                }
-               BUG_ON(len < page_offset(pages[locked_pages - 1]) +
-                            thp_size(page) - offset);
+               BUG_ON(len < ceph_fscrypt_page_offset(pages[locked_pages - 1]) +
+                            thp_size(pages[locked_pages - 1]) - offset);
 
                req->r_callback = writepages_finish;
                req->r_inode = inode;
@@ -1119,7 +1148,9 @@ new_request:
                data_pages = pages;
                op_idx = 0;
                for (i = 0; i < locked_pages; i++) {
-                       u64 cur_offset = page_offset(pages[i]);
+                       struct page *page = ceph_fscrypt_pagecache_page(pages[i]);
+
+                       u64 cur_offset = page_offset(page);
                        /*
                         * Discontinuity in page range? Ceph can handle that by just passing
                         * multiple extents in the write op.
@@ -1148,9 +1179,9 @@ new_request:
                                op_idx++;
                        }
 
-                       set_page_writeback(pages[i]);
+                       set_page_writeback(page);
                        if (caching)
-                               ceph_set_page_fscache(pages[i]);
+                               ceph_set_page_fscache(page);
                        len += thp_size(page);
                }
                ceph_fscache_write_to_cache(inode, offset, len, caching);
@@ -1166,8 +1197,16 @@ new_request:
                                                         offset);
                        len = max(len, min_len);
                }
+               if (IS_ENCRYPTED(inode))
+                       len = round_up(len, CEPH_FSCRYPT_BLOCK_SIZE);
+
                dout("writepages got pages at %llu~%llu\n", offset, len);
 
+               if (IS_ENCRYPTED(inode) &&
+                   ((offset | len) & ~CEPH_FSCRYPT_BLOCK_MASK))
+                       pr_warn("%s: bad encrypted write offset=%lld len=%llu\n",
+                               __func__, offset, len);
+
                osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
                                                 0, from_pool, false);
                osd_req_op_extent_update(req, op_idx, len);
index bfda2220cb78e11938712a39a84dcd671e169f3f..d1726307bdb85bc577542b27f03b2589d69ec1cd 100644 (file)
@@ -154,6 +154,12 @@ int ceph_fscrypt_decrypt_extents(struct inode *inode, struct page **page, u64 of
                                 struct ceph_sparse_extent *map, u32 ext_cnt);
 int ceph_fscrypt_encrypt_pages(struct inode *inode, struct page **page, u64 off,
                                int len, gfp_t gfp);
+
+static inline struct page *ceph_fscrypt_pagecache_page(struct page *page)
+{
+       return fscrypt_is_bounce_page(page) ?  fscrypt_pagecache_page(page) : page;
+}
+
 #else /* CONFIG_FS_ENCRYPTION */
 
 static inline void ceph_fscrypt_set_ops(struct super_block *sb)
@@ -243,6 +249,16 @@ static inline int ceph_fscrypt_encrypt_pages(struct inode *inode, struct page **
 {
        return 0;
 }
+
+static inline struct page *ceph_fscrypt_pagecache_page(struct page *page)
+{
+       return page;
+}
 #endif /* CONFIG_FS_ENCRYPTION */
 
-#endif
+static inline loff_t ceph_fscrypt_page_offset(struct page *page)
+{
+       return page_offset(ceph_fscrypt_pagecache_page(page));
+}
+
+#endif /* _CEPH_CRYPTO_H */