int fd = r;
r = ceph_fdopendir(mnt, fd, dirp);
- ceph_close(mnt, fd);
+ if (r < 0) {
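+ // on success the fd is bound to *dirp and released when the dirp is
+ // closed, so it is only closed here on the failure path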
+ ceph_close(mnt, fd);
+ }
return r;
}
return 0;
}
-void PeerReplayer::post_sync_close_handles(const FHandles &fh) {
- dout(20) << dendl;
-
- // @FHandles.r_fd_dir_root is closed in @unregister_directory since
- // its used to acquire an exclusive lock on remote dir_root.
- ceph_close(m_local_mount, fh.c_fd);
- ceph_close(fh.p_mnt, fh.p_fd);
-}
-
int PeerReplayer::do_synchronize(const std::string &dir_root, const Snapshot &current) {
dout(20) << ": dir_root=" << dir_root << ", current=" << current << dendl;
FHandles fh;
return r;
}
- BOOST_SCOPE_EXIT_ALL( (this)(&fh) ) {
- post_sync_close_handles(fh);
- };
-
// record that we are going to "dirty" the data under this
// directory root
auto snap_id_str{stringify(current.second)};
if (r < 0) {
derr << ": error setting \"ceph.mirror.dirty_snap_id\" on dir_root=" << dir_root
<< ": " << cpp_strerror(r) << dendl;
+ ceph_close(m_local_mount, fh.c_fd);
+ ceph_close(fh.p_mnt, fh.p_fd);
return r;
}
if (r < 0) {
derr << ": failed to stat snap=" << current.first << ": " << cpp_strerror(r)
<< dendl;
+ ceph_close(m_local_mount, fh.c_fd);
+ ceph_close(fh.p_mnt, fh.p_fd);
return r;
}
if (r < 0) {
derr << ": failed to open local snap=" << current.first << ": " << cpp_strerror(r)
<< dendl;
+ ceph_close(m_local_mount, fh.c_fd);
+ ceph_close(fh.p_mnt, fh.p_fd);
return r;
}
+ // from this point on there is no need to close fh.c_fd manually;
+ // it is bound to tdirp and gets closed automatically when tdirp is closed.
std::stack<SyncEntry> sync_stack;
sync_stack.emplace(SyncEntry(".", tdirp, tstx));
sync_stack.pop();
}
+ dout(20) << " cur:" << fh.c_fd
+ << " prev:" << fh.p_fd
+ << " ret = " << r
+ << dendl;
+
+ // @FHandles.r_fd_dir_root is closed in @unregister_directory since
+ // it's used to acquire an exclusive lock on remote dir_root.
+
+ // fh.c_fd was handed to the ceph_fdopendir() call above, so there is
+ // no need to close that fd manually here.
+ ceph_close(fh.p_mnt, fh.p_fd);
+
return r;
}
return r;
}
- BOOST_SCOPE_EXIT_ALL( (this)(&fh) ) {
- post_sync_close_handles(fh);
- };
// record that we are going to "dirty" the data under this directory root
auto snap_id_str{stringify(current.second)};
if (r < 0) {
derr << ": error setting \"ceph.mirror.dirty_snap_id\" on dir_root=" << dir_root
<< ": " << cpp_strerror(r) << dendl;
+ ceph_close(m_local_mount, fh.c_fd);
+ ceph_close(fh.p_mnt, fh.p_fd);
return r;
}
if (r < 0) {
derr << ": failed to stat snap=" << current.first << ": " << cpp_strerror(r)
<< dendl;
+ ceph_close(m_local_mount, fh.c_fd);
+ ceph_close(fh.p_mnt, fh.p_fd);
return r;
}
dout(0) << ": backing off r=" << r << dendl;
break;
}
- r = pre_sync_check_and_open_handles(dir_root, current, prev, &fh);
- if (r < 0) {
- dout(5) << ": cannot proceed with sync: " << cpp_strerror(r) << dendl;
- return r;
- }
dout(20) << ": " << sync_queue.size() << " entries in queue" << dendl;
const auto &queue_entry = sync_queue.front();
stringify((*prev).first).c_str(), current.first.c_str(), &sd_info);
if (r != 0) {
derr << ": failed to open snapdiff, r=" << r << dendl;
+ ceph_close(m_local_mount, fh.c_fd);
+ ceph_close(fh.p_mnt, fh.p_fd);
return r;
}
while (0 < (r = ceph_readdir_snapdiff(&sd_info, &sd_entry))) {
if (r < 0) {
derr << ": failed to read directory=" << epath << dendl;
ceph_close_snapdiff(&sd_info);
+ ceph_close(m_local_mount, fh.c_fd);
+ ceph_close(fh.p_mnt, fh.p_fd);
return r;
}
}
sync_queue.pop();
}
+
+ dout(20) << " current:" << fh.c_fd
+ << " prev:" << fh.p_fd
+ << " ret = " << r
+ << dendl;
+
+ // @FHandles.r_fd_dir_root is closed in @unregister_directory since
+ // it's used to acquire an exclusive lock on remote dir_root.
+
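+ // unlike the readdir-based variant above, fh.c_fd is not bound to a dir
+ // handle in this path, so both fds are closed explicitly here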
+ ceph_close(m_local_mount, fh.c_fd);
+ ceph_close(fh.p_mnt, fh.p_fd);
return r;
}