if (got_created_ino &&
created_ino.val != target->ino.val) {
ldout(cct, 5) << "create got ino " << created_ino << " but then failed on lookup; EINTR?" << dendl;
- r = -EINTR;
+ r = -CEPHFS_EINTR;
}
if (ptarget)
ptarget->swap(target);
break;
if (blocklisted) {
- request->abort(-EBLOCKLISTED);
+ request->abort(-CEPHFS_EBLOCKLISTED);
break;
}
if (!have_open_session(mds)) {
session = _get_or_open_mds_session(mds);
if (session->state == MetaSession::STATE_REJECTED) {
- request->abort(-EPERM);
+ request->abort(-CEPHFS_EPERM);
break;
}
// wait
lderr(cct) << "mds." << from << " lacks required features '"
<< missing_features << "', closing session " << dendl;
_close_mds_session(session);
- _closed_mds_session(session, -EPERM, true);
+ _closed_mds_session(session, -CEPHFS_EPERM, true);
break;
}
session->mds_features = std::move(m->supported_features);
error_str = "unknown error";
lderr(cct) << "mds." << from << " rejected us (" << error_str << ")" << dendl;
- _closed_mds_session(session, -EPERM, true);
+ _closed_mds_session(session, -CEPHFS_EPERM, true);
}
break;
return;
}
- if (-ESTALE == reply->get_result()) { // see if we can get to proper MDS
+ if (-CEPHFS_ESTALE == reply->get_result()) { // see if we can get to proper MDS
ldout(cct, 20) << "got ESTALE on tid " << request->tid
<< " from mds." << request->mds << dendl;
request->send_to_auth = true;
{
ldout(cct, 1) << __func__ << ": FULL: cancelling outstanding operations "
<< "on " << pool << dendl;
- // Cancel all outstanding ops in this pool with -ENOSPC: it is necessary
+ // Cancel all outstanding ops in this pool with -CEPHFS_ENOSPC: it is necessary
// to do this rather than blocking, because otherwise when we fill up we
// potentially lock caps forever on files with dirty pages, and we need
// to be able to release those caps to the MDS so that it can delete files
// and free up space.
- epoch_t cancelled_epoch = objecter->op_cancel_writes(-ENOSPC, pool);
+ epoch_t cancelled_epoch = objecter->op_cancel_writes(-CEPHFS_ENOSPC, pool);
// For all inodes with layouts in this pool and a pending flush write op
// (i.e. one of the ones we will cancel), we've got to purge_set their data
// Fortunately since we're cancelling everything in a given pool, we don't
// need to know which ops belong to which ObjectSet, we can just blow all
// the un-flushed cached data away and mark any dirty inodes' async_err
- // field with -ENOSPC as long as we're sure all the ops we cancelled were
+ // field with -CEPHFS_ENOSPC as long as we're sure all the ops we cancelled were
// affecting this pool, and all the objectsets we're purging were also
// in this pool.
for (unordered_map<vinodeno_t,Inode*>::iterator i = inode_map.begin();
ldout(cct, 4) << __func__ << ": FULL: inode 0x" << std::hex << i->first << std::dec
<< " has dirty objects, purging and setting ENOSPC" << dendl;
objectcacher->purge_set(&inode->oset);
- inode->set_async_err(-ENOSPC);
+ inode->set_async_err(-CEPHFS_ENOSPC);
}
}
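// Illustrative sketch (hypothetical caller, not code from this file): the
// async error recorded above is expected to surface on a later fsync()/close()
// of an affected file, e.g.
//   int r = client->fsync(fd, false);            // assumed call shape
//   if (r == -CEPHFS_ENOSPC) { /* buffered writes were dropped: pool is full */ }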
lderr(cct) << "I was blocklisted at osd epoch " << epoch << dendl;
blocklisted = true;
- _abort_mds_sessions(-EBLOCKLISTED);
+ _abort_mds_sessions(-CEPHFS_EBLOCKLISTED);
    // Since we know all our OSD ops will fail, cancel them all preemptively,
// so that on an unhealthy cluster we can umount promptly even if e.g.
// some PGs were inaccessible.
- objecter->op_cancel_writes(-EBLOCKLISTED);
+ objecter->op_cancel_writes(-CEPHFS_EBLOCKLISTED);
}
*/
op.con->mark_down();
if (op.on_finish)
- op.on_finish->complete(-ETIMEDOUT);
+ op.on_finish->complete(-CEPHFS_ETIMEDOUT);
}
}
if (is_dir_operation(req)) {
Inode *dir = req->inode();
assert(dir);
- dir->set_async_err(-EIO);
+ dir->set_async_err(-CEPHFS_EIO);
lderr(cct) << "kick_requests_closed drop req of inode(dir) : "
<< dir->ino << " " << req->get_tid() << dendl;
req->unsafe_dir_item.remove_myself();
}
if (req->target) {
InodeRef &in = req->target;
- in->set_async_err(-EIO);
+ in->set_async_err(-CEPHFS_EIO);
lderr(cct) << "kick_requests_closed drop req of inode : "
<< in->ino << " " << req->get_tid() << dendl;
req->unsafe_target_item.remove_myself();
ldout(cct, 10) << "get_caps " << *in << " need " << ccap_string(need)
<< " file_wanted " << ccap_string(file_wanted) << ", EBADF "
<< dendl;
- return -EBADF;
+ return -CEPHFS_EBADF;
}
if ((fh->mode & CEPH_FILE_MODE_WR) && fh->gen != fd_gen)
- return -EBADF;
+ return -CEPHFS_EBADF;
if ((in->flags & I_ERROR_FILELOCK) && fh->has_any_filelocks())
- return -EIO;
+ return -CEPHFS_EIO;
int implemented;
int have = in->caps_issued(&implemented);
if ((need & CEPH_CAP_FILE_WR) && in->auth_cap &&
in->auth_cap->session->readonly)
- return -EROFS;
+ return -CEPHFS_EROFS;
if (in->flags & I_CAP_DROPPED) {
int mds_wanted = in->caps_mds_wanted();
ldout(cct, 8) << __func__ << ": FULL, purging for ENOSPC" << dendl;
objectcacher->purge_set(&in->oset);
if (onfinish) {
- onfinish->complete(-ENOSPC);
+ onfinish->complete(-CEPHFS_ENOSPC);
}
return true;
}
}
caps &= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_BUFFER;
if (caps && !in->caps_issued_mask(caps, true)) {
- if (err == -EBLOCKLISTED) {
+ if (err == -CEPHFS_EBLOCKLISTED) {
if (in->oset.dirty_or_tx) {
lderr(cct) << __func__ << " still has dirty data on " << *in << dendl;
in->set_async_err(err);
if (perms.uid() != in->uid && (in->mode & S_IRWXG)) {
int ret = _posix_acl_permission(in, perms, want);
- if (ret != -EAGAIN)
+ if (ret != -CEPHFS_EAGAIN)
return ret;
}
// check permissions before doing anything else
if (!in->check_mode(perms, want))
- return -EACCES;
+ return -CEPHFS_EACCES;
return 0;
}
r = 0;
if (strncmp(name, "system.", 7) == 0) {
if ((want & MAY_WRITE) && (perms.uid() != 0 && perms.uid() != in->uid))
- r = -EPERM;
+ r = -CEPHFS_EPERM;
} else {
r = inode_permission(in, perms, want);
}
goto out;
}
- r = -EPERM;
+ r = -CEPHFS_EPERM;
if (mask & CEPH_SETATTR_UID) {
if (perms.uid() != 0 && (perms.uid() != in->uid || stx->stx_uid != in->uid))
goto out;
int r = 0;
switch (in->mode & S_IFMT) {
case S_IFLNK:
- r = -ELOOP;
+ r = -CEPHFS_ELOOP;
goto out;
case S_IFDIR:
if (want & MAY_WRITE) {
- r = -EISDIR;
+ r = -CEPHFS_EISDIR;
goto out;
}
break;
if (r < 0)
goto out;
if (dir->uid != perms.uid() && otherin->uid != perms.uid())
- r = -EPERM;
+ r = -CEPHFS_EPERM;
}
out:
ldout(cct, 3) << __func__ << " " << dir << " = " << r << dendl;
goto out;
}
- r = -EPERM;
+ r = -CEPHFS_EPERM;
if (!S_ISREG(in->mode))
goto out;
lderr(cct) << __func__ << ": gid " << mds_gid << " not in MDS map"
<< dendl;
lderr(cct) << "FSMap: " << *fsmap << dendl;
- return -ENOENT;
+ return -CEPHFS_ENOENT;
}
} else if (mds_spec == "*") {
// It is a wildcard: use all MDSs
if (mds_info.empty()) {
lderr(cct) << __func__ << ": no MDS daemons found" << dendl;
lderr(cct) << "FSMap: " << *fsmap << dendl;
- return -ENOENT;
+ return -CEPHFS_ENOENT;
}
for (const auto& [gid, info] : mds_info) {
if (mds_gid == 0) {
lderr(cct) << __func__ << ": no MDS daemons found by name `" << mds_spec << "'" << dendl;
lderr(cct) << "FSMap: " << *fsmap << dendl;
- return -ENOENT;
+ return -CEPHFS_ENOENT;
} else {
auto& info = fsmap->get_info_gid(mds_gid);
ldout(cct, 10) << __func__ << ": resolved name '" << mds_spec
{
RWRef_t iref_reader(initialize_state, CLIENT_INITIALIZED);
if (!iref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::unique_lock cl(client_lock);
}
if (non_laggy.size() == 0) {
*outs = "All targeted MDS daemons are laggy";
- return -ENOENT;
+ return -CEPHFS_ENOENT;
}
if (metadata.empty()) {
return r;
fscid = fsmap_user->get_fs_cid(resolved_fs_name);
if (fscid == FS_CLUSTER_ID_NONE) {
- return -ENOENT;
+ return -CEPHFS_ENOENT;
}
std::ostringstream oss;
req->head.args.getattr.mask = CEPH_STAT_CAP_INODE_ALL;
int res = make_request(req, perms);
if (res < 0) {
- if (res == -EACCES && root) {
+ if (res == -CEPHFS_EACCES && root) {
ldout(cct, 1) << __func__ << " EACCES on parent of mount point; quotas may not work" << dendl;
break;
}
while (!mds_ranks_closing.empty()) {
auto session = mds_sessions.at(*mds_ranks_closing.begin());
// this prunes entry from mds_sessions and mds_ranks_closing
- _closed_mds_session(&session, -ETIMEDOUT);
+ _closed_mds_session(&session, -CEPHFS_ETIMEDOUT);
}
}
if (abort) {
mount_aborted = true;
// Abort all mds sessions
- _abort_mds_sessions(-ENOTCONN);
+ _abort_mds_sessions(-CEPHFS_ENOTCONN);
- objecter->op_cancel_writes(-ENOTCONN);
+ objecter->op_cancel_writes(-CEPHFS_ENOTCONN);
} else {
// flush the mdlog for pending requests, if any
flush_mdlog_sync();
MetaRequest *req = mds_requests.begin()->second;
if (req->op_stamp + cct->_conf->client_mount_timeout < now) {
- req->abort(-ETIMEDOUT);
+ req->abort(-CEPHFS_ETIMEDOUT);
if (req->caller_cond) {
req->kick = true;
req->caller_cond->notify_all();
}
if (!dir->is_dir()) {
- r = -ENOTDIR;
+ r = -CEPHFS_ENOTDIR;
goto done;
}
if (dname.length() > NAME_MAX) {
- r = -ENAMETOOLONG;
+ r = -CEPHFS_ENAMETOOLONG;
goto done;
}
if (!dn->inode && (dir->flags & I_COMPLETE)) {
ldout(cct, 10) << __func__ << " concluded ENOENT locally for "
<< *dir << " dn '" << dname << "'" << dendl;
- return -ENOENT;
+ return -CEPHFS_ENOENT;
}
}
} else {
if (dir->caps_issued_mask(CEPH_CAP_FILE_SHARED, true) &&
(dir->flags & I_COMPLETE)) {
ldout(cct, 10) << __func__ << " concluded ENOENT locally for " << *dir << " dn '" << dname << "'" << dendl;
- return -ENOENT;
+ return -CEPHFS_ENOENT;
}
}
if (alternate_name)
*alternate_name = dn->alternate_name;
} else {
- r = -ENOENT;
+ r = -CEPHFS_ENOENT;
}
touch_dn(dn);
goto done;
Dentry *dn = dir->dir->dentries[name];
if (_dentry_valid(dn)) {
if (expect_null)
- return -EEXIST;
+ return -CEPHFS_EEXIST;
}
*pdn = dn;
} else {
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
ldout(cct, 10) << __func__ << ": " << path << dendl;
symlinks++;
ldout(cct, 20) << " symlink count " << symlinks << ", value is '" << next->symlink << "'" << dendl;
if (symlinks > MAXSYMLINKS) {
- return -ELOOP;
+ return -CEPHFS_ELOOP;
}
if (i < path.depth() - 1) {
i++;
}
if (!cur)
- return -ENOENT;
+ return -CEPHFS_ENOENT;
if (result) {
result->in = std::move(cur);
result->alternate_name = std::move(alternate_name);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << "link" << std::endl;
tout(cct) << relexisting << std::endl;
if (r < 0)
return r;
if (std::string(relpath) == "/") {
- r = -EEXIST;
+ r = -CEPHFS_EEXIST;
return r;
}
filepath path(relpath);
return r;
if (cct->_conf->client_permissions) {
if (S_ISDIR(in->mode)) {
- r = -EPERM;
+ r = -CEPHFS_EPERM;
return r;
}
r = may_hardlink(in.get(), perm);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
if (std::string(relpath) == "/")
- return -EISDIR;
+ return -CEPHFS_EISDIR;
filepath path(relpath);
string name = path.last_dentry();
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << relfrom << std::endl;
tout(cct) << relto << std::endl;
if (std::string(relfrom) == "/" || std::string(relto) == "/")
- return -EBUSY;
+ return -CEPHFS_EBUSY;
filepath from(relfrom);
filepath to(relto);
if (r < 0)
return r;
r = may_delete(todir.get(), toname.c_str(), perm);
- if (r < 0 && r != -ENOENT)
+ if (r < 0 && r != -CEPHFS_ENOENT)
return r;
}
r = _rename(fromdir.get(), fromname.c_str(), todir.get(), toname.c_str(), perm, std::move(alternate_name));
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
ldout(cct, 10) << __func__ << ": " << relpath << dendl;
if (std::string(relpath) == "/")
- return -EEXIST;
+ return -CEPHFS_EEXIST;
filepath path(relpath);
string name = path.last_dentry();
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
ldout(cct, 10) << "Client::mkdirs " << relpath << dendl;
tout(cct) << __func__ << std::endl;
break;
cur.swap(next);
}
- if (r!=-ENOENT) return r;
+   if (r != -CEPHFS_ENOENT) return r;
ldout(cct, 20) << __func__ << " got through " << i << " directories on path " << relpath << dendl;
//make new directory at each level
for (; i<path.depth(); ++i) {
r = _mkdir(cur.get(), path[i].c_str(), mode, perms, &next);
//check proper creation/existence
- if(-EEXIST == r && i < path.depth() - 1) {
+     if (-CEPHFS_EEXIST == r && i < path.depth() - 1) {
r = _lookup(cur.get(), path[i].c_str(), CEPH_CAP_AUTH_SHARED, &next, perms);
}
if (r < 0)
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
if (std::string(relpath) == "/")
- return -EBUSY;
+ return -CEPHFS_EBUSY;
filepath path(relpath);
string name = path.last_dentry();
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
tout(cct) << rdev << std::endl;
if (std::string(relpath) == "/")
- return -EEXIST;
+ return -CEPHFS_EEXIST;
filepath path(relpath);
string name = path.last_dentry();
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << target << std::endl;
tout(cct) << relpath << std::endl;
if (std::string(relpath) == "/")
- return -EEXIST;
+ return -CEPHFS_EEXIST;
filepath path(relpath);
string name = path.last_dentry();
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
int Client::_readlink(Inode *in, char *buf, size_t size)
{
if (!in->is_symlink())
- return -EINVAL;
+ return -CEPHFS_EINVAL;
// copy into buf (at most size bytes)
int r = in->symlink.length();
ccap_string(issued) << dendl;
if (in->snapid != CEPH_NOSNAP) {
- return -EROFS;
+ return -CEPHFS_EROFS;
}
if ((mask & CEPH_SETATTR_SIZE) &&
(uint64_t)stx->stx_size > in->size &&
is_quota_bytes_exceeded(in, (uint64_t)stx->stx_size - in->size,
perms)) {
- return -EDQUOT;
+ return -CEPHFS_EDQUOT;
}
// make the change locally?
} else { //too big!
put_request(req);
ldout(cct,10) << "unable to set size to " << stx->stx_size << ". Too large!" << dendl;
- return -EFBIG;
+ return -CEPHFS_EFBIG;
}
req->inode_drop |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
CEPH_CAP_FILE_WR;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << fd << std::endl;
std::scoped_lock lock(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
#if defined(__linux__) && defined(O_PATH)
if (f->flags & O_PATH)
- return -EBADF;
+ return -CEPHFS_EBADF;
#endif
return _setattr(f->inode, attr, mask, perms);
}
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << fd << std::endl;
std::scoped_lock lock(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
#if defined(__linux__) && defined(O_PATH)
if (f->flags & O_PATH)
- return -EBADF;
+ return -CEPHFS_EBADF;
#endif
return _setattrx(f->inode, stx, mask, perms);
}
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
ldout(cct, 3) << __func__ << " enter (relpath " << relpath << " mask " << mask << ")" << dendl;
tout(cct) << "stat" << std::endl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
ldout(cct, 3) << __func__ << " enter (relpath " << relpath << " want " << want << ")" << dendl;
tout(cct) << "statx" << std::endl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
ldout(cct, 3) << __func__ << " enter (relpath " << relpath << " mask " << mask << ")" << dendl;
tout(cct) << __func__ << std::endl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << fd << std::endl;
std::scoped_lock lock(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
#if defined(__linux__) && defined(O_PATH)
if (f->flags & O_PATH)
- return -EBADF;
+ return -CEPHFS_EBADF;
#endif
struct stat attr;
attr.st_mode = mode;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << fd << std::endl;
std::scoped_lock lock(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
#if defined(__linux__) && defined(O_PATH)
if (f->flags & O_PATH)
- return -EBADF;
+ return -CEPHFS_EBADF;
#endif
struct stat attr;
attr.st_uid = new_uid;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << fd << std::endl;
std::scoped_lock lock(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
#if defined(__linux__) && defined(O_PATH)
if (f->flags & O_PATH)
- return -EBADF;
+ return -CEPHFS_EBADF;
#endif
struct stat attr;
utime_t atime(times[0]);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << fd << std::endl;
std::scoped_lock lock(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
return _flock(f, operation, owner);
}
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
}
r = _opendir(in.get(), dirpp, perms);
  /* if ENOTDIR, dirpp will be an uninitialized pointer and it's very dangerous to access its value */
- if (r != -ENOTDIR)
+ if (r != -CEPHFS_ENOTDIR)
tout(cct) << (uintptr_t)*dirpp << std::endl;
return r;
}
int Client::_opendir(Inode *in, dir_result_t **dirpp, const UserPerm& perms)
{
if (!in->is_dir())
- return -ENOTDIR;
+ return -CEPHFS_ENOTDIR;
*dirpp = new dir_result_t(in, perms);
opened_dirs.insert(*dirpp);
ldout(cct, 8) << __func__ << "(" << in->ino << ") = " << 0 << " (" << *dirpp << ")" << dendl;
bufferlist dirbl;
int res = make_request(req, dirp->perms, NULL, NULL, -1, &dirbl);
- if (res == -EAGAIN) {
+ if (res == -CEPHFS_EAGAIN) {
ldout(cct, 10) << __func__ << " got EAGAIN, retrying" << dendl;
_readdir_rechoose_frag(dirp);
return _readdir_get_frag(dirp);
while (true) {
int mask = caps;
if (!dirp->inode->is_complete_and_ordered())
- return -EAGAIN;
+ return -CEPHFS_EAGAIN;
if (pd == dir->readdir_cache.end())
break;
Dentry *dn = *pd;
// the content of readdir_cache may change after _getattr(), so pd may be invalid iterator
pd = dir->readdir_cache.begin() + idx;
if (pd >= dir->readdir_cache.end() || *pd != dn)
- return -EAGAIN;
+ return -CEPHFS_EAGAIN;
struct ceph_statx stx;
struct dirent de;
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::unique_lock cl(client_lock);
dirp->inode->is_complete_and_ordered() &&
dirp->inode->caps_issued_mask(CEPH_CAP_FILE_SHARED, true)) {
int err = _readdir_cache_cb(dirp, cb, p, caps, getref);
- if (err != -EAGAIN)
+ if (err != -CEPHFS_EAGAIN)
return err;
}
if (gr.pos) { // but we got some entries already!
return gr.pos;
} // or we need a larger buffer
- return -ERANGE;
+ return -CEPHFS_ERANGE;
} else { // actual error, return it
return r;
}
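// Illustrative sketch (hypothetical caller, not code from this file): per the
// branches above, a positive return is the number of bytes of entries filled,
// while -CEPHFS_ERANGE means the supplied buffer was too small, e.g.
//   int n = client->getdents(dirp, buf, buflen);   // assumed call shape
//   if (n == -CEPHFS_ERANGE) { /* grow buf and retry */ }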
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
int cflags = ceph_flags_sys2wire(flags);
int r = path_walk(path, &in, perms, followsym, mask);
if (r == 0 && (flags & O_CREAT) && (flags & O_EXCL))
- return -EEXIST;
+ return -CEPHFS_EEXIST;
#if defined(__linux__) && defined(O_PATH)
if (r == 0 && in->is_symlink() && (flags & O_NOFOLLOW) && !(flags & O_PATH))
#else
if (r == 0 && in->is_symlink() && (flags & O_NOFOLLOW))
#endif
- return -ELOOP;
+ return -CEPHFS_ELOOP;
- if (r == -ENOENT && (flags & O_CREAT)) {
+ if (r == -CEPHFS_ENOENT && (flags & O_CREAT)) {
filepath dirpath = path;
string dname = dirpath.last_dentry();
dirpath.pop_dentry();
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
MetaRequest *req = new MetaRequest(CEPH_MDS_OP_LOOKUPHASH);
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
MetaRequest *req = new MetaRequest(CEPH_MDS_OP_LOOKUPINO);
filepath path(vino.ino);
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
MetaRequest *req = new MetaRequest(CEPH_MDS_OP_LOOKUPNAME);
req->set_filepath2(filepath(parent->ino));
{
if (in->snapid != CEPH_NOSNAP &&
(flags & (O_WRONLY | O_RDWR | O_CREAT | O_TRUNC | O_APPEND))) {
- return -EROFS;
+ return -CEPHFS_EROFS;
}
// use normalized flags to generate cmode
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
ldout(cct, 3) << "close enter(" << fd << ")" << dendl;
tout(cct) << "close" << std::endl;
std::scoped_lock lock(client_lock);
Fh *fh = get_filehandle(fd);
if (!fh)
- return -EBADF;
+ return -CEPHFS_EBADF;
int err = _release_fh(fh);
fd_map.erase(fd);
put_fd(fd);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << "lseek" << std::endl;
tout(cct) << fd << std::endl;
std::scoped_lock lock(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
#if defined(__linux__) && defined(O_PATH)
if (f->flags & O_PATH)
- return -EBADF;
+ return -CEPHFS_EBADF;
#endif
return _lseek(f, offset, whence);
}
#ifdef SEEK_DATA
case SEEK_DATA:
if (offset < 0 || static_cast<uint64_t>(offset) >= in->size)
- return -ENXIO;
+ return -CEPHFS_ENXIO;
pos = offset;
break;
#endif
#ifdef SEEK_HOLE
case SEEK_HOLE:
if (offset < 0 || static_cast<uint64_t>(offset) >= in->size)
- return -ENXIO;
+ return -CEPHFS_ENXIO;
pos = in->size;
break;
#endif
default:
ldout(cct, 1) << __func__ << ": invalid whence value " << whence << dendl;
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
if (pos < 0) {
- return -EINVAL;
+ return -CEPHFS_EINVAL;
} else {
f->pos = pos;
}
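// Illustrative sketch (hypothetical caller, not code from this file): given the
// checks above, SEEK_DATA/SEEK_HOLE at or past EOF yields -CEPHFS_ENXIO, and a
// bad whence or a negative resulting position yields -CEPHFS_EINVAL, e.g.
//   loff_t pos = client->lseek(fd, off, SEEK_DATA);   // assumed call shape
//   if (pos == -CEPHFS_ENXIO) { /* off is at or beyond EOF */ }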
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << "read" << std::endl;
tout(cct) << fd << std::endl;
std::unique_lock lock(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
#if defined(__linux__) && defined(O_PATH)
if (f->flags & O_PATH)
- return -EBADF;
+ return -CEPHFS_EBADF;
#endif
bufferlist bl;
  /* We can't return bytes read larger than INT_MAX, clamp size to that */
int Client::preadv(int fd, const struct iovec *iov, int iovcnt, loff_t offset)
{
if (iovcnt < 0)
- return -EINVAL;
+ return -CEPHFS_EINVAL;
return _preadv_pwritev(fd, iov, iovcnt, offset, false);
}
utime_t start = ceph_clock_now();
if ((f->mode & CEPH_FILE_MODE_RD) == 0)
- return -EBADF;
+ return -CEPHFS_EBADF;
//bool lazy = f->mode == CEPH_FILE_MODE_LAZY;
if (offset < 0) {
client_lock.unlock();
int ret = onuninline->wait();
client_lock.lock();
- if (ret >= 0 || ret == -ECANCELED) {
+ if (ret >= 0 || ret == -CEPHFS_ECANCELED) {
in->inline_data.clear();
in->inline_version = CEPH_INLINE_NONE;
in->mark_caps_dirty(CEPH_CAP_FILE_WR);
int r = onfinish.wait();
// if we get ENOENT from OSD, assume 0 bytes returned
- if (r == -ENOENT)
+ if (r == -CEPHFS_ENOENT)
r = 0;
if (r < 0)
return r;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << "write" << std::endl;
tout(cct) << fd << std::endl;
std::scoped_lock lock(client_lock);
Fh *fh = get_filehandle(fd);
if (!fh)
- return -EBADF;
+ return -CEPHFS_EBADF;
#if defined(__linux__) && defined(O_PATH)
if (fh->flags & O_PATH)
- return -EBADF;
+ return -CEPHFS_EBADF;
#endif
/* We can't return bytes written larger than INT_MAX, clamp size to that */
size = std::min(size, (loff_t)INT_MAX);
int Client::pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset)
{
if (iovcnt < 0)
- return -EINVAL;
+ return -CEPHFS_EINVAL;
return _preadv_pwritev(fd, iov, iovcnt, offset, true);
}
{
#if defined(__linux__) && defined(O_PATH)
if (fh->flags & O_PATH)
- return -EBADF;
+ return -CEPHFS_EBADF;
#endif
loff_t totallen = 0;
for (unsigned i = 0; i < iovcnt; i++) {
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << fd << std::endl;
tout(cct) << offset << std::endl;
std::unique_lock cl(client_lock);
Fh *fh = get_filehandle(fd);
if (!fh)
- return -EBADF;
+ return -CEPHFS_EBADF;
return _preadv_pwritev_locked(fh, iov, iovcnt, offset, write, true, cl);
}
uint64_t fpos = 0;
if ((uint64_t)(offset+size) > mdsmap->get_max_filesize()) //too large!
- return -EFBIG;
+ return -CEPHFS_EFBIG;
//ldout(cct, 7) << "write fh " << fh << " size " << size << " offset " << offset << dendl;
Inode *in = f->inode.get();
if (objecter->osdmap_pool_full(in->layout.pool_id)) {
- return -ENOSPC;
+ return -CEPHFS_ENOSPC;
}
ceph_assert(in->snapid == CEPH_NOSNAP);
// was Fh opened as writeable?
if ((f->mode & CEPH_FILE_MODE_WR) == 0)
- return -EBADF;
+ return -CEPHFS_EBADF;
// use/adjust fd pos?
if (offset < 0) {
uint64_t endoff = offset + size;
if (endoff > in->size && is_quota_bytes_exceeded(in, endoff - in->size,
f->actor_perms)) {
- return -EDQUOT;
+ return -CEPHFS_EDQUOT;
}
//bool lazy = f->mode == CEPH_FILE_MODE_LAZY;
int uninline_ret = onuninline->wait();
client_lock.lock();
- if (uninline_ret >= 0 || uninline_ret == -ECANCELED) {
+ if (uninline_ret >= 0 || uninline_ret == -CEPHFS_ECANCELED) {
in->inline_data.clear();
in->inline_version = CEPH_INLINE_NONE;
in->mark_caps_dirty(CEPH_CAP_FILE_WR);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
tout(cct) << fd << std::endl;
std::scoped_lock lock(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
#if defined(__linux__) && defined(O_PATH)
if (f->flags & O_PATH)
- return -EBADF;
+ return -CEPHFS_EBADF;
#endif
if ((f->mode & CEPH_FILE_MODE_WR) == 0)
- return -EBADF;
+ return -CEPHFS_EBADF;
struct stat attr;
attr.st_size = length;
return _setattr(f->inode, &attr, CEPH_SETATTR_SIZE, perms);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << "fsync" << std::endl;
tout(cct) << fd << std::endl;
std::scoped_lock lock(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
#if defined(__linux__) && defined(O_PATH)
if (f->flags & O_PATH)
- return -EBADF;
+ return -CEPHFS_EBADF;
#endif
int r = _fsync(f, syncdataonly);
if (r == 0) {
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << "fstat mask " << hex << mask << dec << std::endl;
tout(cct) << fd << std::endl;
std::scoped_lock lock(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
int r = _getattr(f->inode, mask, perms);
if (r < 0)
return r;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << "fstatx flags " << hex << flags << " want " << want << dec << std::endl;
tout(cct) << fd << std::endl;
std::scoped_lock lock(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
unsigned mask = statx_to_mask(flags, want);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << "chdir" << std::endl;
tout(cct) << relpath << std::endl;
return r;
if (!(in.get()->is_dir()))
- return -ENOTDIR;
+ return -CEPHFS_ENOTDIR;
if (cwd != in)
cwd.swap(in);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << __func__ << std::endl;
unsigned long int total_files_on_fs;
<< " " << fl->l_start << "~" << fl->l_len << dendl;
if (in->flags & I_ERROR_FILELOCK)
- return -EIO;
+ return -CEPHFS_EIO;
int lock_cmd;
if (F_RDLCK == fl->l_type)
else if (F_UNLCK == fl->l_type)
lock_cmd = CEPH_LOCK_UNLOCK;
else
- return -EIO;
+ return -CEPHFS_EIO;
if (op != CEPH_MDS_OP_SETFILELOCK || lock_cmd == CEPH_LOCK_UNLOCK)
sleep = 0;
lock_state = in->flock_locks.get();
} else {
ceph_abort();
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
_update_lock_state(fl, owner, lock_state);
{
// Set abort code, but do not kick. The abort code prevents the request
// from being re-sent.
- req->abort(-EINTR);
+ req->abort(-CEPHFS_EINTR);
if (req->mds < 0)
return 0; // haven't sent the request
lock_type = CEPH_LOCK_FCNTL_INTR;
else {
ceph_abort();
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
MetaRequest *intr_req = new MetaRequest(CEPH_MDS_OP_SETFILELOCK);
type = F_UNLCK;
break;
default:
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
struct flock fl;
int Client::get_snap_info(const char *path, const UserPerm &perms, SnapInfo *snap_info) {
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied()) {
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
}
std::scoped_lock lock(client_lock);
}
if (in->snapid == CEPH_NOSNAP) {
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
snap_info->id = in->snapid;
RWRef_t iref_reader(initialize_state, CLIENT_INITIALIZED);
if (!iref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
can_invalidate_dentries = can_invalidate;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock l(client_lock);
std::scoped_lock l(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
return _lazyio(f, enable);
}
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
// for now
_fsync(f, true);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
Inode *in = f->inode.get();
_fsync(f, true);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock l(client_lock);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock l(client_lock);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
return f->inode->caps_issued();
}
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
vinodeno_t vparent = _get_vino(parent);
ldout(cct, 3) << __func__ << " " << vparent << " " << name << dendl;
ceph_assert(inode != NULL);
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
ldout(cct, 3) << __func__ << " " << vino << dendl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
vinodeno_t vparent = _get_vino(parent);
ldout(cct, 3) << __func__ << " " << vparent << " " << name << dendl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
filepath fp(name, 0);
InodeRef in;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
return _getxattr(f->inode, name, value, size, perms);
}
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
return Client::_listxattr(f->inode.get(), list, size, perms);
}
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
return _removexattr(f->inode, name, perms);
}
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
_setxattr_maybe_wait_for_osdmap(name, value, size);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
_setxattr_maybe_wait_for_osdmap(name, value, size);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
_setxattr_maybe_wait_for_osdmap(name, value, size);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
return _setxattr(f->inode, name, value, size, flags, perms);
}
const VXattr *vxattr = _match_vxattr(in, name);
if (vxattr) {
- r = -ENODATA;
+ r = -CEPHFS_ENODATA;
// Do a force getattr to get the latest quota before returning
// a value to userspace.
if (!(vxattr->exists_cb && !(this->*(vxattr->exists_cb))(in))) {
r = (this->*(vxattr->getxattr_cb))(in, buf, sizeof(buf));
} else {
- r = -ENODATA;
+ r = -CEPHFS_ENODATA;
}
if (size != 0) {
if (r > (int)size) {
- r = -ERANGE;
+ r = -CEPHFS_ERANGE;
} else if (r > 0) {
memcpy(value, buf, r);
}
}
if (acl_type == NO_ACL && !strncmp(name, "system.", 7)) {
- r = -EOPNOTSUPP;
+ r = -CEPHFS_EOPNOTSUPP;
goto out;
}
r = _getattr(in, CEPH_STAT_CAP_XATTR, perms, in->xattr_version == 0);
if (r == 0) {
string n(name);
- r = -ENODATA;
+ r = -CEPHFS_ENODATA;
if (in->xattrs.count(n)) {
r = in->xattrs[n].length();
if (r > 0 && size != 0) {
if (size >= (unsigned)r)
memcpy(value, in->xattrs[n].c_str(), r);
else
- r = -ERANGE;
+ r = -CEPHFS_ERANGE;
}
}
}
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
vinodeno_t vino = _get_vino(in);
continue;
if (this_len > size) {
- r = -ERANGE;
+ r = -CEPHFS_ERANGE;
goto out;
}
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
vinodeno_t vino = _get_vino(in);
size_t size, int flags, const UserPerm& perms)
{
if (in->snapid != CEPH_NOSNAP) {
- return -EROFS;
+ return -CEPHFS_EROFS;
}
if (size == 0) {
value = "";
} else if (value == NULL) {
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
bool posix_acl_xattr = false;
strncmp(name, "trusted.", 8) &&
strncmp(name, "ceph.", 5) &&
!posix_acl_xattr)
- return -EOPNOTSUPP;
+ return -CEPHFS_EOPNOTSUPP;
bool check_realm = false;
} else if (!strcmp(name, ACL_EA_DEFAULT)) {
if (value) {
if (!S_ISDIR(in->mode))
- return -EACCES;
+ return -CEPHFS_EACCES;
int ret = posix_acl_check(value, size);
if (ret < 0)
- return -EINVAL;
+ return -CEPHFS_EINVAL;
if (ret == 0) {
value = NULL;
size = 0;
}
}
} else {
- return -EOPNOTSUPP;
+ return -CEPHFS_EOPNOTSUPP;
}
} else {
const VXattr *vxattr = _match_vxattr(in, name);
if (vxattr) {
if (vxattr->readonly)
- return -EOPNOTSUPP;
+ return -CEPHFS_EOPNOTSUPP;
if (vxattr->name.compare(0, 10, "ceph.quota") == 0 && value)
check_realm = true;
}
// check if snaprealm was created for quota inode
if (in->quota.is_enable() &&
!(in->snaprealm && in->snaprealm->ino == in->ino))
- ret = -EOPNOTSUPP;
+ ret = -CEPHFS_EOPNOTSUPP;
}
return ret;
keys_and_values<string::iterator> p; // create instance of parser
std::map<string, string> m; // map to receive results
if (!qi::parse(begin, end, p, m)) { // returns true if successful
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
if (begin != end)
- return -EINVAL;
+ return -CEPHFS_EINVAL;
for (map<string,string>::iterator q = m.begin(); q != m.end(); ++q) {
if (q->first == "pool") {
tmp = q->second;
try {
pool = boost::lexical_cast<unsigned>(tmp);
if (!osdmap->have_pg_pool(pool))
- return -ENOENT;
+ return -CEPHFS_ENOENT;
} catch (boost::bad_lexical_cast const&) {
pool = osdmap->lookup_pg_pool_name(tmp);
if (pool < 0) {
- return -ENOENT;
+ return -CEPHFS_ENOENT;
}
}
}
return _setxattr_check_data_pool(rest, v, &o);
});
- if (r == -ENOENT) {
+ if (r == -CEPHFS_ENOENT) {
bs::error_code ec;
ldout(cct, 20) << __func__ << ": waiting for latest osdmap" << dendl;
objecter->wait_for_latest_osdmap(ca::use_blocked[ec]);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
_setxattr_maybe_wait_for_osdmap(name, value, size);
int Client::_removexattr(Inode *in, const char *name, const UserPerm& perms)
{
if (in->snapid != CEPH_NOSNAP) {
- return -EROFS;
+ return -CEPHFS_EROFS;
}
// same xattrs supported by kernel client
strncmp(name, "security.", 9) &&
strncmp(name, "trusted.", 8) &&
strncmp(name, "ceph.", 5))
- return -EOPNOTSUPP;
+ return -CEPHFS_EOPNOTSUPP;
const VXattr *vxattr = _match_vxattr(in, name);
if (vxattr && vxattr->readonly)
- return -EOPNOTSUPP;
+ return -CEPHFS_EOPNOTSUPP;
MetaRequest *req = new MetaRequest(CEPH_MDS_OP_RMXATTR);
filepath path;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
vinodeno_t vino = _get_vino(in);
}
bool Client::_vxattrcb_dir_pin_exists(Inode *in)
{
- return in->dir_pin != -ENODATA;
+ return in->dir_pin != -CEPHFS_ENODATA;
}
size_t Client::_vxattrcb_dir_pin(Inode *in, char *val, size_t size)
{
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
vinodeno_t vino = _get_vino(in);
<< ", gid " << perms.gid() << ")" << dendl;
if (strlen(name) > NAME_MAX)
- return -ENAMETOOLONG;
+ return -CEPHFS_ENAMETOOLONG;
if (dir->snapid != CEPH_NOSNAP) {
- return -EROFS;
+ return -CEPHFS_EROFS;
}
if (is_quota_files_exceeded(dir, perms)) {
- return -EDQUOT;
+ return -CEPHFS_EDQUOT;
}
MetaRequest *req = new MetaRequest(CEPH_MDS_OP_MKNOD);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
vinodeno_t vparent = _get_vino(parent);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
unsigned caps = statx_to_mask(flags, want);
mode << dec << ")" << dendl;
if (strlen(name) > NAME_MAX)
- return -ENAMETOOLONG;
+ return -CEPHFS_ENAMETOOLONG;
if (dir->snapid != CEPH_NOSNAP) {
- return -EROFS;
+ return -CEPHFS_EROFS;
}
if (is_quota_files_exceeded(dir, perms)) {
- return -EDQUOT;
+ return -CEPHFS_EDQUOT;
}
// use normalized flags to generate cmode
pool_id = objecter->with_osdmap(
std::mem_fn(&OSDMap::lookup_pg_pool_name), data_pool);
if (pool_id < 0)
- return -EINVAL;
+ return -CEPHFS_EINVAL;
if (pool_id > 0xffffffffll)
- return -ERANGE; // bummer!
+ return -CEPHFS_ERANGE; // bummer!
}
MetaRequest *req = new MetaRequest(CEPH_MDS_OP_CREATE);
<< ", gid " << perm.gid() << ")" << dendl;
if (strlen(name) > NAME_MAX)
- return -ENAMETOOLONG;
+ return -CEPHFS_ENAMETOOLONG;
if (dir->snapid != CEPH_NOSNAP && dir->snapid != CEPH_SNAPDIR) {
- return -EROFS;
+ return -CEPHFS_EROFS;
}
if (is_quota_files_exceeded(dir, perm)) {
- return -EDQUOT;
+ return -CEPHFS_EDQUOT;
}
bool is_snap_op = dir->snapid == CEPH_SNAPDIR;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
vinodeno_t vparent = _get_vino(parent);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
vinodeno_t vparent = _get_vino(parent);
<< dendl;
if (strlen(name) > NAME_MAX)
- return -ENAMETOOLONG;
+ return -CEPHFS_ENAMETOOLONG;
if (dir->snapid != CEPH_NOSNAP) {
- return -EROFS;
+ return -CEPHFS_EROFS;
}
if (is_quota_files_exceeded(dir, perms)) {
- return -EDQUOT;
+ return -CEPHFS_EDQUOT;
}
MetaRequest *req = new MetaRequest(CEPH_MDS_OP_SYMLINK);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
vinodeno_t vparent = _get_vino(parent);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
vinodeno_t vparent = _get_vino(parent);
<< ")" << dendl;
if (dir->snapid != CEPH_NOSNAP) {
- return -EROFS;
+ return -CEPHFS_EROFS;
}
MetaRequest *req = new MetaRequest(CEPH_MDS_OP_UNLINK);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
vinodeno_t vino = _get_vino(in);
<< perms.uid() << " gid " << perms.gid() << ")" << dendl;
if (dir->snapid != CEPH_NOSNAP && dir->snapid != CEPH_SNAPDIR) {
- return -EROFS;
+ return -CEPHFS_EROFS;
}
int op = dir->snapid == CEPH_SNAPDIR ? CEPH_MDS_OP_RMSNAP : CEPH_MDS_OP_RMDIR;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
vinodeno_t vino = _get_vino(in);
<< dendl;
if (fromdir->snapid != todir->snapid)
- return -EXDEV;
+ return -CEPHFS_EXDEV;
int op = CEPH_MDS_OP_RENAME;
if (fromdir->snapid != CEPH_NOSNAP) {
if (fromdir == todir && fromdir->snapid == CEPH_SNAPDIR)
op = CEPH_MDS_OP_RENAMESNAP;
else
- return -EROFS;
+ return -CEPHFS_EROFS;
}
if (fromdir != todir) {
Inode *fromdir_root =
Inode *todir_root =
todir->quota.is_enable() ? todir : get_quota_root(todir, perm);
if (fromdir_root != todir_root) {
- return -EXDEV;
+ return -CEPHFS_EXDEV;
}
}
}
req->other_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
break;
- case -ENOENT:
+ case -CEPHFS_ENOENT:
break;
default:
goto fail;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
vinodeno_t vparent = _get_vino(parent);
vinodeno_t vnewparent = _get_vino(newparent);
if (r < 0)
return r;
r = may_delete(newparent, newname, perm);
- if (r < 0 && r != -ENOENT)
+ if (r < 0 && r != -CEPHFS_ENOENT)
return r;
}
<< " uid " << perm.uid() << " gid " << perm.gid() << ")" << dendl;
if (strlen(newname) > NAME_MAX)
- return -ENAMETOOLONG;
+ return -CEPHFS_ENAMETOOLONG;
if (in->snapid != CEPH_NOSNAP || dir->snapid != CEPH_NOSNAP) {
- return -EROFS;
+ return -CEPHFS_EROFS;
}
if (is_quota_files_exceeded(dir, perm)) {
- return -EDQUOT;
+ return -CEPHFS_EDQUOT;
}
in->break_all_delegs();
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
vinodeno_t vino = _get_vino(in);
vinodeno_t vnewparent = _get_vino(newparent);
if (!fuse_default_permissions) {
if (S_ISDIR(in->mode))
- return -EPERM;
+ return -CEPHFS_EPERM;
int r = may_hardlink(in, perm);
if (r < 0)
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
vinodeno_t vino = _get_vino(in);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
ldout(cct, 3) << "ll_releasedir " << dirp << dendl;
tout(cct) << "ll_releasedir" << std::endl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
ldout(cct, 3) << "ll_fsyncdir " << dirp << dendl;
tout(cct) << "ll_fsyncdir" << std::endl;
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
vinodeno_t vino = _get_vino(in);
int r = _lookup(parent, name, caps, in, perms);
if (r == 0 && (flags & O_CREAT) && (flags & O_EXCL))
- return -EEXIST;
+ return -CEPHFS_EEXIST;
- if (r == -ENOENT && (flags & O_CREAT)) {
+ if (r == -CEPHFS_ENOENT && (flags & O_CREAT)) {
if (!fuse_default_permissions) {
r = may_create(parent, perms);
if (r < 0)
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
InodeRef in;
unsigned caps = statx_to_mask(lflags, want);
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
InodeRef in;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
tout(cct) << "ll_lseek" << std::endl;
tout(cct) << offset << std::endl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
ldout(cct, 3) << "ll_read " << fh << " " << fh->inode->ino << " " << " " << off << "~" << len << dendl;
tout(cct) << "ll_read" << std::endl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
vinodeno_t vino = _get_vino(in);
object_t oid = file_object_t(vino.ino, blockid);
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
if (length == 0) {
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
if (true || sync) {
/* if write is stable, the epilogue is waiting on
<< offset << " to " << length << dendl;
if (length == 0) {
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
std::scoped_lock lock(client_lock);
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
/* We can't return bytes written larger than INT_MAX, clamp len to that */
len = std::min(len, (loff_t)INT_MAX);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::unique_lock cl(client_lock);
return _preadv_pwritev_locked(fh, iov, iovcnt, off, true, false, cl);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::unique_lock cl(client_lock);
return _preadv_pwritev_locked(fh, iov, iovcnt, off, false, false, cl);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
ldout(cct, 3) << "ll_flush " << fh << " " << fh->inode->ino << " " << dendl;
tout(cct) << "ll_flush" << std::endl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
ldout(cct, 3) << "ll_fsync " << fh << " " << fh->inode->ino << " " << dendl;
tout(cct) << "ll_fsync" << std::endl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
ldout(cct, 3) << "ll_sync_inode " << *in << " " << dendl;
tout(cct) << "ll_sync_inode" << std::endl;
ceph_assert(ceph_mutex_is_locked_by_me(client_lock));
if (offset < 0 || length <= 0)
- return -EINVAL;
+ return -CEPHFS_EINVAL;
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
- return -EOPNOTSUPP;
+ return -CEPHFS_EOPNOTSUPP;
if ((mode & FALLOC_FL_PUNCH_HOLE) && !(mode & FALLOC_FL_KEEP_SIZE))
- return -EOPNOTSUPP;
+ return -CEPHFS_EOPNOTSUPP;
Inode *in = fh->inode.get();
if (objecter->osdmap_pool_full(in->layout.pool_id) &&
!(mode & FALLOC_FL_PUNCH_HOLE)) {
- return -ENOSPC;
+ return -CEPHFS_ENOSPC;
}
if (in->snapid != CEPH_NOSNAP)
- return -EROFS;
+ return -CEPHFS_EROFS;
if ((fh->mode & CEPH_FILE_MODE_WR) == 0)
- return -EBADF;
+ return -CEPHFS_EBADF;
uint64_t size = offset + length;
if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE)) &&
size > in->size &&
is_quota_bytes_exceeded(in, size - in->size, fh->actor_perms)) {
- return -EDQUOT;
+ return -CEPHFS_EDQUOT;
}
int have;
int ret = onuninline->wait();
client_lock.lock();
- if (ret >= 0 || ret == -ECANCELED) {
+ if (ret >= 0 || ret == -CEPHFS_ECANCELED) {
in->inline_data.clear();
in->inline_version = CEPH_INLINE_NONE;
in->mark_caps_dirty(CEPH_CAP_FILE_WR);
int Client::_fallocate(Fh *fh, int mode, int64_t offset, int64_t length)
{
- return -EOPNOTSUPP;
+ return -CEPHFS_EOPNOTSUPP;
}
#endif
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
ldout(cct, 3) << __func__ << " " << fh << " " << fh->inode->ino << " " << dendl;
tout(cct) << __func__ << " " << mode << " " << offset << " " << length << std::endl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
  tout(cct) << __func__ << " " << fd << " " << mode << " " << offset << " " << length << std::endl;
std::scoped_lock lock(client_lock);
Fh *fh = get_filehandle(fd);
if (!fh)
- return -EBADF;
+ return -CEPHFS_EBADF;
#if defined(__linux__) && defined(O_PATH)
if (fh->flags & O_PATH)
- return -EBADF;
+ return -CEPHFS_EBADF;
#endif
return _fallocate(fh, mode, offset, length);
}
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
ldout(cct, 3) << __func__ << " (fh)" << fh << " " << fh->inode->ino << " " <<
dendl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
ldout(cct, 3) << "ll_getlk (fh)" << fh << " " << fh->inode->ino << dendl;
tout(cct) << "ll_getk (fh)" << (uintptr_t)fh << std::endl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
ldout(cct, 3) << __func__ << " (fh) " << fh << " " << fh->inode->ino << dendl;
tout(cct) << __func__ << " (fh)" << (uintptr_t)fh << std::endl;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
ldout(cct, 3) << __func__ << " (fh) " << fh << " " << fh->inode->ino << dendl;
tout(cct) << __func__ << " (fh)" << (uintptr_t)fh << std::endl;
* delegation before the session autoclose timeout kicks in.
*/
if (timeout >= mdsmap->get_session_autoclose())
- return -EINVAL;
+ return -CEPHFS_EINVAL;
deleg_timeout = timeout;
return 0;
int Client::ll_delegation(Fh *fh, unsigned cmd, ceph_deleg_cb_t cb, void *priv)
{
- int ret = -EINVAL;
+ int ret = -CEPHFS_EINVAL;
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
try {
ret = inode->set_deleg(fh, cmd, cb, priv);
} catch (std::bad_alloc&) {
- ret = -ENOMEM;
+ ret = -CEPHFS_ENOMEM;
}
break;
}
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
Inode *in = f->inode.get();
*lp = in->layout;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
return objecter->with_osdmap([pool](const OSDMap& o) {
- return o.have_pg_pool(pool) ? o.get_pg_pool(pool)->get_size() : -ENOENT;
+ return o.have_pg_pool(pool) ? o.get_pg_pool(pool)->get_size() : -CEPHFS_ENOENT;
});
}
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
Inode *in = f->inode.get();
vector<ObjectExtent> extents;
});
if (osds.empty())
- return -EINVAL;
+ return -CEPHFS_EINVAL;
/*
* Return the remainder of the extent (stripe unit)
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
if (id < 0)
- return -EINVAL;
+ return -CEPHFS_EINVAL;
return objecter->with_osdmap([&](const OSDMap& o) {
return o.crush->get_full_location_ordered(id, path);
});
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
Inode *in = f->inode.get();
// which object?
vector<int> osds;
o.pg_to_acting_osds(pg, osds);
if (osds.empty())
- return -EINVAL;
+ return -CEPHFS_EINVAL;
for (unsigned i = 0; i < osds.size(); i++) {
entity_addr_t addr = o.get_addrs(osds[i]).front();
address.push_back(addr);
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
return objecter->with_osdmap([&](const OSDMap& o) {
if (!o.exists(osd))
- return -ENOENT;
+ return -CEPHFS_ENOENT;
addr = o.get_addrs(osd).front();
return 0;
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
Fh *f = get_filehandle(fd);
if (!f)
- return -EBADF;
+ return -CEPHFS_EBADF;
Inode *in = f->inode.get();
// map to a list of extents
}
-/* find an osd with the same ip. -ENXIO if none. */
+/* find an osd with the same ip. -CEPHFS_ENXIO if none. */
int Client::get_local_osd()
{
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
std::scoped_lock lock(client_lock);
bool errored = false;
- if (rd_ret == 0 || rd_ret == -ENOENT)
+ if (rd_ret == 0 || rd_ret == -CEPHFS_ENOENT)
have |= POOL_READ;
- else if (rd_ret != -EPERM) {
+ else if (rd_ret != -CEPHFS_EPERM) {
ldout(cct, 10) << __func__ << " on pool " << pool_id << " ns " << pool_ns
<< " rd_err = " << rd_ret << " wr_err = " << wr_ret << dendl;
errored = true;
}
- if (wr_ret == 0 || wr_ret == -EEXIST)
+ if (wr_ret == 0 || wr_ret == -CEPHFS_EEXIST)
have |= POOL_WRITE;
- else if (wr_ret != -EPERM) {
+ else if (wr_ret != -CEPHFS_EPERM) {
ldout(cct, 10) << __func__ << " on pool " << pool_id << " ns " << pool_ns
<< " rd_err = " << rd_ret << " wr_err = " << wr_ret << dendl;
errored = true;
// userspace filesystem user.
pool_perms.erase(perm_key);
signal_cond_list(waiting_for_pool_perm);
- return -EIO;
+ return -CEPHFS_EIO;
}
pool_perms[perm_key] = have | POOL_CHECKED;
if ((need & CEPH_CAP_FILE_RD) && !(have & POOL_READ)) {
ldout(cct, 10) << __func__ << " on pool " << pool_id << " ns " << pool_ns
<< " need " << ccap_string(need) << ", but no read perm" << dendl;
- return -EPERM;
+ return -CEPHFS_EPERM;
}
if ((need & CEPH_CAP_FILE_WR) && !(have & POOL_WRITE)) {
ldout(cct, 10) << __func__ << " on pool " << pool_id << " ns " << pool_ns
<< " need " << ccap_string(need) << ", but no write perm" << dendl;
- return -EPERM;
+ return -CEPHFS_EPERM;
}
return 0;
return posix_acl_permits(access_acl, in->uid, in->gid, perms, want);
}
}
- return -EAGAIN;
+ return -CEPHFS_EAGAIN;
}
int Client::_posix_acl_chmod(Inode *in, mode_t mode, const UserPerm& perms)
{
RWRef_t iref_reader(initialize_state, CLIENT_INITIALIZED);
if (!iref_reader.is_state_satisfied())
- return -ENOTCONN;
+ return -CEPHFS_ENOTCONN;
if (uuid.empty())
- return -EINVAL;
+ return -CEPHFS_EINVAL;
std::unique_lock l(client_lock);
{
auto it = metadata.find("uuid");
if (it != metadata.end() && it->second == uuid)
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
int r = subscribe_mdsmap(fs_name);
if (!have_open_session(mds)) {
session = _get_or_open_mds_session(mds);
if (session->state == MetaSession::STATE_REJECTED)
- return -EPERM;
+ return -CEPHFS_EPERM;
if (session->state != MetaSession::STATE_OPENING) {
// umounting?
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
ldout(cct, 10) << "waiting for session to mds." << mds << " to open" << dendl;
wait_on_context_list(session->waiting_for_open);
session = &mds_sessions.at(mds);
if (!session->mds_features.test(CEPHFS_FEATURE_RECLAIM_CLIENT))
- return -EOPNOTSUPP;
+ return -CEPHFS_EOPNOTSUPP;
if (session->reclaim_state == MetaSession::RECLAIM_NULL ||
session->reclaim_state == MetaSession::RECLAIMING) {
session->con->send_message2(std::move(m));
wait_on_list(waiting_for_reclaim);
} else if (session->reclaim_state == MetaSession::RECLAIM_FAIL) {
- return reclaim_errno ? : -ENOTRECOVERABLE;
+ return reclaim_errno ? : -CEPHFS_ENOTRECOVERABLE;
} else {
mds++;
}
// didn't find target session in any mds
if (reclaim_target_addrs.empty()) {
if (flags & CEPH_RECLAIM_RESET)
- return -ENOENT;
- return -ENOTRECOVERABLE;
+ return -CEPHFS_ENOENT;
+ return -CEPHFS_ENOTRECOVERABLE;
}
if (flags & CEPH_RECLAIM_RESET)
return osd_map.is_blocklisted(reclaim_target_addrs);
});
if (blocklisted)
- return -ENOTRECOVERABLE;
+ return -CEPHFS_ENOTRECOVERABLE;
metadata["reclaiming_uuid"] = uuid;
return 0;
/**
* Returns the length of the buffer that got filled in, or -errno.
- * If it returns -ERANGE you just need to increase the size of the
+ * If it returns -CEPHFS_ERANGE you just need to increase the size of the
* buffer and try again.
*/
int _getdents(dir_result_t *dirp, char *buf, int buflen, bool ful); // get a bunch of dentries at once
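// Illustrative caller sketch (not part of this change; argument values are
// placeholders): a -CEPHFS_ERANGE result means the buffer was too small, so
// callers typically double it and retry, mirroring the ceph_getdnames() loop
// further down in this patch:
//
//   int buflen = 4096;
//   char *buf = new char[buflen];
//   int r;
//   while ((r = _getdents(dirp, buf, buflen, true)) == -CEPHFS_ERANGE) {
//     delete [] buf;        // too small for even one entry
//     buflen *= 2;          // grow and try again
//     buf = new char[buflen];
//   }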
ino_t last_used_faked_ino;
ino_t last_used_faked_root;
- int local_osd = -ENXIO;
+ int local_osd = -CEPHFS_ENXIO;
epoch_t local_osd_epoch = 0;
// mds requests
* allow it, with an unusual error to make it clear.
*/
if (!client->get_deleg_timeout())
- return -ETIME;
+ return -CEPHFS_ETIME;
// Just say no if we have any recalled delegs still outstanding
if (has_recalled_deleg()) {
lsubdout(client->cct, client, 10) << __func__ <<
": has_recalled_deleg" << dendl;
- return -EAGAIN;
+ return -CEPHFS_EAGAIN;
}
// check vs. currently open files on this inode
if (open_count_for_write()) {
lsubdout(client->cct, client, 10) << __func__ <<
": open for write" << dendl;
- return -EAGAIN;
+ return -CEPHFS_EAGAIN;
}
break;
case CEPH_DELEGATION_WR:
if (open_count() > 1) {
lsubdout(client->cct, client, 10) << __func__ << ": open" << dendl;
- return -EAGAIN;
+ return -CEPHFS_EAGAIN;
}
break;
default:
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
/*
if (!caps_issued_mask(need)) {
lsubdout(client->cct, client, 10) << __func__ << ": cap mismatch, have="
<< ccap_string(caps_issued()) << " need=" << ccap_string(need) << dendl;
- return -EAGAIN;
+ return -CEPHFS_EAGAIN;
}
for (list<Delegation>::iterator d = delegations.begin();
#define MINOR(dev) ((unsigned int) ((dev) & MINORMASK))
#define MKDEV(ma,mi) (((ma) << MINORBITS) | (mi))
+static const ceph::unordered_map<int,int> cephfs_errno_to_system_errno = {
+ {CEPHFS_EBLOCKLISTED, ESHUTDOWN},
+ {CEPHFS_EPERM, EPERM},
+ {CEPHFS_ESTALE, ESTALE},
+ {CEPHFS_ENOSPC, ENOSPC},
+ {CEPHFS_ETIMEDOUT, ETIMEDOUT},
+ {CEPHFS_EIO, EIO},
+ {CEPHFS_ENOTCONN, ENOTCONN},
+ {CEPHFS_EEXIST, EEXIST},
+ {CEPHFS_EINTR, EINTR},
+ {CEPHFS_EINVAL, EINVAL},
+ {CEPHFS_EBADF, EBADF},
+ {CEPHFS_EROFS, EROFS},
+ {CEPHFS_EAGAIN, EAGAIN},
+ {CEPHFS_EACCES, EACCES},
+ {CEPHFS_ELOOP, ELOOP},
+ {CEPHFS_EISDIR, EISDIR},
+ {CEPHFS_ENOENT, ENOENT},
+ {CEPHFS_ENOTDIR, ENOTDIR},
+ {CEPHFS_ENAMETOOLONG, ENAMETOOLONG},
+ {CEPHFS_EBUSY, EBUSY},
+ {CEPHFS_EDQUOT, EDQUOT},
+ {CEPHFS_EFBIG, EFBIG},
+ {CEPHFS_ERANGE, ERANGE},
+ {CEPHFS_ENXIO, ENXIO},
+ {CEPHFS_ECANCELED, ECANCELED},
+ {CEPHFS_ENODATA, ENODATA},
+ {CEPHFS_EOPNOTSUPP, EOPNOTSUPP},
+ {CEPHFS_EXDEV, EXDEV},
+ {CEPHFS_ENOMEM, ENOMEM},
+ {CEPHFS_ENOTRECOVERABLE, ENOTRECOVERABLE},
+ {CEPHFS_ENOSYS, ENOSYS},
+ {CEPHFS_ENOTEMPTY, ENOTEMPTY},
+ {CEPHFS_EDEADLK, EDEADLK},
+ {CEPHFS_EDOM, EDOM},
+ {CEPHFS_EMLINK, EMLINK},
+ {CEPHFS_ETIME, ETIME},
+ {CEPHFS_EOLDSNAPC, EIO} // forcing to EIO for now
+};
+
+/* Requirements:
+ * cephfs_errno >= 0
+ */
+static int get_sys_errno(int cephfs_errno)
+{
+ if (cephfs_errno == 0)
+ return 0;
+
+ auto it = cephfs_errno_to_system_errno.find(cephfs_errno);
+ if (it != cephfs_errno_to_system_errno.end())
+ return it->second;
+ return EIO;
+}
+
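// Usage sketch: Client methods return 0 or a negative CEPHFS_* code; the FUSE
// callbacks below pass the magnitude through get_sys_errno() so the kernel
// sees a host errno, e.g.:
//
//   int r = cfuse->client->ll_flush(fh);      // 0 or -CEPHFS_*
//   fuse_reply_err(req, get_sys_errno(-r));   // FUSE wants a positive host errno
//
// Codes with no direct host equivalent (e.g. CEPHFS_EOLDSNAPC) fall back to EIO.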
static uint32_t new_encode_dev(dev_t dev)
{
unsigned major = MAJOR(dev);
gid_t *gids = new (std::nothrow) gid_t[c];
if (!gids) {
- return -ENOMEM;
+ return -get_sys_errno(CEPHFS_ENOMEM);
}
c = fuse_req_getgroups(req, c, gids);
if (c < 0) {
}
return c;
#endif
- return -ENOSYS;
+ return -get_sys_errno(CEPHFS_ENOSYS);
}
static void get_fuse_groups(UserPerm& perms, fuse_req_t req)
{
r = cfuse->client->lookup_ino(parent, perms, &i1);
if (r < 0) {
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
return;
}
}
fe.attr.st_rdev = new_encode_dev(fe.attr.st_rdev);
fuse_reply_entry(req, &fe);
} else {
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
}
// XXX NB, we dont iput(i2) because FUSE will do so in a matching
if (r == 0)
fuse_reply_attr(req, attr, 0);
else
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
cfuse->iput(in); // iput required
}
get_fuse_groups(perms, req);
int r = cfuse->client->ll_setxattr(in, name, value, size, flags, perms);
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
cfuse->iput(in); // iput required
}
else if (r >= 0)
fuse_reply_buf(req, buf, r);
else
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
cfuse->iput(in); // iput required
}
else if (r >= 0)
fuse_reply_buf(req, buf, r);
else
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
cfuse->iput(in); // iput required
}
get_fuse_groups(perms, req);
int r = cfuse->client->ll_removexattr(in, name, perms);
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
cfuse->iput(in); // iput required
}
fi->fh = (uint64_t)dirp;
fuse_reply_open(req, fi);
} else {
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
}
cfuse->iput(in); // iput required
buf[r] = '\0';
fuse_reply_readlink(req, buf);
} else {
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
}
cfuse->iput(in); // iput required
fe.attr.st_rdev = new_encode_dev(fe.attr.st_rdev);
fuse_reply_entry(req, &fe);
} else {
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
}
// XXX NB, we dont iput(i2) because FUSE will do so in a matching
fe.attr.st_rdev = new_encode_dev(fe.attr.st_rdev);
fuse_reply_entry(req, &fe);
} else {
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
}
// XXX NB, we dont iput(i2) because FUSE will do so in a matching
get_fuse_groups(perm, req);
int r = cfuse->client->ll_unlink(in, name, perm);
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
cfuse->iput(in); // iput required
}
get_fuse_groups(perms, req);
int r = cfuse->client->ll_rmdir(in, name, perms);
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
cfuse->iput(in); // iput required
}
fe.attr.st_rdev = new_encode_dev(fe.attr.st_rdev);
fuse_reply_entry(req, &fe);
} else {
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
}
// XXX NB, we dont iput(i2) because FUSE will do so in a matching
get_fuse_groups(perm, req);
int r = cfuse->client->ll_rename(in, name, nin, newname, perm);
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
cfuse->iput(in); // iputs required
cfuse->iput(nin);
* On error however, we must put that reference.
*/
cfuse->iput(in);
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
}
cfuse->iput(nin);
#endif
fuse_reply_open(req, fi);
} else {
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
}
cfuse->iput(in); // iput required
iov.insert(iov.begin(), {0}); // the first one is reserved for fuse_out_header
fuse_reply_iov(req, &iov[0], iov.size());
} else
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
}
static void fuse_ll_write(fuse_req_t req, fuse_ino_t ino, const char *buf,
if (r >= 0)
fuse_reply_write(req, r);
else
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
}
static void fuse_ll_flush(fuse_req_t req, fuse_ino_t ino,
CephFuse::Handle *cfuse = fuse_ll_req_prepare(req);
Fh *fh = reinterpret_cast<Fh*>(fi->fh);
int r = cfuse->client->ll_flush(fh);
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
}
#ifdef FUSE_IOCTL_COMPAT
CephFuse::Handle *cfuse = fuse_ll_req_prepare(req);
Fh *fh = (Fh*)fi->fh;
int r = cfuse->client->ll_fallocate(fh, mode, offset, length);
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
}
#endif
CephFuse::Handle *cfuse = fuse_ll_req_prepare(req);
Fh *fh = reinterpret_cast<Fh*>(fi->fh);
int r = cfuse->client->ll_release(fh);
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
}
static void fuse_ll_fsync(fuse_req_t req, fuse_ino_t ino, int datasync,
CephFuse::Handle *cfuse = fuse_ll_req_prepare(req);
Fh *fh = reinterpret_cast<Fh*>(fi->fh);
int r = cfuse->client->ll_fsync(fh, datasync);
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
}
struct readdir_context {
rc.snap = cfuse->fino_snap(ino);
int r = cfuse->client->readdir_r_cb(dirp, fuse_ll_add_dirent, &rc);
- if (r == 0 || r == -ENOSPC) /* ignore ENOSPC from our callback */
+ if (r == 0 || r == -CEPHFS_ENOSPC) /* ignore CEPHFS_ENOSPC from our callback */
fuse_reply_buf(req, rc.buf, rc.pos);
else
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
delete[] rc.buf;
}
CephFuse::Handle *cfuse = fuse_ll_req_prepare(req);
dir_result_t *dirp = reinterpret_cast<dir_result_t*>(fi->fh);
int r = cfuse->client->ll_fsyncdir(dirp);
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
}
static void fuse_ll_access(fuse_req_t req, fuse_ino_t ino, int mask)
get_fuse_groups(perms, req);
int r = cfuse->client->inode_permission(in, perms, mask);
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
cfuse->iput(in);
}
#endif
fuse_reply_create(req, &fe, fi);
} else
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
// XXX NB, we dont iput(i2) because FUSE will do so in a matching
// fuse_ll_forget()
cfuse->iput(i1); // iput required
if (r == 0)
fuse_reply_statfs(req, &stbuf);
else
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
cfuse->iput(in); // iput required
}
if (r == 0)
fuse_reply_lock(req, lock);
else
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
}
static void fuse_ll_setlk(fuse_req_t req, fuse_ino_t ino,
}
int r = cfuse->client->ll_setlk(fh, lock, fi->lock_owner, sleep);
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
}
static void fuse_ll_interrupt(fuse_req_t req, void* data)
}
int r = cfuse->client->ll_flock(fh, cmd, fi->lock_owner);
- fuse_reply_err(req, -r);
+ fuse_reply_err(req, get_sys_errno(-r));
}
#endif
String directory = abspath.substr(0, abspath.rfind('/'));
int r;
HT_INFOF("Calling mkdirs on %s", directory.c_str());
- if((r=ceph_mkdirs(cmount, directory.c_str(), 0644)) < 0 && r!=-EEXIST) {
+ if((r=ceph_mkdirs(cmount, directory.c_str(), 0644)) < 0 && r!=-CEPHFS_EEXIST) {
HT_ERRORF("create failed on mkdirs: dname='%s' - %d", directory.c_str(), -r);
report_error(cb, -r);
return;
make_abs_path(dname, absdir);
int r;
- if((r=ceph_mkdirs(cmount, absdir.c_str(), 0644)) < 0 && r!=-EEXIST) {
+ if((r=ceph_mkdirs(cmount, absdir.c_str(), 0644)) < 0 && r!=-CEPHFS_EEXIST) {
HT_ERRORF("mkdirs failed: dname='%s' - %d", absdir.c_str(), -r);
report_error(cb, -r);
return;
int bufpos;
while (1) {
r = ceph_getdnames(cmount, dirp, buf, buflen);
- if (r==-ERANGE) { //expand the buffer
+ if (r==-CEPHFS_ERANGE) { //expand the buffer
delete [] buf;
buflen *= 2;
buf = new char[buflen];
#include "include/compat.h"
#include "include/types.h"
+#include "include/fs_types.h"
#include <sys/stat.h>
#include "posix_acl.h"
#include "UserPerm.h"
int posix_acl_equiv_mode(const void *xattr, size_t size, mode_t *mode_p)
{
if (posix_acl_check(xattr, size) < 0)
- return -EINVAL;
+ return -CEPHFS_EINVAL;
int not_equiv = 0;
mode_t mode = 0;
not_equiv = 1;
break;
default:
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
++entry;
}
int posix_acl_inherit_mode(bufferptr& acl, mode_t *mode_p)
{
if (posix_acl_check(acl.c_str(), acl.length()) <= 0)
- return -EIO;
+ return -CEPHFS_EIO;
acl_ea_entry *group_entry = NULL, *mask_entry = NULL;
mode_t mode = *mode_p;
not_equiv = 1;
break;
default:
- return -EIO;
+ return -CEPHFS_EIO;
}
++entry;
mask_entry->e_perm = perm;
} else {
if (!group_entry)
- return -EIO;
+ return -CEPHFS_EIO;
__u16 perm = group_entry->e_perm;
perm &= (mode >> 3) | ~S_IRWXO;
mode &= (perm << 3) | ~S_IRWXG;
int posix_acl_access_chmod(bufferptr& acl, mode_t mode)
{
if (posix_acl_check(acl.c_str(), acl.length()) <= 0)
- return -EIO;
+ return -CEPHFS_EIO;
acl_ea_entry *group_entry = NULL, *mask_entry = NULL;
mask_entry->e_perm = (mode & S_IRWXG) >> 3;
} else {
if (!group_entry)
- return -EIO;
+ return -CEPHFS_EIO;
group_entry->e_perm = (mode & S_IRWXG) >> 3;
}
return 0;
const UserPerm& perms, unsigned want)
{
if (posix_acl_check(acl.c_str(), acl.length()) < 0)
- return -EIO;
+ return -CEPHFS_EIO;
const acl_ea_header *header = reinterpret_cast<const acl_ea_header*>(acl.c_str());
const acl_ea_entry *entry = header->a_entries;
break;
case ACL_OTHER:
if (group_found)
- return -EACCES;
+ return -CEPHFS_EACCES;
else
goto check_perm;
break;
default:
- return -EIO;
+ return -CEPHFS_EIO;
}
++entry;
}
- return -EIO;
+ return -CEPHFS_EIO;
check_mask:
next_entry = entry + 1;
__u16 mask = next_entry->e_perm;
if ((perm & mask & want) == want)
return 0;
- return -EACCES;
+ return -CEPHFS_EACCES;
}
++next_entry;
}
check_perm:
if ((perm & want) == want)
return 0;
- return -EACCES;
+ return -CEPHFS_EACCES;
}
#include "types.h"
class JSONObj;
+#define CEPHFS_EBLOCKLISTED 108
+#define CEPHFS_EPERM 1
+#define CEPHFS_ESTALE 116
+#define CEPHFS_ENOSPC 28
+#define CEPHFS_ETIMEDOUT 110
+#define CEPHFS_EIO 5
+#define CEPHFS_ENOTCONN 107
+#define CEPHFS_EEXIST 17
+#define CEPHFS_EINTR 4
+#define CEPHFS_EINVAL 22
+#define CEPHFS_EBADF 9
+#define CEPHFS_EROFS 30
+#define CEPHFS_EAGAIN 11
+#define CEPHFS_EACCES 13
+#define CEPHFS_ELOOP 40
+#define CEPHFS_EISDIR 21
+#define CEPHFS_ENOENT 2
+#define CEPHFS_ENOTDIR 20
+#define CEPHFS_ENAMETOOLONG 36
+#define CEPHFS_EBUSY 16
+#define CEPHFS_EDQUOT 122
+#define CEPHFS_EFBIG 27
+#define CEPHFS_ERANGE 34
+#define CEPHFS_ENXIO 6
+#define CEPHFS_ECANCELED 125
+#define CEPHFS_ENODATA 61
+#define CEPHFS_EOPNOTSUPP 95
+#define CEPHFS_EXDEV 18
+#define CEPHFS_ENOMEM 12
+#define CEPHFS_ENOTRECOVERABLE 131
+#define CEPHFS_ENOSYS 38
+#define CEPHFS_EWOULDBLOCK CEPHFS_EAGAIN
+#define CEPHFS_ENOTEMPTY 39
+#define CEPHFS_EDEADLK 35
+#define CEPHFS_EDEADLOCK CEPHFS_EDEADLK
+#define CEPHFS_EDOM 33
+#define CEPHFS_EMLINK 31
+#define CEPHFS_ETIME 62
+#define CEPHFS_EOLDSNAPC 85
+
+
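// Note: the CEPHFS_* values above follow Linux errno numbering, which keeps
// them stable regardless of what the host C library defines; get_sys_errno()
// in the FUSE glue translates them back to host errnos. A Linux-only sanity
// check one could add in a unit test (a sketch, not part of this change):
//
//   #include <cerrno>
//   static_assert(CEPHFS_EPERM == EPERM && CEPHFS_ENOTCONN == ENOTCONN,
//                 "CEPHFS_* values follow Linux errno numbering");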
// --------------------------------------
// ino
CDirIOContext(d), fin(f), ret1(0), ret2(0), ret3(0) { }
void finish(int r) override {
// check the correctness of backtrace
- if (r >= 0 && ret3 != -ECANCELED)
+ if (r >= 0 && ret3 != -CEPHFS_ECANCELED)
dir->inode->verify_diri_backtrace(btbl, ret3);
if (r >= 0) r = ret1;
if (r >= 0) r = ret2;
rd.getxattr("parent", &fin->btbl, &fin->ret3);
rd.set_last_op_flags(CEPH_OSD_OP_FLAG_FAILOK);
} else {
- fin->ret3 = -ECANCELED;
+ fin->ret3 = -CEPHFS_ECANCELED;
}
mdcache->mds->objecter->read(oid, oloc, rd, CEPH_NOSNAP, NULL, 0,
dout(10) << "_fetched header " << hdrbl.length() << " bytes "
<< omap.size() << " keys for " << *this << dendl;
- ceph_assert(r == 0 || r == -ENOENT || r == -ENODATA);
+ ceph_assert(r == 0 || r == -CEPHFS_ENOENT || r == -CEPHFS_ENODATA);
ceph_assert(is_auth());
ceph_assert(!is_frozen());
<< err.what() << "(" << get_path() << ")";
// Remember that this dentry is damaged. Subsequent operations
- // that try to act directly on it will get their EIOs, but this
+ // that try to act directly on it will get their CEPHFS_EIOs, but this
// dirfrag as a whole will continue to look okay (minus the
// mysteriously-missing dentry)
go_bad_dentry(last, dname);
state_clear(STATE_FETCHING);
auth_unpin(this);
- finish_waiting(WAIT_COMPLETE, -EIO);
+ finish_waiting(WAIT_COMPLETE, -CEPHFS_EIO);
}
// -----------------------
{
if (r < 0) {
// the directory could be partly purged during MDS failover
- if (r == -ENOENT && committed_version == 0 &&
+ if (r == -CEPHFS_ENOENT && committed_version == 0 &&
!inode->is_base() && get_parent_dir()->inode->is_stray()) {
r = 0;
if (inode->snaprealm)
Context *fin;
C_IO_Inode_Fetched(CInode *i, Context *f) : CInodeIOContext(i), fin(f) {}
void finish(int r) override {
- // Ignore 'r', because we fetch from two places, so r is usually ENOENT
+ // Ignore 'r', because we fetch from two places, so r is usually CEPHFS_ENOENT
in->_fetched(bl, bl2, fin);
}
void print(ostream& out) const override {
p = bl.cbegin();
} else {
derr << "No data while reading inode " << ino() << dendl;
- fin->complete(-ENOENT);
+ fin->complete(-CEPHFS_ENOENT);
return;
}
if (magic != CEPH_FS_ONDISK_MAGIC) {
dout(0) << "on disk magic '" << magic << "' != my magic '" << CEPH_FS_ONDISK_MAGIC
<< "'" << dendl;
- fin->complete(-EINVAL);
+ fin->complete(-CEPHFS_EINVAL);
} else {
decode_store(p);
dout(10) << "_fetched " << *this << dendl;
}
} catch (buffer::error &err) {
derr << "Corrupt inode " << ino() << ": " << err.what() << dendl;
- fin->complete(-EINVAL);
+ fin->complete(-CEPHFS_EINVAL);
return;
}
}
void CInode::_stored_backtrace(int r, version_t v, Context *fin)
{
- if (r == -ENOENT) {
+ if (r == -CEPHFS_ENOENT) {
const int64_t pool = get_backtrace_pool();
bool exists = mdcache->mds->objecter->with_osdmap(
[pool](const OSDMap &osd_map) {
return osd_map.have_pg_pool(pool);
});
- // This ENOENT is because the pool doesn't exist (the user deleted it
+ // This CEPHFS_ENOENT is because the pool doesn't exist (the user deleted it
// out from under us), so the backtrace can never be written, so pretend
// to succeed so that the user can proceed to e.g. delete the file.
if (!exists) {
- dout(4) << __func__ << " got ENOENT: a data pool was deleted "
+ dout(4) << __func__ << " got CEPHFS_ENOENT: a data pool was deleted "
"beneath us!" << dendl;
r = 0;
}
if (backtrace.ancestors.empty() ||
backtrace.ancestors[0].dname != pdn->get_name() ||
backtrace.ancestors[0].dirino != pdn->get_dir()->ino())
- err = -EINVAL;
+ err = -CEPHFS_EINVAL;
}
if (err) {
sizeof(struct ceph_timespec) + 8; // btime + change_attr
if (bytes > max_bytes)
- return -ENOSPC;
+ return -CEPHFS_ENOSPC;
}
return 0;
}
}
- return -ENOENT;
+ return -CEPHFS_ENOENT;
} else {
*result = get_filesystem(fscid);
return 0;
if (r >= 0) {
ss << "Invalid file system";
}
- return -ENOENT;
+ return -CEPHFS_ENOENT;
}
return r;
if (colon_pos == std::string::npos) {
if (legacy_client_fscid == FS_CLUSTER_ID_NONE) {
ss << "No filesystem selected";
- return -ENOENT;
+ return -CEPHFS_ENOENT;
}
fs = get_filesystem(legacy_client_fscid);
rank_pos = 0;
} else {
if (parse_filesystem(role_str.substr(0, colon_pos), &fs) < 0) {
ss << "Invalid filesystem";
- return -ENOENT;
+ return -CEPHFS_ENOENT;
}
rank_pos = colon_pos+1;
}
long rank_i = strict_strtol(rank_str.c_str(), 10, &err);
if (rank_i < 0 || !err.empty()) {
ss << "Invalid rank '" << rank_str << "'";
- return -EINVAL;
+ return -CEPHFS_EINVAL;
} else {
rank = rank_i;
}
if (fs->mds_map.in.count(rank) == 0) {
ss << "Rank '" << rank << "' not found";
- return -ENOENT;
+ return -CEPHFS_ENOENT;
}
*role = {fs->fscid, rank};
try {
decode(q);
} catch (const buffer::error &e) {
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
} else {
dout(1) << "Journal pointer '" << object_id << "' read failed: " << cpp_strerror(r) << dendl;
/* success: store the balancer in memory and set the version. */
if (!r) {
if (ret_t == std::cv_status::timeout) {
- mds->objecter->op_cancel(tid, -ECANCELED);
- return -ETIMEDOUT;
+ mds->objecter->op_cancel(tid, -CEPHFS_ECANCELED);
+ return -CEPHFS_ETIMEDOUT;
}
bal_code.assign(lua_src.to_str());
bal_version.assign(oid.name);
/* mantle doesn't know about cluster size, so check target len here */
if ((int) state.targets.size() != cluster_size)
- return -EINVAL;
+ return -CEPHFS_EINVAL;
else if (ret)
return ret;
MDCacheIOContext(c, false), in(i), ls(l) {
}
void finish(int r) override {
- ceph_assert(r == 0 || r == -ENOENT);
+ ceph_assert(r == 0 || r == -CEPHFS_ENOENT);
mdcache->truncate_inode_finish(in, ls);
}
void print(ostream& out) const override {
MDRequestRef null_ref;
return path_traverse(null_ref, cf, path, MDS_TRAVERSE_DISCOVER, nullptr);
}
- return -ESTALE;
+ return -CEPHFS_ESTALE;
}
if (cur->state_test(CInode::STATE_PURGING))
- return -ESTALE;
+ return -CEPHFS_ESTALE;
if (flags & MDS_TRAVERSE_CHECK_LOCKCACHE)
mds->locker->find_and_attach_lock_cache(mdr, cur);
if (!cur->is_dir()) {
dout(7) << "traverse: " << *cur << " not a dir " << dendl;
- return -ENOTDIR;
+ return -CEPHFS_ENOTDIR;
}
// walk into snapdir?
if (path[depth].length() == 0) {
dout(10) << "traverse: snapdir" << dendl;
if (!mdr || depth > 0) // snapdir must be the first component
- return -EINVAL;
+ return -CEPHFS_EINVAL;
snapid = CEPH_SNAPDIR;
mdr->snapid = snapid;
depth++;
// walk thru snapdir?
if (snapid == CEPH_SNAPDIR) {
if (!mdr)
- return -EINVAL;
+ return -CEPHFS_EINVAL;
SnapRealm *realm = cur->find_snaprealm();
snapid = realm->resolve_snapname(path[depth], cur->ino());
dout(10) << "traverse: snap " << path[depth] << " -> " << snapid << dendl;
if (!snapid) {
if (pdnvec)
pdnvec->clear(); // do not confuse likes of rdlock_path_pin_ref();
- return -ENOENT;
+ return -CEPHFS_ENOENT;
}
mdr->snapid = snapid;
depth++;
if (mds->damage_table.is_dentry_damaged(curdir, path[depth], snapid)) {
dout(4) << "traverse: stopped lookup at damaged dentry "
<< *curdir << "/" << path[depth] << " snap=" << snapid << dendl;
- return -EIO;
+ return -CEPHFS_EIO;
}
// dentry
CDentry *dn = curdir->lookup(path[depth], snapid);
if (dn) {
if (dn->state_test(CDentry::STATE_PURGING))
- return -ENOENT;
+ return -CEPHFS_ENOENT;
if (rdlock_path) {
lov.clear();
pdnvec->push_back(dn);
CDentry::linkage_t *dnl = dn->get_projected_linkage();
- // can we conclude ENOENT?
+ // can we conclude CEPHFS_ENOENT?
if (dnl->is_null()) {
dout(10) << "traverse: null+readable dentry at " << *dn << dendl;
if (depth == path.depth() - 1) {
if (pdnvec)
pdnvec->clear(); // do not confuse likes of rdlock_path_pin_ref();
}
- return -ENOENT;
+ return -CEPHFS_ENOENT;
}
// do we have inode?
if (mds->damage_table.is_remote_damaged(dnl->get_remote_ino())) {
dout(4) << "traverse: remote dentry points to damaged ino "
<< *dn << dendl;
- return -EIO;
+ return -CEPHFS_EIO;
}
open_remote_dentry(dn, true, cf.build(),
(path_locked && depth == path.depth() - 1));
pdnvec->clear(); // do not confuse likes of rdlock_path_pin_ref();
}
}
- return -ENOENT;
+ return -CEPHFS_ENOENT;
} else {
// Check DamageTable for missing fragments before trying to fetch
if (mds->damage_table.is_dirfrag_damaged(curdir)) {
dout(4) << "traverse: damaged dirfrag " << *curdir
<< ", blocking fetch" << dendl;
- return -EIO;
+ return -CEPHFS_EIO;
}
// directory isn't complete; reload
MDCacheContext(c), ino(i), msg(m), parent(p) {}
void finish(int r) override {
if (r < 0 && !parent)
- r = -EAGAIN;
+ r = -CEPHFS_EAGAIN;
if (msg) {
mdcache->handle_open_ino(msg, r);
return;
} catch (const buffer::error &decode_exc) {
derr << "corrupt backtrace on ino x0" << std::hex << ino
<< std::dec << ": " << decode_exc.what() << dendl;
- open_ino_finish(ino, info, -EIO);
+ open_ino_finish(ino, info, -CEPHFS_EIO);
return;
}
if (backtrace.pool != info.pool && backtrace.pool != -1) {
new C_OnFinisher(fin, mds->finisher));
return;
}
- } else if (err == -ENOENT) {
+ } else if (err == -CEPHFS_ENOENT) {
int64_t meta_pool = mds->mdsmap->get_metadata_pool();
if (info.pool != meta_pool) {
dout(10) << " no object in pool " << info.pool
if (err == 0) {
if (backtrace.ancestors.empty()) {
dout(10) << " got empty backtrace " << dendl;
- err = -ESTALE;
+ err = -CEPHFS_ESTALE;
} else if (!info.ancestors.empty()) {
if (info.ancestors[0] == backtrace.ancestors[0]) {
dout(10) << " got same parents " << info.ancestors[0] << " 2 times" << dendl;
- err = -EINVAL;
+ err = -CEPHFS_EINVAL;
} else {
info.last_err = 0;
}
if (!diri->is_dir()) {
dout(10) << " " << *diri << " is not dir" << dendl;
if (i == 0)
- err = -ENOTDIR;
+ err = -CEPHFS_ENOTDIR;
break;
}
dout(10) << " no ino " << next_ino << " in " << *dir << dendl;
if (i == 0)
- err = -ENOENT;
+ err = -CEPHFS_ENOENT;
} else if (discover) {
if (!dnl) {
filepath path(name, 0);
}
dout(10) << " no ino " << next_ino << " in " << *dir << dendl;
if (i == 0)
- err = -ENOENT;
+ err = -CEPHFS_ENOENT;
}
}
if (hint && i == 0)
void MDCache::do_open_ino(inodeno_t ino, open_ino_info_t& info, int err)
{
- if (err < 0 && err != -EAGAIN) {
+ if (err < 0 && err != -CEPHFS_EAGAIN) {
info.checked.clear();
info.checking = MDS_RANK_NONE;
info.check_peers = true;
info.discover = false;
info.ancestors.clear();
}
- if (err != -ENOENT && err != -ENOTDIR)
+ if (err != -CEPHFS_ENOENT && err != -CEPHFS_ENOTDIR)
info.last_err = err;
}
dout(5) << "find_ino_peers " << ino << " hint " << hint << dendl;
CInode *in = get_inode(ino);
if (in && in->state_test(CInode::STATE_PURGING)) {
- c->complete(-ESTALE);
+ c->complete(-CEPHFS_ESTALE);
return;
}
ceph_assert(!in);
dout(10) << "_do_find_ino_peer waiting for more peers to be active" << dendl;
} else {
dout(10) << "_do_find_ino_peer failed on " << fip.ino << dendl;
- fip.fin->complete(-ESTALE);
+ fip.fin->complete(-CEPHFS_ESTALE);
find_ino_peer.erase(fip.tid);
}
} else {
if (mds->logger) mds->logger->inc(l_mds_forward);
} else if (mdr->internal_op >= 0) {
dout(10) << "request_forward on internal op; cancelling" << dendl;
- mdr->internal_op_finish->complete(-EXDEV);
+ mdr->internal_op_finish->complete(-CEPHFS_EXDEV);
} else {
dout(7) << "request_forward drop " << *mdr << " req " << *mdr->client_request
<< " was from mds" << dendl;
}
// waiters
- finish_contexts(g_ceph_context, error, -ENOENT); // finish errors directly
+ finish_contexts(g_ceph_context, error, -CEPHFS_ENOENT); // finish errors directly
mds->queue_waiters(finished);
}
const MDRequestRef& r) :
MDCacheIOContext(m), basedirfrag(f), bits(b), mdr(r) {}
void finish(int r) override {
- ceph_assert(r == 0 || r == -ENOENT);
+ ceph_assert(r == 0 || r == -CEPHFS_ENOENT);
mdcache->_fragment_old_purged(basedirfrag, bits, mdr);
}
void print(ostream& out) const override {
f->close_section();
} else {
derr << "cache usage exceeds dump threshold" << dendl;
- r = -EINVAL;
+ r = -CEPHFS_EINVAL;
}
return r;
}
// Cannot scrub same dentry twice at same time
if (in->scrub_is_in_progress()) {
- mds->server->respond_to_request(mdr, -EBUSY);
+ mds->server->respond_to_request(mdr, -CEPHFS_EBUSY);
return;
} else {
in->scrub_info();
dout(10) << __func__ << " " << *dir << dendl;
if (!dir->is_auth()) {
- mds->server->respond_to_request(mdr, -ESTALE);
+ mds->server->respond_to_request(mdr, -CEPHFS_ESTALE);
return;
}
dout(10) << __func__ << " " << *diri << dendl;
if (!diri->is_auth()) {
- mds->server->respond_to_request(mdr, -ESTALE);
+ mds->server->respond_to_request(mdr, -CEPHFS_ESTALE);
return;
}
if (!diri->is_dir()) {
- mds->server->respond_to_request(mdr, -ENOTDIR);
+ mds->server->respond_to_request(mdr, -CEPHFS_ENOTDIR);
return;
}
CInode *diri = static_cast<CInode*>(mdr->internal_op_private);
dout(10) << __func__ << " " << *diri << dendl;
if (!diri->is_auth()) {
- mds->server->respond_to_request(mdr, -ESTALE);
+ mds->server->respond_to_request(mdr, -CEPHFS_ESTALE);
return;
}
if (!diri->is_dir()) {
- mds->server->respond_to_request(mdr, -ENOTDIR);
+ mds->server->respond_to_request(mdr, -CEPHFS_ENOTDIR);
return;
}
{
if (is_readonly()) {
dout(10) << __func__ << ": read-only FS" << dendl;
- fin->complete(-EROFS);
+ fin->complete(-CEPHFS_EROFS);
return;
}
dout(10) << "flush_dentry " << path << dendl;
MDSRank *mds = get_mds();
// assume journal is reliable, so don't choose action based on
// g_conf()->mds_action_on_write_error.
- if (r == -EBLOCKLISTED) {
+ if (r == -CEPHFS_EBLOCKLISTED) {
derr << "we have been blocklisted (fenced), respawning..." << dendl;
mds->respawn();
} else {
if (pending_events.count(ls->seq)) {
dout(5) << __func__ << ": segment " << ls->seq << " has pending events" << dendl;
submit_mutex.unlock();
- return -EAGAIN;
+ return -CEPHFS_EAGAIN;
}
if (expiring_segments.count(ls)) {
// front = default ino and back = null
JournalPointer jp(mds->get_nodeid(), mds->mdsmap->get_metadata_pool());
const int read_result = jp.load(mds->objecter);
- if (read_result == -ENOENT) {
+ if (read_result == -CEPHFS_ENOENT) {
inodeno_t const default_log_ino = MDS_INO_LOG_OFFSET + mds->get_nodeid();
jp.front = default_log_ino;
int write_result = jp.save(mds->objecter);
// Nothing graceful we can do for this
ceph_assert(write_result >= 0);
- } else if (read_result == -EBLOCKLISTED) {
+ } else if (read_result == -CEPHFS_EBLOCKLISTED) {
derr << "Blocklisted during JournalPointer read! Respawning..." << dendl;
mds->respawn();
ceph_abort(); // Should be unreachable because respawn calls execv
if (mds->is_daemon_stopping()) {
return;
}
- completion->complete(-EAGAIN);
+ completion->complete(-CEPHFS_EAGAIN);
return;
}
dout(1) << "Erasing journal " << jp.back << dendl;
C_SaferCond recover_wait;
back.recover(&recover_wait);
int recovery_result = recover_wait.wait();
- if (recovery_result == -EBLOCKLISTED) {
+ if (recovery_result == -CEPHFS_EBLOCKLISTED) {
derr << "Blocklisted during journal recovery! Respawning..." << dendl;
mds->respawn();
ceph_abort(); // Should be unreachable because respawn calls execv
// If we are successful, or find no data, we can update the JournalPointer to
// reflect that the back journal is gone.
- if (erase_result != 0 && erase_result != -ENOENT) {
+ if (erase_result != 0 && erase_result != -CEPHFS_ENOENT) {
derr << "Failed to erase journal " << jp.back << ": " << cpp_strerror(erase_result) << dendl;
} else {
dout(1) << "Successfully erased journal, updating journal pointer" << dendl;
int recovery_result = recover_wait.wait();
dout(4) << "Journal " << jp.front << " recovered." << dendl;
- if (recovery_result == -EBLOCKLISTED) {
+ if (recovery_result == -CEPHFS_EBLOCKLISTED) {
derr << "Blocklisted during journal recovery! Respawning..." << dendl;
mds->respawn();
ceph_abort(); // Should be unreachable because respawn calls execv
delete front_journal;
return;
}
- completion->complete(-EINVAL);
+ completion->complete(-CEPHFS_EINVAL);
}
} else if (mds->is_standby_replay() || front_journal->get_stream_format() >= g_conf()->mds_journal_format) {
/* The journal is of configured format, or we are in standbyreplay and will
if (journaler->get_error()) {
r = journaler->get_error();
dout(0) << "_replay journaler got error " << r << ", aborting" << dendl;
- if (r == -ENOENT) {
+ if (r == -CEPHFS_ENOENT) {
if (mds->is_standby_replay()) {
// journal has been trimmed by somebody else
- r = -EAGAIN;
+ r = -CEPHFS_EAGAIN;
} else {
mds->clog->error() << "missing journal object";
mds->damaged_unlocked();
ceph_abort(); // Should be unreachable because damaged() calls respawn()
}
- } else if (r == -EINVAL) {
+ } else if (r == -CEPHFS_EINVAL) {
if (journaler->get_read_pos() < journaler->get_expire_pos()) {
// this should only happen if you're following somebody else
if(journaler->is_readonly()) {
- dout(0) << "expire_pos is higher than read_pos, returning EAGAIN" << dendl;
- r = -EAGAIN;
+ dout(0) << "expire_pos is higher than read_pos, returning CEPHFS_EAGAIN" << dendl;
+ r = -CEPHFS_EAGAIN;
} else {
mds->clog->error() << "invalid journaler offsets";
mds->damaged_unlocked();
journaler->reread_head(&reread_fin);
int err = reread_fin.wait();
if (err) {
- if (err == -ENOENT && mds->is_standby_replay()) {
- r = -EAGAIN;
+ if (err == -CEPHFS_ENOENT && mds->is_standby_replay()) {
+ r = -CEPHFS_EAGAIN;
dout(1) << "Journal header went away while in standby replay, journal rewritten?"
<< dendl;
break;
}
standby_trim_segments();
if (journaler->get_read_pos() < journaler->get_expire_pos()) {
- dout(0) << "expire_pos is higher than read_pos, returning EAGAIN" << dendl;
- r = -EAGAIN;
+ dout(0) << "expire_pos is higher than read_pos, returning CEPHFS_EAGAIN" << dendl;
+ r = -CEPHFS_EAGAIN;
}
}
}
return;
}
- if (r == -EBLOCKLISTED) {
+ if (r == -CEPHFS_EBLOCKLISTED) {
derr << "MDSIOContextBase: blocklisted! Restarting..." << dendl;
mds->respawn();
} else {
dout(1) << "asok_command: " << command << " " << cmdmap
<< " (starting...)" << dendl;
- int r = -ENOSYS;
+ int r = -CEPHFS_ENOSYS;
bufferlist outbl;
CachedStackStringStream css;
auto& ss = *css;
} else if (command == "heap") {
if (!ceph_using_tcmalloc()) {
ss << "not using tcmalloc";
- r = -EOPNOTSUPP;
+ r = -CEPHFS_EOPNOTSUPP;
} else {
string heapcmd;
cmd_getval(cmdmap, "heapcmd", heapcmd);
return;
} catch (const TOPNSPC::common::bad_cmd_get& e) {
ss << e.what();
- r = -EINVAL;
+ r = -CEPHFS_EINVAL;
}
}
}
// to run on Windows.
derr << "The Ceph MDS does not support running on Windows at the moment."
<< dendl;
- return -ENOSYS;
+ return -CEPHFS_ENOSYS;
#endif // _WIN32
dout(10) << "Dumping misc struct sizes:" << dendl;
<< "maximum retry time reached." << dendl;
std::lock_guard locker{mds_lock};
suicide();
- return -ETIMEDOUT;
+ return -CEPHFS_ETIMEDOUT;
}
mds_lock.lock();
<< *m->get_connection()->peer_addrs << dendl;
ss << "permission denied";
- r = -EACCES;
+ r = -CEPHFS_EACCES;
} else if (m->cmd.empty()) {
- r = -EINVAL;
+ r = -CEPHFS_EINVAL;
ss << "no command given";
} else if (!TOPNSPC::common::cmdmap_from_json(m->cmd, &cmdmap, ss)) {
- r = -EINVAL;
+ r = -CEPHFS_EINVAL;
} else {
cct->get_admin_socket()->queue_tell_command(m);
return;
int remove_data_pool(int64_t poolid) {
std::vector<int64_t>::iterator p = std::find(data_pools.begin(), data_pools.end(), poolid);
if (p == data_pools.end())
- return -ENOENT;
+ return -CEPHFS_ENOENT;
data_pools.erase(p);
return 0;
}
if (mdcache->is_readonly()) {
dout(5) << __func__ << ": read-only FS" << dendl;
- complete(-EROFS);
+ complete(-CEPHFS_EROFS);
return;
}
}
private:
- // context which completes itself (with -ETIMEDOUT) after a specified
+ // context which completes itself (with -CEPHFS_ETIMEDOUT) after a specified
// timeout or when explicitly completed, whichever comes first. Note
// that the context does not destroy itself after completion -- it
// needs to be explicitly freed.
timer_task = new LambdaContext([this](int) {
timer_task = nullptr;
- complete(-ETIMEDOUT);
+ complete(-CEPHFS_ETIMEDOUT);
});
mds->timer.add_event_after(timeout, timer_task);
}
} else if (recall_timeout > 0 && duration > recall_timeout) {
gather.set_finisher(new C_MDSInternalNoop);
gather.activate();
- return handle_recall_client_state(-ETIMEDOUT);
+ return handle_recall_client_state(-CEPHFS_ETIMEDOUT);
} else {
uint64_t remaining = (recall_timeout == 0 ? 0 : recall_timeout-duration);
C_ContextTimeout *ctx = new C_ContextTimeout(
void MDSRank::handle_write_error(int err)
{
- if (err == -EBLOCKLISTED) {
+ if (err == -CEPHFS_EBLOCKLISTED) {
derr << "we have been blocklisted (fenced), respawning..." << dendl;
respawn();
return;
{
// Handle errors from previous step
if (r < 0) {
- if (is_standby_replay() && (r == -EAGAIN)) {
- dout(0) << "boot_start encountered an error EAGAIN"
+ if (is_standby_replay() && (r == -CEPHFS_EAGAIN)) {
+ dout(0) << "boot_start encountered an error CEPHFS_EAGAIN"
<< ", respawning since we fell behind journal" << dendl;
respawn();
- } else if (r == -EINVAL || r == -ENOENT) {
+ } else if (r == -CEPHFS_EINVAL || r == -CEPHFS_ENOENT) {
// Invalid or absent data, indicates damaged on-disk structures
clog->error() << "Error loading MDS rank " << whoami << ": "
<< cpp_strerror(r);
damaged();
ceph_assert(r == 0); // Unreachable, damaged() calls respawn()
- } else if (r == -EROFS) {
+ } else if (r == -CEPHFS_EROFS) {
dout(0) << "boot error forcing transition to read-only; MDS will try to continue" << dendl;
} else {
// Completely unexpected error, give up and die
if (!got_val) {
*css << "no target epoch given";
- r = -EINVAL;
+ r = -CEPHFS_EINVAL;
goto out;
}
{
SessionFilter filter;
r = filter.parse(filter_args, css.get());
if (r != 0) {
- r = -EINVAL;
+ r = -CEPHFS_EINVAL;
goto out;
}
evict_clients(filter, on_finish);
std::string client_id;
if (!cmd_getval(cmdmap, "client_id", client_id)) {
*css << "Invalid client_id specified";
- r = -ENOENT;
+ r = -CEPHFS_ENOENT;
goto out;
}
std::lock_guard l(mds_lock);
g_conf()->mds_session_blocklist_on_evict, *css);
if (!evicted) {
dout(15) << css->strv() << dendl;
- r = -ENOENT;
+ r = -CEPHFS_ENOENT;
}
} else if (command == "session config" ||
command == "client config") {
command == "scrub_start") {
if (whoami != 0) {
*css << "Not rank 0";
- r = -EXDEV;
+ r = -CEPHFS_EXDEV;
goto out;
}
} else if (command == "scrub abort") {
if (whoami != 0) {
*css << "Not rank 0";
- r = -EXDEV;
+ r = -CEPHFS_EXDEV;
goto out;
}
} else if (command == "scrub pause") {
if (whoami != 0) {
*css << "Not rank 0";
- r = -EXDEV;
+ r = -CEPHFS_EXDEV;
goto out;
}
} else if (command == "scrub resume") {
if (whoami != 0) {
*css << "Not rank 0";
- r = -EXDEV;
+ r = -CEPHFS_EXDEV;
goto out;
}
command_scrub_resume(f);
} else if (command == "tag path") {
if (whoami != 0) {
*css << "Not rank 0";
- r = -EXDEV;
+ r = -CEPHFS_EXDEV;
goto out;
}
string path;
string path;
if(!cmd_getval(cmdmap, "path", path)) {
*css << "malformed path";
- r = -EINVAL;
+ r = -CEPHFS_EINVAL;
goto out;
}
int64_t rank;
if(!cmd_getval(cmdmap, "rank", rank)) {
*css << "malformed rank";
- r = -EINVAL;
+ r = -CEPHFS_EINVAL;
goto out;
}
command_export_dir(f, path, (mds_rank_t)rank);
if (mdsmap->get_tableserver() == whoami) {
snapserver->dump(f);
} else {
- r = -EXDEV;
+ r = -CEPHFS_EXDEV;
*css << "Not snapserver";
}
} else {
std::lock_guard l(mds_lock);
damage_entry_id_t id = 0;
if (!cmd_getval(cmdmap, "damage_id", (int64_t&)id)) {
- r = -EINVAL;
+ r = -CEPHFS_EINVAL;
goto out;
}
damage_table.erase(id);
} else {
- r = -ENOSYS;
+ r = -CEPHFS_ENOSYS;
}
out:
on_finish(r, css->str(), outbl);
{
bufferlist outbl;
if (is_any_replay()) {
- on_finish(-EAGAIN, "MDS is replaying log", outbl);
+ on_finish(-CEPHFS_EAGAIN, "MDS is replaying log", outbl);
return;
}
if (target == whoami || !mdsmap->is_up(target) || !mdsmap->is_in(target)) {
derr << "bad MDS target " << target << dendl;
- return -ENOENT;
+ return -CEPHFS_ENOENT;
}
CInode *in = mdcache->cache_traverse(fp);
if (!in) {
derr << "Bath path '" << path << "'" << dendl;
- return -ENOENT;
+ return -CEPHFS_ENOENT;
}
CDir *dir = in->get_dirfrag(frag_t());
if (!dir || !(dir->is_auth())) {
derr << "bad export_dir path dirfrag frag_t() or dir not auth" << dendl;
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
mdcache->migrator->export_dir(dir, target);
Session *session = sessionmap.get_session(entity_name_t(CEPH_ENTITY_TYPE_CLIENT, session_id));
if (!session) {
ss << "session " << session_id << " not in sessionmap!";
- return -ENOENT;
+ return -CEPHFS_ENOENT;
}
if (option == "timeout") {
auto it = session->info.client_metadata.find("timeout");
if (it == session->info.client_metadata.end()) {
ss << "Nonexistent config: " << option;
- return -ENODATA;
+ return -CEPHFS_ENODATA;
}
session->info.client_metadata.erase(it);
} else {
strtoul(value.c_str(), &end, 0);
if (*end) {
ss << "Invalid config for timeout: " << value;
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
session->info.client_metadata[option] = value;
}
//sessionmap._mark_dirty(session, true);
} else {
ss << "Invalid config option: " << option;
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
return 0;
{
ceph_assert(is_opening());
state = STATE_ACTIVE;
- if (r == -EBLOCKLISTED) {
+ if (r == -CEPHFS_EBLOCKLISTED) {
mds->respawn();
return;
}
if (luaL_loadstring(L, script.data())) {
mantle_dout(0) << "WARNING: mantle could not load balancer: "
<< lua_tostring(L, -1) << mantle_dendl;
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
/* tell the balancer which mds is making the decision */
if (lua_pcall(L, 0, 1, 0) != LUA_OK) {
mantle_dout(0) << "WARNING: mantle could not execute script: "
<< lua_tostring(L, -1) << mantle_dendl;
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
/* parse response by iterating over Lua stack */
if (lua_istable(L, -1) == 0) {
mantle_dout(0) << "WARNING: mantle script returned a malformed response" << mantle_dendl;
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
/* fill in return value */
for (lua_pushnil(L); lua_next(L, -2); lua_pop(L, 1)) {
if (!lua_isinteger(L, -2) || !lua_isnumber(L, -1)) {
mantle_dout(0) << "WARNING: mantle script returned a malformed response" << mantle_dendl;
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
mds_rank_t rank(lua_tointeger(L, -2));
my_targets[rank] = lua_tonumber(L, -1);
std::map<std::string, bufferlist> &values)
{
using ceph::decode;
- int err = -EINVAL;
+ int err = -CEPHFS_EINVAL;
auto decode_func = [this](unsigned idx, inodeno_t ino, bufferlist &bl) {
auto p = bl.cbegin();
waiting_for_recovery.push_back(completion);
journaler.recover(new LambdaContext([this](int r){
- if (r == -ENOENT) {
+ if (r == -CEPHFS_ENOENT) {
dout(1) << "Purge Queue not found, assuming this is an upgrade and "
"creating it." << dendl;
create(NULL);
c->complete(0);
} else if (readonly) {
dout(10) << "cannot wait for recovery: PurgeQueue is readonly" << dendl;
- c->complete(-EROFS);
+ c->complete(-CEPHFS_EROFS);
} else {
waiting_for_recovery.push_back(c);
}
if (readonly) {
dout(10) << "cannot push inode: PurgeQueue is readonly" << dendl;
- completion->complete(-EROFS);
+ completion->complete(-CEPHFS_EROFS);
return;
}
std::lock_guard l(lock);
if (r == 0) {
_consume();
- } else if (r != -EAGAIN) {
+ } else if (r != -CEPHFS_EAGAIN) {
_go_readonly(r);
}
}));
} catch (const buffer::error &err) {
derr << "Decode error at read_pos=0x" << std::hex
<< journaler.get_read_pos() << dendl;
- _go_readonly(EIO);
+ _go_readonly(CEPHFS_EIO);
}
dout(20) << " executing item (" << item.ino << ")" << dendl;
_execute_item(item, journaler.get_read_pos());
new LambdaContext([this, expire_to](int r) {
std::lock_guard l(lock);
- if (r == -EBLOCKLISTED) {
+ if (r == -CEPHFS_EBLOCKLISTED) {
finisher.queue(on_error, r);
on_error = nullptr;
return;
if (r != 0) {
dout(0) << "recovery error! " << r << dendl;
- if (r == -EBLOCKLISTED) {
+ if (r == -CEPHFS_EBLOCKLISTED) {
mds->respawn();
return;
} else {
if (CInode *in = dynamic_cast<CInode*>(obj)) {
if (in->scrub_is_in_progress()) {
dout(10) << __func__ << " with {" << *in << "}" << ", already in scrubbing" << dendl;
- return -EBUSY;
+ return -CEPHFS_EBUSY;
}
dout(10) << __func__ << " with {" << *in << "}" << ", top=" << top << dendl;
} else if (CDir *dir = dynamic_cast<CDir*>(obj)) {
if (dir->scrub_is_in_progress()) {
dout(10) << __func__ << " with {" << *dir << "}" << ", already in scrubbing" << dendl;
- return -EBUSY;
+ return -CEPHFS_EBUSY;
}
dout(10) << __func__ << " with {" << *dir << "}" << ", top=" << top << dendl;
{
// abort in progress
if (clear_stack)
- return -EAGAIN;
+ return -CEPHFS_EAGAIN;
header->set_origin(in->ino());
auto ret = scrubbing_map.emplace(header->get_tag(), header);
if (!ret.second) {
dout(10) << __func__ << " with {" << *in << "}"
<< ", conflicting tag " << header->get_tag() << dendl;
- return -EEXIST;
+ return -CEPHFS_EEXIST;
}
int r = _enqueue(in, header, top);
// abort is in progress
if (clear_stack) {
if (on_finish)
- on_finish->complete(-EINVAL);
+ on_finish->complete(-CEPHFS_EINVAL);
return;
}
int r = 0;
if (clear_stack) {
- r = -EINVAL;
+ r = -CEPHFS_EINVAL;
} else if (state == STATE_PAUSING) {
set_state(STATE_RUNNING);
- complete_control_contexts(-ECANCELED);
+ complete_control_contexts(-CEPHFS_ECANCELED);
} else if (state == STATE_PAUSED) {
set_state(STATE_RUNNING);
kick_off_scrubs();
* caller should provide a context which is completed after all
* in-progress scrub operations are completed and pending inodes
* are removed from the scrub stack (with the context callbacks for
- * inodes completed with -ECANCELED).
+ * inodes completed with -CEPHFS_ECANCELED).
* @param on_finish Context callback to invoke after abort
*/
void scrub_abort(Context *on_finish);
/**
* Resume a paused scrub. Unlike abort or pause, this is instantaneous.
* Pending pause operations are cancelled (context callbacks are
- * invoked with -ECANCELED).
- * @returns 0 (success) if resumed, -EINVAL if an abort is in-progress.
+ * invoked with -CEPHFS_ECANCELED).
+ * @returns 0 (success) if resumed, -CEPHFS_EINVAL if an abort is in-progress.
*/
bool scrub_resume();
/**
* Abort pending scrubs for inodes waiting in the inode stack.
- * Completion context is complete with -ECANCELED.
+ * Completion context is complete with -CEPHFS_ECANCELED.
*/
void abort_pending_scrubs();
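// Illustrative sketch (an assumption, not part of this change): an admin
// handler could chain the abort completion like so, with queued inodes'
// callbacks seeing -CEPHFS_ECANCELED as described above:
//
//   scrubstack->scrub_abort(new LambdaContext([on_finish](int r) {
//     bufferlist bl;
//     on_finish(r, r == 0 ? "scrub aborted" : "scrub abort failed", bl);
//   }));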
auto reply = make_message<MClientReclaimReply>(0);
if (m->get_uuid().empty()) {
dout(10) << __func__ << " invalid message (no uuid)" << dendl;
- reply->set_result(-EINVAL);
+ reply->set_result(-CEPHFS_EINVAL);
mds->send_message_client(reply, session);
return;
}
unsigned flags = m->get_flags();
if (flags != CEPH_RECLAIM_RESET) { // currently only support reset
dout(10) << __func__ << " unsupported flags" << dendl;
- reply->set_result(-EOPNOTSUPP);
+ reply->set_result(-CEPHFS_EOPNOTSUPP);
mds->send_message_client(reply, session);
return;
}
if (session->info.auth_name != target->info.auth_name) {
dout(10) << __func__ << " session auth_name " << session->info.auth_name
<< " != target auth_name " << target->info.auth_name << dendl;
- reply->set_result(-EPERM);
+ reply->set_result(-CEPHFS_EPERM);
mds->send_message_client(reply, session);
}
if (req->may_write() && mdcache->is_readonly()) {
dout(10) << " read-only FS" << dendl;
- respond_to_request(mdr, -EROFS);
+ respond_to_request(mdr, -CEPHFS_EROFS);
return;
}
if (mdr->has_more() && mdr->more()->peer_error) {
(!mdr->has_more() || mdr->more()->witnessed.empty())) // haven't started peer request
) {
- dout(20) << __func__ << ": full, responding ENOSPC to op " << ceph_mds_op_name(req->get_op()) << dendl;
- respond_to_request(mdr, -ENOSPC);
+ dout(20) << __func__ << ": full, responding CEPHFS_ENOSPC to op " << ceph_mds_op_name(req->get_op()) << dendl;
+ respond_to_request(mdr, -CEPHFS_ENOSPC);
return;
} else {
dout(20) << __func__ << ": full, permitting op " << ceph_mds_op_name(req->get_op()) << dendl;
default:
dout(1) << " unknown client op " << req->get_op() << dendl;
- respond_to_request(mdr, -EOPNOTSUPP);
+ respond_to_request(mdr, -CEPHFS_EOPNOTSUPP);
}
}
ceph_assert(ret);
if (ack->is_error_rofs()) {
- mdr->more()->peer_error = -EROFS;
+ mdr->more()->peer_error = -CEPHFS_EROFS;
} else if (ack->is_error_wouldblock()) {
- mdr->more()->peer_error = -EWOULDBLOCK;
+ mdr->more()->peer_error = -CEPHFS_EWOULDBLOCK;
}
// go again?
{
const auto size = in->get_frag_size();
if (size >= g_conf()->mds_bal_fragment_size_max) {
- dout(10) << "fragment " << *in << " size exceeds " << g_conf()->mds_bal_fragment_size_max << " (ENOSPC)" << dendl;
- respond_to_request(mdr, -ENOSPC);
+ dout(10) << "fragment " << *in << " size exceeds " << g_conf()->mds_bal_fragment_size_max << " (CEPHFS_ENOSPC)" << dendl;
+ respond_to_request(mdr, -CEPHFS_ENOSPC);
return false;
}
public:
C_MDS_TryFindInode(Server *s, MDRequestRef& r) : ServerContext(s), mdr(r) {}
void finish(int r) override {
- if (r == -ESTALE) // :( find_ino_peers failed
+ if (r == -CEPHFS_ESTALE) // :( find_ino_peers failed
server->respond_to_request(mdr, r);
else
server->dispatch_client_request(mdr);
if (r > 0)
return nullptr; // delayed
if (r < 0) { // error
- if (r == -ENOENT && !mdr->dn[0].empty()) {
+ if (r == -CEPHFS_ENOENT && !mdr->dn[0].empty()) {
if (mdr->client_request &&
mdr->client_request->get_dentry_wanted())
mdr->tracedn = mdr->dn[0].back();
respond_to_request(mdr, r);
- } else if (r == -ESTALE) {
- dout(10) << "FAIL on ESTALE but attempting recovery" << dendl;
+ } else if (r == -CEPHFS_ESTALE) {
+ dout(10) << "FAIL on CEPHFS_ESTALE but attempting recovery" << dendl;
MDSContext *c = new C_MDS_TryFindInode(this, mdr);
mdcache->find_ino_peers(refpath.get_ino(), c);
} else {
// figure parent dir vs dname
if (refpath.depth() == 0) {
dout(7) << "invalid path (zero length)" << dendl;
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return nullptr;
}
if (refpath.is_last_snap()) {
- respond_to_request(mdr, -EROFS);
+ respond_to_request(mdr, -CEPHFS_EROFS);
return nullptr;
}
if (refpath.is_last_dot_or_dotdot()) {
dout(7) << "invalid path (last dot or dot_dot)" << dendl;
if (create)
- respond_to_request(mdr, -EEXIST);
+ respond_to_request(mdr, -CEPHFS_EEXIST);
else
- respond_to_request(mdr, -ENOTEMPTY);
+ respond_to_request(mdr, -CEPHFS_ENOTEMPTY);
return nullptr;
}
if (r > 0)
return nullptr; // delayed
if (r < 0) {
- if (r == -ESTALE) {
- dout(10) << "FAIL on ESTALE but attempting recovery" << dendl;
+ if (r == -CEPHFS_ESTALE) {
+ dout(10) << "FAIL on CEPHFS_ESTALE but attempting recovery" << dendl;
mdcache->find_ino_peers(refpath.get_ino(), new C_MDS_TryFindInode(this, mdr));
return nullptr;
}
if (!mdr->reqid.name.is_mds()) {
if (diri->is_system() && !diri->is_root()) {
- respond_to_request(mdr, -EROFS);
+ respond_to_request(mdr, -CEPHFS_EROFS);
return nullptr;
}
}
if (!diri->is_base() && diri->get_projected_parent_dir()->inode->is_stray()) {
- respond_to_request(mdr, -ENOENT);
+ respond_to_request(mdr, -CEPHFS_ENOENT);
return nullptr;
}
CDentry::linkage_t *dnl = dn->get_projected_linkage();
if (dnl->is_null()) {
if (!create && okexist) {
- respond_to_request(mdr, -ENOENT);
+ respond_to_request(mdr, -CEPHFS_ENOENT);
return nullptr;
}
dn->first = std::max(dn->first, next_snap);
} else {
if (!okexist) {
- respond_to_request(mdr, -EEXIST);
+ respond_to_request(mdr, -CEPHFS_EEXIST);
return nullptr;
}
mdr->in[0] = dnl->get_inode();
return std::make_pair(mdr->dn[0].back(), mdr->dn[1].back());
if (refpath.depth() != 1 || refpath2.depth() != 1) {
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return std::pair<CDentry*, CDentry*>(nullptr, nullptr);
}
if (refpath.is_last_snap() || refpath2.is_last_snap()) {
- respond_to_request(mdr, -EROFS);
+ respond_to_request(mdr, -CEPHFS_EROFS);
return std::make_pair(nullptr, nullptr);
}
int flags = MDS_TRAVERSE_RDLOCK_SNAP | MDS_TRAVERSE_WANT_DENTRY | MDS_TRAVERSE_WANT_AUTH;
int r = mdcache->path_traverse(mdr, cf, refpath, flags, &mdr->dn[0]);
if (r != 0) {
- if (r == -ESTALE) {
- dout(10) << "ESTALE on path, attempting recovery" << dendl;
+ if (r == -CEPHFS_ESTALE) {
+ dout(10) << "CEPHFS_ESTALE on path, attempting recovery" << dendl;
mdcache->find_ino_peers(refpath.get_ino(), new C_MDS_TryFindInode(this, mdr));
} else if (r < 0) {
respond_to_request(mdr, r);
flags = MDS_TRAVERSE_RDLOCK_SNAP2 | MDS_TRAVERSE_WANT_DENTRY | MDS_TRAVERSE_DISCOVER;
r = mdcache->path_traverse(mdr, cf, refpath2, flags, &mdr->dn[1]);
if (r != 0) {
- if (r == -ESTALE) {
- dout(10) << "ESTALE on path2, attempting recovery" << dendl;
+ if (r == -CEPHFS_ESTALE) {
+ dout(10) << "CEPHFS_ESTALE on path2, attempting recovery" << dendl;
mdcache->find_ino_peers(refpath2.get_ino(), new C_MDS_TryFindInode(this, mdr));
} else if (r < 0) {
respond_to_request(mdr, r);
if (!mdr->reqid.name.is_mds()) {
if ((srcdir->get_inode()->is_system() && !srcdir->get_inode()->is_root()) ||
(destdir->get_inode()->is_system() && !destdir->get_inode()->is_root())) {
- respond_to_request(mdr, -EROFS);
+ respond_to_request(mdr, -CEPHFS_EROFS);
return std::make_pair(nullptr, nullptr);
}
}
if (!destdir->get_inode()->is_base() &&
destdir->get_inode()->get_projected_parent_dir()->inode->is_stray()) {
- respond_to_request(mdr, -ENOENT);
+ respond_to_request(mdr, -CEPHFS_ENOENT);
return std::make_pair(nullptr, nullptr);
}
return std::make_pair(nullptr, nullptr);
if (srcdn->get_projected_linkage()->is_null()) {
- respond_to_request(mdr, -ENOENT);
+ respond_to_request(mdr, -CEPHFS_ENOENT);
return std::make_pair(nullptr, nullptr);
}
if (req->get_filepath().depth() == 0 && is_lookup) {
// refpath can't be empty for lookup but it can for
// getattr (we do getattr with empty refpath for mount of '/')
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
inodeno_t ino = req->get_filepath().get_ino();
CInode *in = mdcache->get_inode(ino);
if (in && in->state_test(CInode::STATE_PURGING)) {
- respond_to_request(mdr, -ESTALE);
+ respond_to_request(mdr, -CEPHFS_ESTALE);
return;
}
if (!in) {
if (want_parent) {
if (in->is_base()) {
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
if (!diri || diri->is_stray()) {
- respond_to_request(mdr, -ESTALE);
+ respond_to_request(mdr, -CEPHFS_ESTALE);
return;
}
dout(10) << "reply to lookup_parent " << *in << dendl;
if (want_dentry) {
inodeno_t dirino = req->get_filepath2().get_ino();
if (!diri || (dirino != inodeno_t() && diri->ino() != dirino)) {
- respond_to_request(mdr, -ENOENT);
+ respond_to_request(mdr, -CEPHFS_ENOENT);
return;
}
dout(10) << "reply to lookup_name " << *in << dendl;
if (in->state_test(CInode::STATE_PURGING) ||
!in->has_snap_data(vino.snapid)) {
if (in->is_dir() || !parent_ino) {
- respond_to_request(mdr, -ESTALE);
+ respond_to_request(mdr, -CEPHFS_ESTALE);
return;
}
in = NULL;
}
if (!diri->is_dir()) {
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
return;
}
- respond_to_request(mdr, -ESTALE);
+ respond_to_request(mdr, -CEPHFS_ESTALE);
} else {
mdcache->open_ino(vino.ino, mds->mdsmap->get_metadata_pool(), new C_MDS_LookupIno2(this, mdr), false);
}
}
// give up
- if (r == -ENOENT || r == -ENODATA)
- r = -ESTALE;
+ if (r == -CEPHFS_ENOENT || r == -CEPHFS_ENODATA)
+ r = -CEPHFS_ESTALE;
respond_to_request(mdr, r);
}
int flags = req->head.args.open.flags;
int cmode = ceph_flags_to_mode(flags);
if (cmode < 0) {
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
if ((cmode & CEPH_FILE_MODE_WR) && mdcache->is_readonly()) {
dout(7) << "read-only FS" << dendl;
- respond_to_request(mdr, -EROFS);
+ respond_to_request(mdr, -CEPHFS_EROFS);
return;
}
// regular file?
/*if (!cur->inode.is_file() && !cur->inode.is_dir()) {
dout(7) << "not a file or dir " << *cur << dendl;
- respond_to_request(mdr, -ENXIO); // FIXME what error do we want?
+ respond_to_request(mdr, -CEPHFS_ENXIO); // FIXME what error do we want?
return;
}*/
if ((flags & CEPH_O_DIRECTORY) && !cur->is_dir() && !cur->is_symlink()) {
dout(7) << "specified O_DIRECTORY on non-directory " << *cur << dendl;
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
if ((flags & CEPH_O_TRUNC) && !cur->is_file()) {
dout(7) << "specified O_TRUNC on !(file|symlink) " << *cur << dendl;
- // we should return -EISDIR for directory, return -EINVAL for other non-regular
- respond_to_request(mdr, cur->is_dir() ? -EISDIR : -EINVAL);
+ // we should return -CEPHFS_EISDIR for directory, return -CEPHFS_EINVAL for other non-regular
+ respond_to_request(mdr, cur->is_dir() ? -CEPHFS_EISDIR : -CEPHFS_EINVAL);
return;
}
if (cur->get_inode()->inline_data.version != CEPH_INLINE_NONE &&
!mdr->session->get_connection()->has_feature(CEPH_FEATURE_MDS_INLINE_DATA)) {
dout(7) << "old client cannot open inline data file " << *cur << dendl;
- respond_to_request(mdr, -EPERM);
+ respond_to_request(mdr, -CEPHFS_EPERM);
return;
}
if (mdr->snapid != CEPH_NOSNAP &&
((cmode & CEPH_FILE_MODE_WR) || req->may_write())) {
dout(7) << "snap " << mdr->snapid << " is read-only " << *cur << dendl;
- respond_to_request(mdr, -EROFS);
+ respond_to_request(mdr, -CEPHFS_EROFS);
return;
}
int cmode = ceph_flags_to_mode(req->head.args.open.flags);
if (cmode < 0) {
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
if (req->get_alternate_name().size() > alternate_name_max) {
dout(10) << " alternate_name longer than " << alternate_name_max << dendl;
- respond_to_request(mdr, -ENAMETOOLONG);
+ respond_to_request(mdr, -CEPHFS_ENAMETOOLONG);
return;
}
dn->set_alternate_name(req->get_alternate_name());
if (!layout.is_valid()) {
dout(10) << " invalid initial file layout" << dendl;
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
if (!mds->mdsmap->is_data_pool(layout.pool_id)) {
dout(10) << " invalid data pool " << layout.pool_id << dendl;
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
// it's a directory, right?
if (!diri->is_dir()) {
// not a dir
- dout(10) << "reply to " << *req << " readdir -ENOTDIR" << dendl;
- respond_to_request(mdr, -ENOTDIR);
+ dout(10) << "reply to " << *req << " readdir -CEPHFS_ENOTDIR" << dendl;
+ respond_to_request(mdr, -CEPHFS_ENOTDIR);
return;
}
default:
dout(10) << "got unknown lock type " << set_lock.type
<< ", dropping request!" << dendl;
- respond_to_request(mdr, -EOPNOTSUPP);
+ respond_to_request(mdr, -CEPHFS_EOPNOTSUPP);
return;
}
if (mdr->more()->flock_was_waiting &&
!lock_state->is_waiting(set_lock)) {
dout(10) << " was waiting for lock but not anymore, must have been canceled " << set_lock << dendl;
- respond_to_request(mdr, -EINTR);
+ respond_to_request(mdr, -CEPHFS_EINTR);
} else if (!lock_state->add_lock(set_lock, will_wait, mdr->more()->flock_was_waiting, &deadlock)) {
dout(10) << " it failed on this attempt" << dendl;
// couldn't set lock right now
if (deadlock) {
- respond_to_request(mdr, -EDEADLK);
+ respond_to_request(mdr, -CEPHFS_EDEADLK);
} else if (!will_wait) {
- respond_to_request(mdr, -EWOULDBLOCK);
+ respond_to_request(mdr, -CEPHFS_EWOULDBLOCK);
} else {
dout(10) << " added to waiting list" << dendl;
ceph_assert(lock_state->is_waiting(set_lock));
default:
dout(10) << "got unknown lock type " << checking_lock.type << dendl;
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
lock_state->look_for_lock(checking_lock);
if (!cur) return;
if (mdr->snapid != CEPH_NOSNAP) {
- respond_to_request(mdr, -EROFS);
+ respond_to_request(mdr, -CEPHFS_EROFS);
return;
}
if (cur->ino() < MDS_INO_SYSTEM_BASE && !cur->is_base()) {
- respond_to_request(mdr, -EPERM);
+ respond_to_request(mdr, -CEPHFS_EPERM);
return;
}
uint64_t old_size = std::max<uint64_t>(pip->size, req->head.args.setattr.old_size);
- // ENOSPC on growing file while full, but allow shrinks
+ // CEPHFS_ENOSPC on growing file while full, but allow shrinks
if (is_full && req->head.args.setattr.size > old_size) {
- dout(20) << __func__ << ": full, responding ENOSPC to setattr with larger size" << dendl;
- respond_to_request(mdr, -ENOSPC);
+ dout(20) << __func__ << ": full, responding CEPHFS_ENOSPC to setattr with larger size" << dendl;
+ respond_to_request(mdr, -CEPHFS_ENOSPC);
return;
}
if (!cur) return;
if (mdr->snapid != CEPH_NOSNAP) {
- respond_to_request(mdr, -EROFS);
+ respond_to_request(mdr, -CEPHFS_EROFS);
return;
}
if (!cur->is_file()) {
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
if (cur->get_projected_inode()->size ||
cur->get_projected_inode()->truncate_seq > 1) {
- respond_to_request(mdr, -ENOTEMPTY);
+ respond_to_request(mdr, -CEPHFS_ENOTEMPTY);
return;
}
if (!layout.is_valid()) {
dout(10) << "bad layout" << dendl;
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
if (!mds->mdsmap->is_data_pool(layout.pool_id)) {
dout(10) << " invalid data pool " << layout.pool_id << dendl;
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
{
CInode *in = mdcache->get_inode(ino);
if (!in || in->state_test(CInode::STATE_PURGING)) {
- respond_to_request(mdr, -ESTALE);
+ respond_to_request(mdr, -CEPHFS_ESTALE);
return nullptr;
}
if (!in->is_auth()) {
return;
if (!cur->is_dir()) {
- respond_to_request(mdr, -ENOTDIR);
+ respond_to_request(mdr, -CEPHFS_ENOTDIR);
return;
}
if (!layout.is_valid()) {
dout(10) << "bad layout" << dendl;
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
if (!mds->mdsmap->is_data_pool(layout.pool_id)) {
dout(10) << " invalid data pool " << layout.pool_id << dendl;
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
keys_and_values<string::iterator> p; // create instance of parser
std::map<string, string> m; // map to receive results
if (!qi::parse(begin, end, p, m)) { // returns true if successful
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
string left(begin, end);
dout(10) << " parsed " << m << " left '" << left << "'" << dendl;
if (begin != end)
- return -EINVAL;
+ return -CEPHFS_EINVAL;
for (map<string,string>::iterator q = m.begin(); q != m.end(); ++q) {
// Skip validation on each attr, we do it once at the end (avoid
// rejecting intermediate states if the overall result is ok)
int64_t pool = osdmap.lookup_pg_pool_name(value);
if (pool < 0) {
dout(10) << " unknown pool " << value << dendl;
- return -ENOENT;
+ return -CEPHFS_ENOENT;
}
layout->pool_id = pool;
}
layout->pool_ns = value;
} else {
dout(10) << " unknown layout vxattr " << name << dendl;
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
} catch (boost::bad_lexical_cast const&) {
dout(10) << "bad vxattr value, unable to parse int for " << name << dendl;
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
if (validate && !layout->is_valid()) {
dout(10) << "bad layout" << dendl;
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
if (!mds->mdsmap->is_data_pool(layout->pool_id)) {
dout(10) << " invalid data pool " << layout->pool_id << dendl;
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
return 0;
}
keys_and_values<string::iterator> p; // create instance of parser
std::map<string, string> m; // map to receive results
if (!qi::parse(begin, end, p, m)) { // returns true if successful
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
string left(begin, end);
dout(10) << " parsed " << m << " left '" << left << "'" << dendl;
if (begin != end)
- return -EINVAL;
+ return -CEPHFS_EINVAL;
for (map<string,string>::iterator q = m.begin(); q != m.end(); ++q) {
int r = parse_quota_vxattr(string("quota.") + q->first, q->second, quota);
if (r < 0)
} else if (name == "quota.max_bytes") {
int64_t q = boost::lexical_cast<int64_t>(value);
if (q < 0)
- return -EINVAL;
+ return -CEPHFS_EINVAL;
quota->max_bytes = q;
} else if (name == "quota.max_files") {
int64_t q = boost::lexical_cast<int64_t>(value);
if (q < 0)
- return -EINVAL;
+ return -CEPHFS_EINVAL;
quota->max_files = q;
} else {
dout(10) << " unknown quota vxattr " << name << dendl;
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
} catch (boost::bad_lexical_cast const&) {
dout(10) << "bad vxattr value, unable to parse int for " << name << dendl;
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
if (!quota->is_valid()) {
dout(10) << "bad quota" << dendl;
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
return 0;
}
epoch = osdmap.get_epoch();
});
- if (r == -ENOENT) {
+ if (r == -CEPHFS_ENOENT) {
// we don't have the specified pool, make sure our map
// is newer than or as new as the client.
if (r < 0) {
- if (r == -ENOENT)
- r = -EINVAL;
+ if (r == -CEPHFS_ENOENT)
+ r = -CEPHFS_EINVAL;
respond_to_request(mdr, r);
return r;
bool adjust_realm = false;
if (name.compare(0, 15, "ceph.dir.layout") == 0) {
if (!cur->is_dir()) {
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
pip = pi.inode.get();
} else if (name.compare(0, 16, "ceph.file.layout") == 0) {
if (!cur->is_file()) {
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
if (cur->get_projected_inode()->size ||
cur->get_projected_inode()->truncate_seq > 1) {
- respond_to_request(mdr, -ENOTEMPTY);
+ respond_to_request(mdr, -CEPHFS_ENOTEMPTY);
return;
}
file_layout_t layout = cur->get_projected_inode()->layout;
pip = pi.inode.get();
} else if (name.compare(0, 10, "ceph.quota") == 0) {
if (!cur->is_dir()) {
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
mdcache->broadcast_quota_to_client(cur, exclude_ct, true);
} else if (name == "ceph.dir.subvolume"sv) {
if (!cur->is_dir()) {
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
val = boost::lexical_cast<bool>(value);
} catch (boost::bad_lexical_cast const&) {
dout(10) << "bad vxattr value, unable to parse bool for " << name << dendl;
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
inodeno_t subvol_ino = realm->get_subvolume_ino();
// can't create subvolume inside another subvolume
if (subvol_ino && subvol_ino != cur->ino()) {
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
}
adjust_realm = true;
} else if (name == "ceph.dir.pin"sv) {
if (!cur->is_dir() || cur->is_root()) {
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
if (rank < 0) rank = MDS_RANK_NONE;
} catch (boost::bad_lexical_cast const&) {
dout(10) << "bad vxattr value, unable to parse int for " << name << dendl;
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
pip = pi.inode.get();
} else if (name == "ceph.dir.pin.random"sv) {
if (!cur->is_dir() || cur->is_root()) {
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
val = boost::lexical_cast<double>(value);
} catch (boost::bad_lexical_cast const&) {
dout(10) << "bad vxattr value, unable to parse float for " << name << dendl;
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
if (val < 0.0 || 1.0 < val) {
- respond_to_request(mdr, -EDOM);
+ respond_to_request(mdr, -CEPHFS_EDOM);
return;
} else if (mdcache->export_ephemeral_random_max < val) {
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
pip = pi.inode.get();
} else if (name == "ceph.dir.pin.distributed"sv) {
if (!cur->is_dir() || cur->is_root()) {
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
val = boost::lexical_cast<bool>(value);
} catch (boost::bad_lexical_cast const&) {
dout(10) << "bad vxattr value, unable to parse bool for " << name << dendl;
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
pip = pi.inode.get();
} else {
dout(10) << " unknown vxattr " << name << dendl;
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
if (name == "ceph.dir.layout") {
if (!cur->is_dir()) {
- respond_to_request(mdr, -ENODATA);
+ respond_to_request(mdr, -CEPHFS_ENODATA);
return;
}
if (cur->is_root()) {
dout(10) << "can't remove layout policy on the root directory" << dendl;
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
if (!cur->get_projected_inode()->has_layout()) {
- respond_to_request(mdr, -ENODATA);
+ respond_to_request(mdr, -CEPHFS_ENODATA);
return;
}
return;
}
- respond_to_request(mdr, -ENODATA);
+ respond_to_request(mdr, -CEPHFS_ENODATA);
}
const Server::XattrHandler Server::xattr_handlers[] = {
if (op == CEPH_MDS_OP_SETXATTR) {
if (xattrs) {
if ((flags & CEPH_XATTR_CREATE) && xattrs->count(mempool::mds_co::string(xattr_name))) {
- dout(10) << "setxattr '" << xattr_name << "' XATTR_CREATE and EEXIST on " << *cur << dendl;
- return -EEXIST;
+ dout(10) << "setxattr '" << xattr_name << "' XATTR_CREATE and CEPHFS_EEXIST on " << *cur << dendl;
+ return -CEPHFS_EEXIST;
}
}
if ((flags & CEPH_XATTR_REPLACE) && !(xattrs && xattrs->count(mempool::mds_co::string(xattr_name)))) {
- dout(10) << "setxattr '" << xattr_name << "' XATTR_REPLACE and ENODATA on " << *cur << dendl;
- return -ENODATA;
+ dout(10) << "setxattr '" << xattr_name << "' XATTR_REPLACE and CEPHFS_ENODATA on " << *cur << dendl;
+ return -CEPHFS_ENODATA;
}
return 0;
if (op == CEPH_MDS_OP_RMXATTR) {
if (xattrs && xattrs->count(mempool::mds_co::string(xattr_name)) == 0) {
- dout(10) << "removexattr '" << xattr_name << "' and ENODATA on " << *cur << dendl;
- return -ENODATA;
+ dout(10) << "removexattr '" << xattr_name << "' and CEPHFS_ENODATA on " << *cur << dendl;
+ return -CEPHFS_ENODATA;
}
return 0;
}
derr << ": unhandled validation for: " << xattr_name << dendl;
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
void Server::xattr_set(InodeStoreBase::xattr_map_ptr xattrs, const std::string &xattr_name,
std::regex_search(value, match, regex);
if (match.size() != 3) {
derr << "mirror info parse error" << dendl;
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
cluster_id = match[1];
int Server::mirror_info_xattr_validate(CInode *cur, const InodeStoreBase::xattr_map_const_ptr xattrs,
XattrOp *xattr_op) {
if (!cur->is_root()) {
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
int v1 = xattr_validate(cur, xattrs, Server::MirrorXattrInfo::CLUSTER_ID, xattr_op->op, xattr_op->flags);
int v2 = xattr_validate(cur, xattrs, Server::MirrorXattrInfo::FS_ID, xattr_op->op, xattr_op->flags);
if (v1 != v2) {
derr << "inconsistent mirror info state (" << v1 << "," << v2 << ")" << dendl;
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
if (v1 < 0) {
}
if (!is_allowed_ceph_xattr(name)) {
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
return;
if (mdr->snapid != CEPH_NOSNAP) {
- respond_to_request(mdr, -EROFS);
+ respond_to_request(mdr, -CEPHFS_EROFS);
return;
}
if (((cur_xattrs_size + inc) > g_conf()->mds_max_xattr_pairs_size)) {
dout(10) << "xattr kv pairs size too big. cur_xattrs_size "
<< cur_xattrs_size << ", inc " << inc << dendl;
- respond_to_request(mdr, -ENOSPC);
+ respond_to_request(mdr, -CEPHFS_ENOSPC);
return;
}
}
}
if (!is_allowed_ceph_xattr(name)) {
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
return;
if (mdr->snapid != CEPH_NOSNAP) {
- respond_to_request(mdr, -EROFS);
+ respond_to_request(mdr, -CEPHFS_EROFS);
return;
}
ceph_assert(dn->get_projected_linkage()->is_null());
if (req->get_alternate_name().size() > alternate_name_max) {
dout(10) << " alternate_name longer than " << alternate_name_max << dendl;
- respond_to_request(mdr, -ENAMETOOLONG);
+ respond_to_request(mdr, -CEPHFS_ENAMETOOLONG);
return;
}
dn->set_alternate_name(req->get_alternate_name());
ceph_assert(dn->get_projected_linkage()->is_null());
if (req->get_alternate_name().size() > alternate_name_max) {
dout(10) << " alternate_name longer than " << alternate_name_max << dendl;
- respond_to_request(mdr, -ENAMETOOLONG);
+ respond_to_request(mdr, -CEPHFS_ENAMETOOLONG);
return;
}
dn->set_alternate_name(req->get_alternate_name());
ceph_assert(dn->get_projected_linkage()->is_null());
if (req->get_alternate_name().size() > alternate_name_max) {
dout(10) << " alternate_name longer than " << alternate_name_max << dendl;
- respond_to_request(mdr, -ENAMETOOLONG);
+ respond_to_request(mdr, -CEPHFS_ENAMETOOLONG);
}
dn->set_alternate_name(req->get_alternate_name());
if (req->get_filepath2().depth() == 0) {
targeti = mdcache->get_inode(req->get_filepath2().get_ino());
if (!targeti) {
- dout(10) << "ESTALE on path2, attempting recovery" << dendl;
+ dout(10) << "CEPHFS_ESTALE on path2, attempting recovery" << dendl;
mdcache->find_ino_peers(req->get_filepath2().get_ino(), new C_MDS_TryFindInode(this, mdr));
return;
}
CDentry *pdn = targeti->get_projected_parent_dn();
if (!pdn) {
dout(7) << "target has no parent dn, failing..." << dendl;
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
if (!mds->locker->try_rdlock_snap_layout(pdn->get_dir()->get_inode(), mdr, 1))
return;
if (!destdn->get_projected_linkage()->is_null()) {
- respond_to_request(mdr, -EEXIST);
+ respond_to_request(mdr, -CEPHFS_EEXIST);
return;
}
ceph_assert(destdn->get_projected_linkage()->is_null());
if (req->get_alternate_name().size() > alternate_name_max) {
dout(10) << " alternate_name longer than " << alternate_name_max << dendl;
- respond_to_request(mdr, -ENAMETOOLONG);
+ respond_to_request(mdr, -CEPHFS_ENAMETOOLONG);
return;
}
destdn->set_alternate_name(req->get_alternate_name());
if (targeti->is_dir()) {
dout(7) << "target is a dir, failing..." << dendl;
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
if (targeti->get_projected_inode()->nlink == 0) {
dout(7) << "target has no link, failing..." << dendl;
- respond_to_request(mdr, -ENOENT);
+ respond_to_request(mdr, -CEPHFS_ENOENT);
}
if ((!mdr->has_more() || mdr->more()->witnessed.empty())) {
target_realm->get_subvolume_ino() !=
dir->inode->find_snaprealm()->get_subvolume_ino()) {
dout(7) << "target is in different subvolume, failing..." << dendl;
- respond_to_request(mdr, -EXDEV);
+ respond_to_request(mdr, -CEPHFS_EXDEV);
return;
}
if (rmdir) {
// do empty directory checks
if (_dir_is_nonempty_unlocked(mdr, in)) {
- respond_to_request(mdr, -ENOTEMPTY);
+ respond_to_request(mdr, -CEPHFS_ENOTEMPTY);
return;
}
} else {
dout(7) << "handle_client_unlink on dir " << *in << ", returning error" << dendl;
- respond_to_request(mdr, -EISDIR);
+ respond_to_request(mdr, -CEPHFS_EISDIR);
return;
}
} else {
if (rmdir) {
// unlink
dout(7) << "handle_client_rmdir on non-dir " << *in << ", returning error" << dendl;
- respond_to_request(mdr, -ENOTDIR);
+ respond_to_request(mdr, -CEPHFS_ENOTDIR);
return;
}
}
if (in->is_dir() &&
_dir_is_nonempty(mdr, in)) {
- respond_to_request(mdr, -ENOTEMPTY);
+ respond_to_request(mdr, -CEPHFS_ENOTEMPTY);
return;
}
MDS_TRAVERSE_DISCOVER | MDS_TRAVERSE_PATH_LOCKED,
&trace, &in);
if (r > 0) return;
- if (r == -ESTALE) {
+ if (r == -CEPHFS_ESTALE) {
mdcache->find_ino_peers(srcpath.get_ino(), new C_MDS_RetryRequest(mdcache, mdr),
mdr->peer_to_mds, true);
return;
filepath destpath = req->get_filepath();
filepath srcpath = req->get_filepath2();
if (srcpath.is_last_dot_or_dotdot() || destpath.is_last_dot_or_dotdot()) {
- respond_to_request(mdr, -EBUSY);
+ respond_to_request(mdr, -CEPHFS_EBUSY);
return;
}
if (req->get_alternate_name().size() > alternate_name_max) {
dout(10) << " alternate_name longer than " << alternate_name_max << dendl;
- respond_to_request(mdr, -ENAMETOOLONG);
+ respond_to_request(mdr, -CEPHFS_ENAMETOOLONG);
return;
}
// e.g. mv /usr /usr/foo
if (srci->is_dir() && srci->is_projected_ancestor_of(destdir->get_inode())) {
dout(7) << "cannot rename item to be a child of itself" << dendl;
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
MDS_INO_IS_STRAY(destpath.get_ino())) &&
!(destdnl->is_remote() &&
destdnl->get_remote_ino() == srci->ino())) {
- respond_to_request(mdr, -EINVAL); // actually, this won't reply, but whatev.
+ respond_to_request(mdr, -CEPHFS_EINVAL); // actually, this won't reply, but whatev.
return;
}
// non-empty dir? do trivial fast unlocked check, do another check later with read locks
if (oldin->is_dir() && _dir_is_nonempty_unlocked(mdr, oldin)) {
- respond_to_request(mdr, -ENOTEMPTY);
+ respond_to_request(mdr, -CEPHFS_ENOTEMPTY);
return;
}
// mv /some/thing /to/some/existing_other_thing
if (oldin->is_dir() && !srci->is_dir()) {
- respond_to_request(mdr, -EISDIR);
+ respond_to_request(mdr, -CEPHFS_EISDIR);
return;
}
if (!oldin->is_dir() && srci->is_dir()) {
- respond_to_request(mdr, -ENOTDIR);
+ respond_to_request(mdr, -CEPHFS_ENOTDIR);
return;
}
if (srci == oldin && !srcdir->inode->is_stray()) {
}
if (destdn->get_alternate_name() != req->get_alternate_name()) {
/* the dentry exists but the alternate_names do not match, fail... */
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
}
if (oldin &&
oldin->is_dir() &&
_dir_is_nonempty(mdr, oldin)) {
- respond_to_request(mdr, -ENOTEMPTY);
+ respond_to_request(mdr, -CEPHFS_ENOTEMPTY);
return;
}
src_realm = srcdir->inode->find_snaprealm();
if (src_realm != dest_realm &&
src_realm->get_subvolume_ino() != dest_realm->get_subvolume_ino()) {
- respond_to_request(mdr, -EXDEV);
+ respond_to_request(mdr, -CEPHFS_EXDEV);
return;
}
}
MDS_TRAVERSE_DISCOVER | MDS_TRAVERSE_PATH_LOCKED | MDS_TRAVERSE_WANT_DENTRY,
&trace);
if (r > 0) return;
- if (r == -ESTALE) {
+ if (r == -CEPHFS_ESTALE) {
mdcache->find_ino_peers(destpath.get_ino(), new C_MDS_RetryRequest(mdcache, mdr),
mdr->peer_to_mds, true);
return;
return;
if (!diri->is_dir()) {
- respond_to_request(mdr, -ENOTDIR);
+ respond_to_request(mdr, -CEPHFS_ENOTDIR);
return;
}
dout(10) << "lssnap on " << *diri << dendl;
}
if (!mds->mdsmap->allows_snaps()) {
// you can't make snapshots until you set an option right now
- respond_to_request(mdr, -EPERM);
+ respond_to_request(mdr, -CEPHFS_EPERM);
return;
}
// dir only
if (!diri->is_dir()) {
- respond_to_request(mdr, -ENOTDIR);
+ respond_to_request(mdr, -CEPHFS_ENOTDIR);
return;
}
if (diri->is_system() && !diri->is_root()) {
// no snaps in system dirs (root is ok)
- respond_to_request(mdr, -EPERM);
+ respond_to_request(mdr, -CEPHFS_EPERM);
return;
}
if (mdr->client_request->get_caller_uid() < g_conf()->mds_snap_min_uid || mdr->client_request->get_caller_uid() > g_conf()->mds_snap_max_uid) {
dout(20) << "mksnap " << snapname << " on " << *diri << " denied to uid " << mdr->client_request->get_caller_uid() << dendl;
- respond_to_request(mdr, -EPERM);
+ respond_to_request(mdr, -CEPHFS_EPERM);
return;
}
if (inodeno_t subvol_ino = diri->find_snaprealm()->get_subvolume_ino();
(subvol_ino && subvol_ino != diri->ino())) {
- respond_to_request(mdr, -EPERM);
+ respond_to_request(mdr, -CEPHFS_EPERM);
return;
}
// we don't allow any more if we are already at or beyond the limit
if (diri->snaprealm &&
diri->snaprealm->get_snaps().size() >= max_snaps_per_dir) {
- respond_to_request(mdr, -EMLINK);
+ respond_to_request(mdr, -CEPHFS_EMLINK);
return;
}
// make sure name is unique
if (diri->snaprealm &&
diri->snaprealm->exists(snapname)) {
- respond_to_request(mdr, -EEXIST);
+ respond_to_request(mdr, -CEPHFS_EEXIST);
return;
}
if (snapname.length() == 0 ||
snapname[0] == '_') {
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
return;
if (!diri->is_dir()) {
- respond_to_request(mdr, -ENOTDIR);
+ respond_to_request(mdr, -CEPHFS_ENOTDIR);
return;
}
if (mdr->client_request->get_caller_uid() < g_conf()->mds_snap_min_uid || mdr->client_request->get_caller_uid() > g_conf()->mds_snap_max_uid) {
dout(20) << "rmsnap " << snapname << " on " << *diri << " denied to uid " << mdr->client_request->get_caller_uid() << dendl;
- respond_to_request(mdr, -EPERM);
+ respond_to_request(mdr, -CEPHFS_EPERM);
return;
}
// does snap exist?
if (snapname.length() == 0 || snapname[0] == '_') {
- respond_to_request(mdr, -EINVAL); // can't prune a parent snap, currently.
+ respond_to_request(mdr, -CEPHFS_EINVAL); // can't prune a parent snap, currently.
return;
}
if (!diri->snaprealm || !diri->snaprealm->exists(snapname)) {
- respond_to_request(mdr, -ENOENT);
+ respond_to_request(mdr, -CEPHFS_ENOENT);
return;
}
snapid_t snapid = diri->snaprealm->resolve_snapname(snapname, diri->ino());
{
const cref_t<MClientRequest> &req = mdr->client_request;
if (req->get_filepath().get_ino() != req->get_filepath2().get_ino()) {
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
return;
if (!diri->is_dir()) { // dir only
- respond_to_request(mdr, -ENOTDIR);
+ respond_to_request(mdr, -CEPHFS_ENOTDIR);
return;
}
if (mdr->client_request->get_caller_uid() < g_conf()->mds_snap_min_uid ||
mdr->client_request->get_caller_uid() > g_conf()->mds_snap_max_uid) {
- respond_to_request(mdr, -EPERM);
+ respond_to_request(mdr, -CEPHFS_EPERM);
return;
}
dout(10) << "renamesnap " << srcname << "->" << dstname << " on " << *diri << dendl;
if (srcname.length() == 0 || srcname[0] == '_') {
- respond_to_request(mdr, -EINVAL); // can't rename a parent snap.
+ respond_to_request(mdr, -CEPHFS_EINVAL); // can't rename a parent snap.
return;
}
if (!diri->snaprealm || !diri->snaprealm->exists(srcname)) {
- respond_to_request(mdr, -ENOENT);
+ respond_to_request(mdr, -CEPHFS_ENOENT);
return;
}
if (dstname.length() == 0 || dstname[0] == '_') {
- respond_to_request(mdr, -EINVAL);
+ respond_to_request(mdr, -CEPHFS_EINVAL);
return;
}
if (diri->snaprealm->exists(dstname)) {
- respond_to_request(mdr, -EEXIST);
+ respond_to_request(mdr, -CEPHFS_EEXIST);
return;
}
MDLog *mdlog;
PerfCounters *logger = nullptr;
- // OSDMap full status, used to generate ENOSPC on some operations
+ // OSDMap full status, used to generate CEPHFS_ENOSPC on some operations
bool is_full = false;
// State for while in reconnect
inode->layout.pool_ns.length() &&
!connection->has_feature(CEPH_FEATURE_FS_FILE_LAYOUT_V2)) {
dout(10) << __func__ << " client doesn't support FS_FILE_LAYOUT_V2" << dendl;
- return -EIO;
+ return -CEPHFS_EIO;
}
if (!auth_caps.is_capable(path, inode->uid, inode->gid, inode->mode,
caller_uid, caller_gid, caller_gid_list, mask,
new_uid, new_gid,
info.inst.addr)) {
- return -EACCES;
+ return -CEPHFS_EACCES;
}
return 0;
}
id = strict_strtoll(s.c_str(), 10, &err);
if (!err.empty()) {
*ss << "Invalid filter '" << s << "'";
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
return 0;
}
id = strict_strtoll(v.c_str(), 10, &err);
if (!err.empty()) {
*ss << err;
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
} else if (k == "reconnecting") {
/**
* Strict boolean parser. Allow true/false/0/1.
- * Anything else is -EINVAL.
+ * Anything else is -CEPHFS_EINVAL.
*/
auto is_true = [](std::string_view bstr, bool *out) -> bool
{
*out = false;
return 0;
} else {
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
};
set_reconnecting(bval);
} else {
*ss << "Invalid boolean value '" << v << "'";
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
} else {
*ss << "Invalid filter key '" << k << "'";
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
}
{
if (!is_synced()) {
dout(5) << "dump_cache: not synced" << dendl;
- return -EINVAL;
+ return -CEPHFS_EINVAL;
}
map<snapid_t, const SnapInfo*> snaps;
C_IO_PurgeStrayPurged(StrayManager *sm_, CDentry *d, bool oh) :
StrayManagerIOContext(sm_), dn(d), only_head(oh) { }
void finish(int r) override {
- ceph_assert(r == 0 || r == -ENOENT);
+ ceph_assert(r == 0 || r == -CEPHFS_ENOENT);
sm->_purge_stray_purged(dn, only_head);
}
void print(ostream& out) const override {
if (in->state_test(CInode::STATE_MISSINGOBJS)) {
mds->clog->error() << "previous attempt at committing dirfrag of ino "
<< in->ino() << " has failed, missing object";
- mds->handle_write_error(-ENOENT);
+ mds->handle_write_error(-CEPHFS_ENOENT);
}
return false; // not until some snaps are deleted.
}
"""
from cpython cimport PyObject, ref, exc
-from libc cimport errno
from libc.stdint cimport *
from libc.stdlib cimport malloc, realloc, free
from collections import namedtuple
from datetime import datetime
-import errno
import os
import time
CEPH_SETATTR_CTIME = 0x40
CEPH_SETATTR_BTIME = 0x200
+# CephFS error codes: fixed values used by libcephfs, independent of the host's errno module
+cdef enum:
+ CEPHFS_EBLOCKLISTED = 108
+ CEPHFS_EPERM = 1
+ CEPHFS_ESTALE = 116
+ CEPHFS_ENOSPC = 28
+ CEPHFS_ETIMEDOUT = 110
+ CEPHFS_EIO = 5
+ CEPHFS_ENOTCONN = 107
+ CEPHFS_EEXIST = 17
+ CEPHFS_EINTR = 4
+ CEPHFS_EINVAL = 22
+ CEPHFS_EBADF = 9
+ CEPHFS_EROFS = 30
+ CEPHFS_EAGAIN = 11
+ CEPHFS_EACCES = 13
+ CEPHFS_ELOOP = 40
+ CEPHFS_EISDIR = 21
+ CEPHFS_ENOENT = 2
+ CEPHFS_ENOTDIR = 20
+ CEPHFS_ENAMETOOLONG = 36
+ CEPHFS_EBUSY = 16
+ CEPHFS_EDQUOT = 122
+ CEPHFS_EFBIG = 27
+ CEPHFS_ERANGE = 34
+ CEPHFS_ENXIO = 6
+ CEPHFS_ECANCELED = 125
+ CEPHFS_ENODATA = 61
+ CEPHFS_EOPNOTSUPP = 95
+ CEPHFS_EXDEV = 18
+ CEPHFS_ENOMEM = 12
+ CEPHFS_ENOTRECOVERABLE = 131
+ CEPHFS_ENOSYS = 38
+ CEPHFS_EWOULDBLOCK = CEPHFS_EAGAIN
+ CEPHFS_ENOTEMPTY = 39
+ CEPHFS_EDEADLK = 35
+ CEPHFS_EDEADLOCK = CEPHFS_EDEADLK
+ CEPHFS_EDOM = 33
+ CEPHFS_EMLINK = 31
+ CEPHFS_ETIME = 62
+ CEPHFS_EOLDSNAPC = 85
+
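The block above gives the bindings their own fixed error-code table rather than relying on the host's errno module, so libcephfs return values decode the same way on every platform (which is also why the FreeBSD-specific exception mapping is dropped further below). As a rough, self-contained illustration of how such fixed codes are typically consumed, here is a plain-Python sketch; CephError, ObjectNotFound, ObjectExists and raise_if_error are hypothetical names for this sketch only, not the bindings' API (the real errno_to_exception table and the make_ex helper appear later in this diff).

# Hypothetical sketch only -- not part of cephfs.pyx.
CEPHFS_ENOENT = 2
CEPHFS_EEXIST = 17

class CephError(Exception):
    pass

class ObjectNotFound(CephError):
    pass

class ObjectExists(CephError):
    pass

_sketch_code_to_exc = {
    CEPHFS_ENOENT: ObjectNotFound,
    CEPHFS_EEXIST: ObjectExists,
}

def raise_if_error(ret, msg):
    # libcephfs-style calls return >= 0 on success and -CEPHFS_E* on failure
    if ret >= 0:
        return ret
    raise _sketch_code_to_exc.get(-ret, CephError)("%s [error %d]" % (msg, -ret))

# e.g. raise_if_error(-CEPHFS_ENOENT, "stat failed") raises ObjectNotFound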
cdef extern from "Python.h":
# These are in cpython/string.pxd, but use "object" types instead of
# PyObject*, which invokes assumptions in cpython that we need to
pass
-IF UNAME_SYSNAME == "FreeBSD":
- cdef errno_to_exception = {
- errno.EPERM : PermissionError,
- errno.ENOENT : ObjectNotFound,
- errno.EIO : IOError,
- errno.ENOSPC : NoSpace,
- errno.EEXIST : ObjectExists,
- errno.ENOATTR : NoData,
- errno.EINVAL : InvalidValue,
- errno.EOPNOTSUPP : OperationNotSupported,
- errno.ERANGE : OutOfRange,
- errno.EWOULDBLOCK: WouldBlock,
- errno.ENOTEMPTY : ObjectNotEmpty,
- errno.EDQUOT : DiskQuotaExceeded,
- }
-ELSE:
- cdef errno_to_exception = {
- errno.EPERM : PermissionError,
- errno.ENOENT : ObjectNotFound,
- errno.EIO : IOError,
- errno.ENOSPC : NoSpace,
- errno.EEXIST : ObjectExists,
- errno.ENODATA : NoData,
- errno.EINVAL : InvalidValue,
- errno.EOPNOTSUPP : OperationNotSupported,
- errno.ERANGE : OutOfRange,
- errno.EWOULDBLOCK: WouldBlock,
- errno.ENOTEMPTY : ObjectNotEmpty,
- errno.ENOTDIR : NotDirectory,
- errno.EDQUOT : DiskQuotaExceeded,
- }
+cdef errno_to_exception = {
+ CEPHFS_EPERM : PermissionError,
+ CEPHFS_ENOENT : ObjectNotFound,
+ CEPHFS_EIO : IOError,
+ CEPHFS_ENOSPC : NoSpace,
+ CEPHFS_EEXIST : ObjectExists,
+ CEPHFS_ENODATA : NoData,
+ CEPHFS_EINVAL : InvalidValue,
+ CEPHFS_EOPNOTSUPP : OperationNotSupported,
+ CEPHFS_ERANGE : OutOfRange,
+ CEPHFS_EWOULDBLOCK: WouldBlock,
+ CEPHFS_ENOTEMPTY : ObjectNotEmpty,
+ CEPHFS_ENOTDIR : NotDirectory,
+ CEPHFS_EDQUOT : DiskQuotaExceeded,
+}
cdef make_ex(ret, msg):
def __enter__(self):
if not self.handle:
- raise make_ex(errno.EBADF, "dir is not open")
+ raise make_ex(CEPHFS_EBADF, "dir is not open")
self.lib.require_state("mounted")
with nogil:
ceph_rewinddir(self.lib.cluster, self.handle)
def rewinddir(self):
if not self.handle:
- raise make_ex(errno.EBADF, "dir is not open")
+ raise make_ex(CEPHFS_EBADF, "dir is not open")
self.lib.require_state("mounted")
with nogil:
ceph_rewinddir(self.lib.cluster, self.handle)
def telldir(self):
if not self.handle:
- raise make_ex(errno.EBADF, "dir is not open")
+ raise make_ex(CEPHFS_EBADF, "dir is not open")
self.lib.require_state("mounted")
with nogil:
ret = ceph_telldir(self.lib.cluster, self.handle)
def seekdir(self, offset):
if not self.handle:
- raise make_ex(errno.EBADF, "dir is not open")
+ raise make_ex(CEPHFS_EBADF, "dir is not open")
if not isinstance(offset, int):
raise TypeError('offset must be an int')
self.lib.require_state("mounted")
self.state = "uninitialized"
if rados_inst is not None:
if auth_id is not None or conffile is not None or conf is not None:
- raise make_ex(errno.EINVAL,
+ raise make_ex(CEPHFS_EINVAL,
"May not pass RADOS instance as well as other configuration")
self.create_with_rados(rados_inst)
ret = ceph_conf_get(self.cluster, _option, ret_buf, length)
if ret == 0:
return decode_cstr(ret_buf)
- elif ret == -errno.ENAMETOOLONG:
+ elif ret == -CEPHFS_ENAMETOOLONG:
length = length * 2
- elif ret == -errno.ENOENT:
+ elif ret == -CEPHFS_ENOENT:
return None
else:
raise make_ex(ret, "error calling conf_get")
elif access_flags > 0 and c == '+':
access_flags = 3;
else:
- raise make_ex(errno.EOPNOTSUPP,
+ raise make_ex(CEPHFS_EOPNOTSUPP,
"open flags doesn't support %s" % c)
if access_flags == 1:
if ret > 0:
dict_result["pool_name"] = decode_cstr(buf)
return dict_result
- elif ret == -errno.ERANGE:
+ elif ret == -CEPHFS_ERANGE:
buflen = buflen * 2
else:
raise make_ex(ret, "error in get_file_pool_name")
if ret > 0:
dict_result["pool_name"] = decode_cstr(buf)
break
- elif ret == -errno.ERANGE:
+ elif ret == -CEPHFS_ERANGE:
buflen = buflen * 2
else:
raise make_ex(ret, "error in get_default_data_pool_name")
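Both pool-name getters above, like conf_get earlier, share one retry convention: when the underlying call returns -CEPHFS_ERANGE (or -CEPHFS_ENAMETOOLONG in conf_get's case), the caller's buffer was too small, so the binding doubles the buffer size and tries again. A minimal, self-contained sketch of that pattern follows; fake_pool_name_call is a hypothetical stand-in for a libcephfs call, not the real C API.

CEPHFS_ERANGE = 34

def fake_pool_name_call(buf_len, name=b"cephfs_data"):
    # Mimics the C convention used above: bytes written on success,
    # -CEPHFS_ERANGE when buf_len is too small for the result.
    if buf_len < len(name):
        return -CEPHFS_ERANGE, b""
    return len(name), name

def get_pool_name_with_retry():
    buflen = 4
    while True:
        ret, buf = fake_pool_name_call(buflen)
        if ret > 0:
            return buf.decode('utf-8')
        elif ret == -CEPHFS_ERANGE:
            buflen = buflen * 2   # buffer too small: double and retry
        else:
            raise RuntimeError("unexpected error code %d" % ret)

# get_pool_name_with_retry() returns 'cephfs_data' after two doublings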