namespace {
#ifdef WITH_SEASTAR
- ceph::common::ConfigProxy& conf(CephContext*) {
- return ceph::common::local_conf();
+ crimson::common::ConfigProxy& conf(CephContext*) {
+ return crimson::common::local_conf();
}
#else
ConfigProxy& conf(CephContext* cct) {
#ifdef WITH_SEASTAR
CephContext::CephContext()
- : _conf{ceph::common::local_conf()},
- _perf_counters_collection{ceph::common::local_perf_coll()},
+ : _conf{crimson::common::local_conf()},
+ _perf_counters_collection{crimson::common::local_perf_coll()},
_crypto_random{std::make_unique<CryptoRandom>()}
{}
}
CryptoRandom* random() const;
PerfCountersCollectionImpl* get_perfcounters_collection();
- ceph::common::ConfigProxy& _conf;
- ceph::common::PerfCountersCollection& _perf_counters_collection;
+ crimson::common::ConfigProxy& _conf;
+ crimson::common::PerfCountersCollection& _perf_counters_collection;
CephContext* get();
void put();
private:
#pragma once
#ifdef WITH_SEASTAR
-namespace ceph::common {
+namespace crimson::common {
class ConfigProxy;
}
-using ConfigProxy = ceph::common::ConfigProxy;
+using ConfigProxy = crimson::common::ConfigProxy;
#else
class ConfigProxy;
#endif
subsys.set_log_level(which, log);
subsys.set_gather_level(which, gather);
#if WITH_SEASTAR
- ceph::get_logger(which).set_level(ceph::to_log_level(log));
+ crimson::get_logger(which).set_level(crimson::to_log_level(log));
#endif
}
}
#ifdef WITH_SEASTAR
#define dout_impl(cct, sub, v) \
do { \
- if (ceph::common::local_conf()->subsys.should_gather(sub, v)) { \
- seastar::logger& _logger = ceph::get_logger(sub); \
+ if (crimson::common::local_conf()->subsys.should_gather(sub, v)) { \
+ seastar::logger& _logger = crimson::get_logger(sub); \
const auto _lv = v; \
std::ostringstream _out; \
std::ostream* _dout = &_out;
#define dendl_impl \
""; \
- _logger.log(ceph::to_log_level(_lv), \
+ _logger.log(crimson::to_log_level(_lv), \
_out.str().c_str()); \
} \
} while (0)
class CryptoKey;
-namespace ceph::auth {
+namespace crimson::auth {
class error : public std::logic_error {
public:
/// Build an authentication request to begin the handshake
///
/// @throw auth::error if unable to build the request
- virtual auth_request_t get_auth_request(ceph::net::ConnectionRef conn,
+ virtual auth_request_t get_auth_request(crimson::net::ConnectionRef conn,
AuthConnectionMetaRef auth_meta) = 0;
/// Handle server's request to continue the handshake
///
/// @throw auth::error if unable to build the request
virtual ceph::bufferlist handle_auth_reply_more(
- ceph::net::ConnectionRef conn,
+ crimson::net::ConnectionRef conn,
AuthConnectionMetaRef auth_meta,
const ceph::bufferlist& bl) = 0;
///
/// @return 0 if authenticated, a negative number otherwise
virtual int handle_auth_done(
- ceph::net::ConnectionRef conn,
+ crimson::net::ConnectionRef conn,
AuthConnectionMetaRef auth_meta,
uint64_t global_id,
uint32_t con_mode,
/// @return 0 if will try next auth method, a negative number if we have no
/// more options
virtual int handle_auth_bad_method(
- ceph::net::ConnectionRef conn,
+ crimson::net::ConnectionRef conn,
AuthConnectionMetaRef auth_meta,
uint32_t old_auth_method,
int result,
const std::vector<uint32_t>& allowed_modes) = 0;
};
-} // namespace ceph::auth
+} // namespace crimson::auth
struct AuthAuthorizeHandler;
-namespace ceph::auth {
+namespace crimson::auth {
class AuthServer {
public:
int auth_method) = 0;
// Handle an authentication request on an incoming connection
virtual int handle_auth_request(
- ceph::net::ConnectionRef conn,
+ crimson::net::ConnectionRef conn,
AuthConnectionMetaRef auth_meta,
bool more, //< true if this is not the first part of the handshake
uint32_t auth_method,
bufferlist *reply) = 0;
};
-} // namespace ceph::auth
+} // namespace crimson::auth
#include "AuthClient.h"
#include "AuthServer.h"
-namespace ceph::auth {
+namespace crimson::auth {
class DummyAuthClientServer : public AuthClient,
public AuthServer {
}
AuthClient::auth_request_t get_auth_request(
- ceph::net::ConnectionRef conn,
+ crimson::net::ConnectionRef conn,
AuthConnectionMetaRef auth_meta) override {
return {CEPH_AUTH_NONE, {CEPH_CON_MODE_CRC}, {}};
}
ceph::bufferlist handle_auth_reply_more(
- ceph::net::ConnectionRef conn,
+ crimson::net::ConnectionRef conn,
AuthConnectionMetaRef auth_meta,
const bufferlist& bl) override {
ceph_abort();
}
int handle_auth_done(
- ceph::net::ConnectionRef conn,
+ crimson::net::ConnectionRef conn,
AuthConnectionMetaRef auth_meta,
uint64_t global_id,
uint32_t con_mode,
}
int handle_auth_bad_method(
- ceph::net::ConnectionRef conn,
+ crimson::net::ConnectionRef conn,
AuthConnectionMetaRef auth_meta,
uint32_t old_auth_method,
int result,
// server
int handle_auth_request(
- ceph::net::ConnectionRef conn,
+ crimson::net::ConnectionRef conn,
AuthConnectionMetaRef auth_meta,
bool more,
uint32_t auth_method,
}
};
-} // namespace ceph::auth
+} // namespace crimson::auth
#include "include/denc.h"
#include "crimson/common/config_proxy.h"
-namespace ceph::auth {
+namespace crimson::auth {
seastar::future<seastar::temporary_buffer<char>> read_file(const std::string& path)
{
seastar::future<KeyRing*> load_from_keyring(KeyRing* keyring)
{
std::vector<std::string> paths;
- boost::split(paths, ceph::common::local_conf()->keyring,
+ boost::split(paths, crimson::common::local_conf()->keyring,
boost::is_any_of(",;"));
std::pair<bool, std::string> found;
return seastar::map_reduce(paths, [](auto path) {
seastar::future<KeyRing*> load_from_keyfile(KeyRing* keyring)
{
- auto& path = ceph::common::local_conf()->keyfile;
+ auto& path = crimson::common::local_conf()->keyfile;
if (!path.empty()) {
return read_file(path).then([keyring](auto buf) {
EntityAuth ea;
ea.key.decode_base64(std::string(buf.begin(),
buf.end()));
- keyring->add(ceph::common::local_conf()->name, ea);
+ keyring->add(crimson::common::local_conf()->name, ea);
return seastar::make_ready_future<KeyRing*>(keyring);
});
} else {
seastar::future<KeyRing*> load_from_key(KeyRing* keyring)
{
- auto& key = ceph::common::local_conf()->key;
+ auto& key = crimson::common::local_conf()->key;
if (!key.empty()) {
EntityAuth ea;
ea.key.decode_base64(key);
- keyring->add(ceph::common::local_conf()->name, ea);
+ keyring->add(crimson::common::local_conf()->name, ea);
}
return seastar::make_ready_future<KeyRing*>(keyring);
}
-} // namespace ceph::auth
+} // namespace crimson::auth
class KeyRing;
-namespace ceph::auth {
+namespace crimson::auth {
// see KeyRing::from_ceph_context
seastar::future<KeyRing*> load_from_keyring(KeyRing* keyring);
seastar::future<KeyRing*> load_from_keyfile(KeyRing* keyring);
const char* file, int line,
const char* func)
{
- seastar::logger& logger = ceph::get_logger(0);
+ seastar::logger& logger = crimson::get_logger(0);
logger.error("{}:{} : In function '{}', ceph_assert(%s)\n"
"{}",
file, line, func, assertion,
std::vsnprintf(buf, sizeof(buf), msg, args);
va_end(args);
- seastar::logger& logger = ceph::get_logger(0);
+ seastar::logger& logger = crimson::get_logger(0);
logger.error("{}:{} : In function '{}', ceph_assert(%s)\n"
"{}\n{}\n",
file, line, func, assertion,
[[gnu::cold]] void __ceph_abort(const char* file, int line,
const char* func, const std::string& msg)
{
- seastar::logger& logger = ceph::get_logger(0);
+ seastar::logger& logger = crimson::get_logger(0);
logger.error("{}:{} : In function '{}', abort(%s)\n"
"{}",
file, line, func, msg,
std::vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
- seastar::logger& logger = ceph::get_logger(0);
+ seastar::logger& logger = crimson::get_logger(0);
logger.error("{}:{} : In function '{}', abort()\n"
"{}\n{}\n",
file, line, func,
class EntityName;
class AuthCapsInfo;
-namespace ceph::common {
+namespace crimson::common {
class AuthHandler {
public:
// the peer just got authorized
#include "config_proxy.h"
-namespace ceph::common {
+namespace crimson::common {
ConfigProxy::ConfigProxy(const EntityName& name, std::string_view cluster)
{
#include "common/config_obs_mgr.h"
#include "common/errno.h"
-namespace ceph::common {
+namespace crimson::common {
// a facade for managing config. each shard has its own copy of ConfigProxy.
//
seastar::future<> parse_argv(std::vector<const char*>& argv) {
// we could pass whatever is unparsed to seastar, but seastar::app_template
// is used for driving the seastar application, and
- // ceph::common::ConfigProxy is not available until seastar engine is up
+ // crimson::common::ConfigProxy is not available until seastar engine is up
// and running, so we have to feed the command line args to app_template
// first, then pass them to ConfigProxy.
return do_change([&argv, this](ConfigValues& values) {
#undef DEFAULT_SUBSYS
};
-namespace ceph {
+namespace crimson {
seastar::logger& get_logger(int subsys) {
assert(subsys < ceph_subsys_max);
return loggers[subsys];
#include <seastar/util/log.hh>
#include "common/subsys_types.h"
-namespace ceph {
+namespace crimson {
seastar::logger& get_logger(int subsys);
static inline seastar::log_level to_log_level(int level) {
if (level < 0) {
#include "perf_counters_collection.h"
-namespace ceph::common {
+namespace crimson::common {
PerfCountersCollection::PerfCountersCollection()
{
perf_collection = std::make_unique<PerfCountersCollectionImpl>();
#include "common/perf_counters.h"
#include <seastar/core/sharded.hh>
-namespace ceph::common {
+namespace crimson::common {
class PerfCountersCollection: public seastar::sharded<PerfCountersCollection>
{
using ShardedPerfCountersCollection = seastar::sharded<PerfCountersCollection>;
namespace {
seastar::logger& logger()
{
- return ceph::get_logger(ceph_subsys_mgrc);
- }
- template<typename Message, typename... Args>
- Ref<Message> make_message(Args&&... args)
- {
- // Message inherits from RefCountedObject, whose nref is 1 when it is
- // constructed, so we pass "add_ref = false" to intrusive_ptr's ctor
- return {new Message{std::forward<Args>(args)...}, false};
+ return crimson::get_logger(ceph_subsys_mgrc);
}
}
-using ceph::common::local_conf;
+using crimson::common::local_conf;
-namespace ceph::mgr
+namespace crimson::mgr
{
-Client::Client(ceph::net::Messenger& msgr,
+Client::Client(crimson::net::Messenger& msgr,
WithStats& with_stats)
: msgr{msgr},
with_stats{with_stats},
});
}
-seastar::future<> Client::ms_dispatch(ceph::net::Connection* conn,
+seastar::future<> Client::ms_dispatch(crimson::net::Connection* conn,
MessageRef m)
{
switch(m->get_type()) {
}
}
-seastar::future<> Client::ms_handle_reset(ceph::net::ConnectionRef c)
+seastar::future<> Client::ms_handle_reset(crimson::net::ConnectionRef c)
{
if (conn == c) {
return reconnect();
[this](auto xconn) {
conn = xconn->release();
// ask for the mgrconfigure message
- auto m = make_message<MMgrOpen>();
+ auto m = ceph::make_message<MMgrOpen>();
m->daemon_name = local_conf()->name.get_id();
return conn->send(std::move(m));
});
});
}
-seastar::future<> Client::handle_mgr_map(ceph::net::Connection*,
+seastar::future<> Client::handle_mgr_map(crimson::net::Connection*,
Ref<MMgrMap> m)
{
mgrmap = m->get_map();
}
}
-seastar::future<> Client::handle_mgr_conf(ceph::net::Connection* conn,
+seastar::future<> Client::handle_mgr_conf(crimson::net::Connection* conn,
Ref<MMgrConfigure> m)
{
logger().info("{} {}", __func__, *m);
#include "mon/MgrMap.h"
template<typename Message> using Ref = boost::intrusive_ptr<Message>;
-namespace ceph::net {
+namespace crimson::net {
class Messenger;
}
class MMgrMap;
class MMgrConfigure;
-namespace ceph::mgr
+namespace crimson::mgr
{
// implement WithStats if you want to report stats to mgr periodically
virtual ~WithStats() {}
};
-class Client : public ceph::net::Dispatcher {
+class Client : public crimson::net::Dispatcher {
public:
- Client(ceph::net::Messenger& msgr,
+ Client(crimson::net::Messenger& msgr,
WithStats& with_stats);
seastar::future<> start();
seastar::future<> stop();
private:
- seastar::future<> ms_dispatch(ceph::net::Connection* conn,
+ seastar::future<> ms_dispatch(crimson::net::Connection* conn,
Ref<Message> m) override;
- seastar::future<> ms_handle_reset(ceph::net::ConnectionRef conn) override;
- seastar::future<> handle_mgr_map(ceph::net::Connection* conn,
+ seastar::future<> ms_handle_reset(crimson::net::ConnectionRef conn) override;
+ seastar::future<> handle_mgr_map(crimson::net::Connection* conn,
Ref<MMgrMap> m);
- seastar::future<> handle_mgr_conf(ceph::net::Connection* conn,
+ seastar::future<> handle_mgr_conf(crimson::net::Connection* conn,
Ref<MMgrConfigure> m);
seastar::future<> reconnect();
void report();
private:
MgrMap mgrmap;
- ceph::net::Messenger& msgr;
+ crimson::net::Messenger& msgr;
WithStats& with_stats;
- ceph::net::ConnectionRef conn;
+ crimson::net::ConnectionRef conn;
std::chrono::seconds report_period{0};
seastar::timer<seastar::lowres_clock> report_timer;
seastar::gate gate;
namespace {
seastar::logger& logger()
{
- return ceph::get_logger(ceph_subsys_monc);
- }
-
- template<typename Message, typename... Args>
- Ref<Message> make_message(Args&&... args)
- {
- return {new Message{std::forward<Args>(args)...}, false};
+ return crimson::get_logger(ceph_subsys_monc);
}
}
-namespace ceph::mon {
+namespace crimson::mon {
-using ceph::common::local_conf;
+using crimson::common::local_conf;
class Connection {
public:
Connection(const AuthRegistry& auth_registry,
- ceph::net::ConnectionRef conn,
+ crimson::net::ConnectionRef conn,
KeyRing* keyring);
enum class AuthResult {
success = 0,
seastar::future<> renew_tickets();
seastar::future<> renew_rotating_keyring();
- ceph::net::ConnectionRef get_conn();
+ crimson::net::ConnectionRef get_conn();
private:
seastar::future<> setup_session(epoch_t epoch,
const EntityName& name);
- std::unique_ptr<AuthClientHandler> create_auth(ceph::auth::method_t,
+ std::unique_ptr<AuthClientHandler> create_auth(crimson::auth::method_t,
uint64_t global_id,
const EntityName& name,
uint32_t want_keys);
// v2
using clock_t = seastar::lowres_system_clock;
clock_t::time_point auth_start;
- ceph::auth::method_t auth_method = 0;
+ crimson::auth::method_t auth_method = 0;
seastar::promise<AuthResult> auth_done;
// v1 and v2
const AuthRegistry& auth_registry;
- ceph::net::ConnectionRef conn;
+ crimson::net::ConnectionRef conn;
std::unique_ptr<AuthClientHandler> auth;
std::unique_ptr<RotatingKeyRing> rotating_keyring;
uint64_t global_id = 0;
};
Connection::Connection(const AuthRegistry& auth_registry,
- ceph::net::ConnectionRef conn,
+ crimson::net::ConnectionRef conn,
KeyRing* keyring)
: auth_registry{auth_registry},
conn{conn},
if (r != AuthResult::success) {
throw std::system_error(
make_error_code(
- ceph::net::error::negotiation_failure));
+ crimson::net::error::negotiation_failure));
}
});
}
{
auto now = clock_t::now();
auto ttl = std::chrono::seconds{
- static_cast<long>(ceph::common::local_conf()->auth_service_ticket_ttl)};
+ static_cast<long>(crimson::common::local_conf()->auth_service_ticket_ttl)};
auto cutoff = now - ttl / 4;
if (!rotating_keyring->need_new_secrets(utime_t(cutoff))) {
return seastar::now();
return do_auth(request_t::rotating).then([](AuthResult r) {
if (r != AuthResult::success) {
throw std::system_error(make_error_code(
- ceph::net::error::negotiation_failure));
+ crimson::net::error::negotiation_failure));
}
});
}
}
std::unique_ptr<AuthClientHandler>
-Connection::create_auth(ceph::auth::method_t protocol,
+Connection::create_auth(crimson::auth::method_t protocol,
uint64_t global_id,
const EntityName& name,
uint32_t want_keys)
if (!auth) {
logger().error("no handler for protocol {}", protocol);
throw std::system_error(make_error_code(
- ceph::net::error::negotiation_failure));
+ crimson::net::error::negotiation_failure));
}
auth->init(name);
auth->set_want_keys(want_keys);
Connection::setup_session(epoch_t epoch,
const EntityName& name)
{
- auto m = make_message<MAuth>();
+ auto m = ceph::make_message<MAuth>();
m->protocol = CEPH_AUTH_UNKNOWN;
m->monmap_epoch = epoch;
__u8 struct_v = 1;
encode(struct_v, m->auth_payload);
- std::vector<ceph::auth::method_t> auth_methods;
+ std::vector<crimson::auth::method_t> auth_methods;
auth_registry.get_supported_methods(conn->get_peer_type(), &auth_methods);
encode(auth_methods, m->auth_payload);
encode(name, m->auth_payload);
if (int ret = auth->build_request(m->auth_payload); ret) {
logger().error("missing/bad key for '{}'", local_conf()->name);
throw std::system_error(make_error_code(
- ceph::net::error::negotiation_failure));
+ crimson::net::error::negotiation_failure));
}
break;
default:
{
// choose method
auth_method = [&] {
- std::vector<ceph::auth::method_t> methods;
+ std::vector<crimson::auth::method_t> methods;
auth_registry.get_supported_methods(conn->get_peer_type(), &methods);
if (methods.empty()) {
logger().info("get_auth_request no methods is supported");
- throw ceph::auth::error("no methods is supported");
+ throw crimson::auth::error("no methods is supported");
}
return methods.front();
}();
&modes);
logger().info("method {} preferred_modes {}", auth_method, modes);
if (modes.empty()) {
- throw ceph::auth::error("no modes is supported");
+ throw crimson::auth::error("no modes is supported");
}
auth = create_auth(auth_method, global_id, entity_name, want_keys);
return {session_key, connection_secret, reply};
} else if (r < 0) {
logger().error(" handle_response returned {}", r);
- throw ceph::auth::error("unable to build auth");
+ throw crimson::auth::error("unable to build auth");
} else {
logger().info("authenticated!");
std::terminate();
logger().error("server allowed_methods {} but i only support {}",
allowed_methods, auth_supported);
auth_done.set_exception(std::system_error(make_error_code(
- ceph::net::error::negotiation_failure)));
+ crimson::net::error::negotiation_failure)));
return -EACCES;
}
auth_method = *p;
return conn->get_peer_addr() == addr;
}
-ceph::net::ConnectionRef Connection::get_conn() {
+crimson::net::ConnectionRef Connection::get_conn() {
return conn;
}
-Client::Client(ceph::net::Messenger& messenger,
- ceph::common::AuthHandler& auth_handler)
+Client::Client(crimson::net::Messenger& messenger,
+ crimson::common::AuthHandler& auth_handler)
// currently, crimson is OSD-only
: want_keys{CEPH_ENTITY_TYPE_MON |
CEPH_ENTITY_TYPE_OSD |
Client::~Client() = default;
seastar::future<> Client::start() {
- entity_name = ceph::common::local_conf()->name;
+ entity_name = crimson::common::local_conf()->name;
auth_registry.refresh_config();
return load_keyring().then([this] {
- return monmap.build_initial(ceph::common::local_conf(), false);
+ return monmap.build_initial(crimson::common::local_conf(), false);
}).then([this] {
return authenticate();
}).then([this] {
if (!auth_registry.is_supported_method(msgr.get_mytype(), CEPH_AUTH_CEPHX)) {
return seastar::now();
} else {
- return ceph::auth::load_from_keyring(&keyring).then([](KeyRing* keyring) {
- return ceph::auth::load_from_keyfile(keyring);
+ return crimson::auth::load_from_keyring(&keyring).then([](KeyRing* keyring) {
+ return crimson::auth::load_from_keyfile(keyring);
}).then([](KeyRing* keyring) {
- return ceph::auth::load_from_key(keyring);
+ return crimson::auth::load_from_key(keyring);
}).then([](KeyRing*) {
return seastar::now();
});
}
seastar::future<>
-Client::ms_dispatch(ceph::net::Connection* conn, MessageRef m)
+Client::ms_dispatch(crimson::net::Connection* conn, MessageRef m)
{
// we only care about these message types
switch (m->get_type()) {
}
}
-seastar::future<> Client::ms_handle_reset(ceph::net::ConnectionRef conn)
+seastar::future<> Client::ms_handle_reset(crimson::net::ConnectionRef conn)
{
auto found = std::find_if(pending_conns.begin(), pending_conns.end(),
[peer_addr = conn->get_peer_addr()](auto& mc) {
}
-int Client::handle_auth_request(ceph::net::ConnectionRef con,
+int Client::handle_auth_request(crimson::net::ConnectionRef con,
AuthConnectionMetaRef auth_meta,
bool more,
uint32_t auth_method,
}
auth::AuthClient::auth_request_t
-Client::get_auth_request(ceph::net::ConnectionRef con,
+Client::get_auth_request(crimson::net::ConnectionRef con,
AuthConnectionMetaRef auth_meta)
{
logger().info("get_auth_request(con={}, auth_method={})",
return mc->is_my_peer(peer_addr);
});
if (found == pending_conns.end()) {
- throw ceph::auth::error{"unknown connection"};
+ throw crimson::auth::error{"unknown connection"};
}
return (*found)->get_auth_request(entity_name, want_keys);
} else {
// generate authorizer
if (!active_con) {
logger().error(" but no auth handler is set up");
- throw ceph::auth::error("no auth available");
+ throw crimson::auth::error("no auth available");
}
auto authorizer = active_con->get_authorizer(con->get_peer_type());
if (!authorizer) {
logger().error("failed to build_authorizer for type {}",
ceph_entity_type_name(con->get_peer_type()));
- throw ceph::auth::error("unable to build auth");
+ throw crimson::auth::error("unable to build auth");
}
auth_meta->authorizer.reset(authorizer);
auth_meta->auth_method = authorizer->protocol;
}
}
- ceph::bufferlist Client::handle_auth_reply_more(ceph::net::ConnectionRef conn,
+ceph::bufferlist Client::handle_auth_reply_more(crimson::net::ConnectionRef conn,
AuthConnectionMetaRef auth_meta,
const bufferlist& bl)
{
return mc->is_my_peer(peer_addr);
});
if (found == pending_conns.end()) {
- throw ceph::auth::error{"unknown connection"};
+ throw crimson::auth::error{"unknown connection"};
}
bufferlist reply;
tie(auth_meta->session_key, auth_meta->connection_secret, reply) =
// authorizer challenges
if (!active_con || !auth_meta->authorizer) {
logger().error("no authorizer?");
- throw ceph::auth::error("no auth available");
+ throw crimson::auth::error("no auth available");
}
auth_meta->authorizer->add_challenge(&cct, bl);
return auth_meta->authorizer->bl;
}
}
-int Client::handle_auth_done(ceph::net::ConnectionRef conn,
+int Client::handle_auth_done(crimson::net::ConnectionRef conn,
AuthConnectionMetaRef auth_meta,
uint64_t global_id,
uint32_t con_mode,
}
// Handle server's indication that the previous auth attempt failed
-int Client::handle_auth_bad_method(ceph::net::ConnectionRef conn,
+int Client::handle_auth_bad_method(crimson::net::ConnectionRef conn,
AuthConnectionMetaRef auth_meta,
uint32_t old_auth_method,
int result,
}
}
-seastar::future<> Client::handle_monmap(ceph::net::Connection* conn,
+seastar::future<> Client::handle_monmap(crimson::net::Connection* conn,
Ref<MMonMap> m)
{
monmap.decode(m->monmapbl);
}
}
-seastar::future<> Client::handle_auth_reply(ceph::net::Connection* conn,
+seastar::future<> Client::handle_auth_reply(crimson::net::Connection* conn,
Ref<MAuthReply> m)
{
logger().info(
seastar::future<> Client::handle_config(Ref<MConfig> m)
{
- return ceph::common::local_conf().set_mon_vals(m->config);
+ return crimson::common::local_conf().set_mon_vals(m->config);
}
std::vector<unsigned> Client::get_random_mons(unsigned n) const
mons.push_back(rank);
} else {
const auto parallel =
- ceph::common::local_conf().get_val<uint64_t>("mon_client_hunt_parallel");
+ crimson::common::local_conf().get_val<uint64_t>("mon_client_hunt_parallel");
mons = get_random_mons(parallel);
}
pending_conns.reserve(mons.size());
// sharded-messenger compatible mode assumes all connections running
// in one shard.
ceph_assert((*xconn)->shard_id() == seastar::engine().cpu_id());
- ceph::net::ConnectionRef conn = xconn->release();
+ crimson::net::ConnectionRef conn = xconn->release();
auto& mc = pending_conns.emplace_back(
std::make_unique<Connection>(auth_registry, conn, &keyring));
if (conn->get_peer_addr().is_msgr2()) {
});
}
-} // namespace ceph::mon
+} // namespace crimson::mon
#include "mon/MonSub.h"
template<typename Message> using Ref = boost::intrusive_ptr<Message>;
-namespace ceph::net {
+namespace crimson::net {
class Messenger;
}
struct MLogAck;
struct MConfig;
-namespace ceph::mon {
+namespace crimson::mon {
class Connection;
-class Client : public ceph::net::Dispatcher,
- public ceph::auth::AuthClient,
- public ceph::auth::AuthServer
+class Client : public crimson::net::Dispatcher,
+ public crimson::auth::AuthClient,
+ public crimson::auth::AuthServer
{
EntityName entity_name;
KeyRing keyring;
seastar::timer<seastar::lowres_clock> timer;
seastar::gate tick_gate;
- ceph::net::Messenger& msgr;
+ crimson::net::Messenger& msgr;
// commands
using get_version_t = seastar::future<version_t, version_t>;
MonSub sub;
public:
- Client(ceph::net::Messenger&, ceph::common::AuthHandler&);
+ Client(crimson::net::Messenger&, crimson::common::AuthHandler&);
Client(Client&&);
~Client();
seastar::future<> start();
const std::vector<uint32_t>& preferred_modes) final;
AuthAuthorizeHandler* get_auth_authorize_handler(int peer_type,
int auth_method) final;
- int handle_auth_request(ceph::net::ConnectionRef conn,
+ int handle_auth_request(crimson::net::ConnectionRef conn,
AuthConnectionMetaRef auth_meta,
bool more,
uint32_t auth_method,
CephContext cct; // for auth_registry
AuthRegistry auth_registry;
- ceph::common::AuthHandler& auth_handler;
+ crimson::common::AuthHandler& auth_handler;
// AuthClient methods
- ceph::auth::AuthClient::auth_request_t
- get_auth_request(ceph::net::ConnectionRef conn,
+ crimson::auth::AuthClient::auth_request_t
+ get_auth_request(crimson::net::ConnectionRef conn,
AuthConnectionMetaRef auth_meta) final;
// Handle server's request to continue the handshake
- ceph::bufferlist handle_auth_reply_more(ceph::net::ConnectionRef conn,
+ ceph::bufferlist handle_auth_reply_more(crimson::net::ConnectionRef conn,
AuthConnectionMetaRef auth_meta,
const bufferlist& bl) final;
// Handle server's indication that authentication succeeded
- int handle_auth_done(ceph::net::ConnectionRef conn,
+ int handle_auth_done(crimson::net::ConnectionRef conn,
AuthConnectionMetaRef auth_meta,
uint64_t global_id,
uint32_t con_mode,
const bufferlist& bl) final;
// Handle server's indication that the previous auth attempt failed
- int handle_auth_bad_method(ceph::net::ConnectionRef conn,
+ int handle_auth_bad_method(crimson::net::ConnectionRef conn,
AuthConnectionMetaRef auth_meta,
uint32_t old_auth_method,
int result,
private:
void tick();
- seastar::future<> ms_dispatch(ceph::net::Connection* conn,
+ seastar::future<> ms_dispatch(crimson::net::Connection* conn,
MessageRef m) override;
- seastar::future<> ms_handle_reset(ceph::net::ConnectionRef conn) override;
+ seastar::future<> ms_handle_reset(crimson::net::ConnectionRef conn) override;
- seastar::future<> handle_monmap(ceph::net::Connection* conn,
+ seastar::future<> handle_monmap(crimson::net::Connection* conn,
Ref<MMonMap> m);
- seastar::future<> handle_auth_reply(ceph::net::Connection* conn,
+ seastar::future<> handle_auth_reply(crimson::net::Connection* conn,
Ref<MAuthReply> m);
seastar::future<> handle_subscribe_ack(Ref<MMonSubscribeAck> m);
seastar::future<> handle_get_version_reply(Ref<MMonGetVersionReply> m);
seastar::future<> _add_conn(unsigned rank, uint64_t global_id);
};
-} // namespace ceph::mon
+} // namespace crimson::mon
#include "include/msgr.h"
#include <chrono>
-namespace ceph::net {
+namespace crimson::net {
using namespace std::literals::chrono_literals;
#include "Fwd.h"
-namespace ceph::net {
+namespace crimson::net {
#ifdef UNIT_TESTS_BUILT
class Interceptor;
return out;
}
-} // namespace ceph::net
+} // namespace crimson::net
class AuthAuthorizer;
-namespace ceph::net {
+namespace crimson::net {
class Dispatcher {
public:
}
};
-} // namespace ceph::net
+} // namespace crimson::net
#include "Errors.h"
-namespace ceph::net {
+namespace crimson::net {
const std::error_category& net_category()
{
struct category : public std::error_category {
const char* name() const noexcept override {
- return "ceph::net";
+ return "crimson::net";
}
std::string message(int ev) const override {
return instance;
}
-} // namespace ceph::net
+} // namespace crimson::net
#include <system_error>
-namespace ceph::net {
+namespace crimson::net {
/// net error codes
enum class error {
return {static_cast<int>(e), net_category()};
}
-} // namespace ceph::net
+} // namespace crimson::net
namespace std {
/// enables implicit conversion to std::error_condition
template <>
-struct is_error_condition_enum<ceph::net::error> : public true_type {};
+struct is_error_condition_enum<crimson::net::error> : public true_type {};
} // namespace std
class AuthConnectionMeta;
using AuthConnectionMetaRef = seastar::lw_shared_ptr<AuthConnectionMeta>;
-namespace ceph::net {
+namespace crimson::net {
using msgr_tag_t = uint8_t;
using stop_t = seastar::stop_iteration;
});
}
-} // namespace ceph::net
+} // namespace crimson::net
#include "Fwd.h"
#include "msg/async/frames_v2.h"
-namespace ceph::net {
+namespace crimson::net {
enum class custom_bp_t : uint8_t {
BANNER_WRITE = 0,
virtual bp_action_t intercept(Connection& conn, Breakpoint bp) = 0;
};
-} // namespace ceph::net
+} // namespace crimson::net
#include "Messenger.h"
#include "SocketMessenger.h"
-namespace ceph::net {
+namespace crimson::net {
seastar::future<Messenger*>
Messenger::create(const entity_name_t& name,
});
}
-} // namespace ceph::net
+} // namespace crimson::net
class AuthAuthorizer;
-namespace ceph::auth {
+namespace crimson::auth {
class AuthClient;
class AuthServer;
}
-namespace ceph::net {
+namespace crimson::net {
#ifdef UNIT_TESTS_BUILT
class Interceptor;
#endif
-using Throttle = ceph::thread::Throttle;
+using Throttle = crimson::thread::Throttle;
using SocketPolicy = ceph::net::Policy<Throttle>;
class Messenger {
entity_name_t my_name;
entity_addrvec_t my_addrs;
uint32_t crc_flags = 0;
- ceph::auth::AuthClient* auth_client = nullptr;
- ceph::auth::AuthServer* auth_server = nullptr;
+ crimson::auth::AuthClient* auth_client = nullptr;
+ crimson::auth::AuthServer* auth_server = nullptr;
bool require_authorizer = true;
public:
crc_flags |= MSG_CRC_HEADER;
}
- ceph::auth::AuthClient* get_auth_client() const { return auth_client; }
- void set_auth_client(ceph::auth::AuthClient *ac) {
+ crimson::auth::AuthClient* get_auth_client() const { return auth_client; }
+ void set_auth_client(crimson::auth::AuthClient *ac) {
auth_client = ac;
}
- ceph::auth::AuthServer* get_auth_server() const { return auth_server; }
- void set_auth_server(ceph::auth::AuthServer *as) {
+ crimson::auth::AuthServer* get_auth_server() const { return auth_server; }
+ void set_auth_server(crimson::auth::AuthServer *as) {
auth_server = as;
}
return out;
}
-} // namespace ceph::net
+} // namespace crimson::net
namespace {
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_ms);
+ return crimson::get_logger(ceph_subsys_ms);
}
}
-namespace ceph::net {
+namespace crimson::net {
Protocol::Protocol(proto_t type,
Dispatcher& dispatcher,
}
}
-} // namespace ceph::net
+} // namespace crimson::net
#include "Fwd.h"
#include "SocketConnection.h"
-namespace ceph::net {
+namespace crimson::net {
class Protocol {
public:
void write_event();
};
-} // namespace ceph::net
+} // namespace crimson::net
namespace {
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_ms);
+ return crimson::get_logger(ceph_subsys_ms);
}
template <typename T>
auto len = p.get_ptr_and_advance(remaining, &buf);
if (!std::equal(buf, buf + len, b)) {
throw std::system_error(
- make_error_code(ceph::net::error::bad_connect_banner));
+ make_error_code(crimson::net::error::bad_connect_banner));
}
b += len;
}
}
void discard_up_to(std::deque<MessageRef>* queue,
- ceph::net::seq_num_t seq)
+ crimson::net::seq_num_t seq)
{
while (!queue->empty() &&
queue->front()->get_seq() < seq) {
} // namespace anonymous
-namespace ceph::net {
+namespace crimson::net {
ProtocolV1::ProtocolV1(Dispatcher& dispatcher,
SocketConnection& conn,
logger().error("{} my peer_addr {} doesn't match what peer advertized {}",
conn, conn.peer_addr, saddr);
throw std::system_error(
- make_error_code(ceph::net::error::bad_peer_address));
+ make_error_code(crimson::net::error::bad_peer_address));
}
conn.set_ephemeral_port(caddr.get_port(),
SocketConnection::side_t::connector);
logger().warn("{} peer sent a v2 address for me: {}",
conn, caddr);
throw std::system_error(
- make_error_code(ceph::net::error::bad_peer_address));
+ make_error_code(crimson::net::error::bad_peer_address));
}
caddr.set_type(entity_addr_t::TYPE_LEGACY);
return messenger.learned_addr(caddr, conn);
logger().error("{} we don't know how to reconnect to peer {}",
conn, conn.target_addr);
throw std::system_error(
- make_error_code(ceph::net::error::bad_peer_address));
+ make_error_code(crimson::net::error::bad_peer_address));
}
return socket->read(h.connect.authorizer_len);
}).then([this] (bufferlist authorizer) {
" which should be v1 and the same host with {}.",
conn, addr, conn.peer_addr);
throw std::system_error(
- make_error_code(ceph::net::error::bad_peer_address));
+ make_error_code(crimson::net::error::bad_peer_address));
}
conn.peer_addr = addr;
conn.target_addr = conn.peer_addr;
return seastar::now();
}
-} // namespace ceph::net
+} // namespace crimson::net
class AuthAuthorizer;
class AuthSessionHandler;
-namespace ceph::net {
+namespace crimson::net {
class ProtocolV1 final : public Protocol {
public:
private:
// connecting
void reset_session();
- seastar::future<stop_t> handle_connect_reply(ceph::net::msgr_tag_t tag);
+ seastar::future<stop_t> handle_connect_reply(crimson::net::msgr_tag_t tag);
seastar::future<stop_t> repeat_connect();
ceph::bufferlist get_auth_payload();
seastar::future<> fault();
};
-} // namespace ceph::net
+} // namespace crimson::net
// - integrity checks;
// - etc.
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_ms);
+ return crimson::get_logger(ceph_subsys_ms);
}
void abort_in_fault() {
- throw std::system_error(make_error_code(ceph::net::error::negotiation_failure));
+ throw std::system_error(make_error_code(crimson::net::error::negotiation_failure));
}
void abort_protocol() {
- throw std::system_error(make_error_code(ceph::net::error::protocol_aborted));
+ throw std::system_error(make_error_code(crimson::net::error::protocol_aborted));
}
-void abort_in_close(ceph::net::ProtocolV2& proto) {
+void abort_in_close(crimson::net::ProtocolV2& proto) {
(void) proto.close();
abort_protocol();
}
inline void expect_tag(const Tag& expected,
const Tag& actual,
- ceph::net::SocketConnection& conn,
+ crimson::net::SocketConnection& conn,
const char *where) {
if (actual != expected) {
logger().warn("{} {} received wrong tag: {}, expected {}",
}
inline void unexpected_tag(const Tag& unexpected,
- ceph::net::SocketConnection& conn,
+ crimson::net::SocketConnection& conn,
const char *where) {
logger().warn("{} {} received unexpected tag: {}",
conn, where, static_cast<uint32_t>(unexpected));
}
}
-namespace ceph::net {
+namespace crimson::net {
#ifdef UNIT_TESTS_BUILT
void intercept(Breakpoint bp, bp_type_t type,
return write_frame(frame).then([this] {
return handle_auth_reply();
});
- } catch (const ceph::auth::error& e) {
+ } catch (const crimson::auth::error& e) {
logger().error("{} get_initial_auth_request returned {}", conn, e);
dispatch_reset();
abort_in_close(*this);
logger().warn("{} peer identifies as {}, does not include {}",
conn, server_ident.addrs(), conn.target_addr);
throw std::system_error(
- make_error_code(ceph::net::error::bad_peer_address));
+ make_error_code(crimson::net::error::bad_peer_address));
}
server_cookie = server_ident.cookie();
logger().warn("{} peer advertises as {}, does not match {}",
conn, server_ident.addrs(), conn.peer_addr);
throw std::system_error(
- make_error_code(ceph::net::error::bad_peer_address));
+ make_error_code(crimson::net::error::bad_peer_address));
}
conn.set_peer_id(server_ident.gid());
conn.set_features(server_ident.supported_features() &
logger().warn("{} peer sent a legacy address for me: {}",
conn, _my_addr_from_peer);
throw std::system_error(
- make_error_code(ceph::net::error::bad_peer_address));
+ make_error_code(crimson::net::error::bad_peer_address));
}
_my_addr_from_peer.set_type(entity_addr_t::TYPE_MSGR2);
return messenger.learned_addr(_my_addr_from_peer, conn);
client_ident.addrs().front() == entity_addr_t()) {
logger().warn("{} oops, client_ident.addrs() is empty", conn);
throw std::system_error(
- make_error_code(ceph::net::error::bad_peer_address));
+ make_error_code(crimson::net::error::bad_peer_address));
}
if (!messenger.get_myaddrs().contains(client_ident.target_addr())) {
logger().warn("{} peer is trying to reach {} which is not us ({})",
conn, client_ident.target_addr(), messenger.get_myaddrs());
throw std::system_error(
- make_error_code(ceph::net::error::bad_peer_address));
+ make_error_code(crimson::net::error::bad_peer_address));
}
// TODO: change peer_addr to entity_addrvec_t
entity_addr_t paddr = client_ident.addrs().front();
logger().warn("{} peer's address {} is not v2 or not the same host with {}",
conn, paddr, conn.target_addr);
throw std::system_error(
- make_error_code(ceph::net::error::bad_peer_address));
+ make_error_code(crimson::net::error::bad_peer_address));
}
conn.peer_addr = paddr;
logger().debug("{} UPDATE: peer_addr={}", conn, conn.peer_addr);
logger().warn("{} we don't know how to reconnect to peer {}",
conn, conn.target_addr);
throw std::system_error(
- make_error_code(ceph::net::error::bad_peer_address));
+ make_error_code(crimson::net::error::bad_peer_address));
}
conn.set_peer_id(client_ident.gid());
} else {
logger().warn("{} peer's address {} is not v2", conn, paddr);
throw std::system_error(
- make_error_code(ceph::net::error::bad_peer_address));
+ make_error_code(crimson::net::error::bad_peer_address));
}
if (conn.peer_addr == entity_addr_t()) {
conn.peer_addr = paddr;
" reconnect failed",
conn, paddr, conn.peer_addr);
throw std::system_error(
- make_error_code(ceph::net::error::bad_peer_address));
+ make_error_code(crimson::net::error::bad_peer_address));
}
peer_global_seq = reconnect.global_seq();
logger().warn("{} my_addr_from_peer {} port/nonce doesn't match myaddr {}",
conn, _my_addr_from_peer, messenger.get_myaddr());
throw std::system_error(
- make_error_code(ceph::net::error::bad_peer_address));
+ make_error_code(crimson::net::error::bad_peer_address));
}
return messenger.learned_addr(_my_addr_from_peer, conn);
}).then([this] {
#endif
}
-} // namespace ceph::net
+} // namespace crimson::net
#include "msg/async/frames_v2.h"
#include "msg/async/crypto_onwire.h"
-namespace ceph::net {
+namespace crimson::net {
class ProtocolV2 final : public Protocol {
public:
void execute_server_wait();
};
-} // namespace ceph::net
+} // namespace crimson::net
#include "crimson/common/log.h"
#include "Errors.h"
-namespace ceph::net {
+namespace crimson::net {
namespace {
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_ms);
+ return crimson::get_logger(ceph_subsys_ms);
}
// an input_stream consumer that reads buffer segments into a bufferlist up to
break;
case bp_action_t::FAULT:
logger().info("[Test] got FAULT");
- throw std::system_error(make_error_code(ceph::net::error::negotiation_failure));
+ throw std::system_error(make_error_code(crimson::net::error::negotiation_failure));
case bp_action_t::BLOCK:
logger().info("[Test] got BLOCK");
return blocker->block();
}
#endif
-} // namespace ceph::net
+} // namespace crimson::net
#include "Interceptor.h"
#endif
-namespace ceph::net {
+namespace crimson::net {
class Socket;
using SocketFRef = seastar::foreign_ptr<std::unique_ptr<Socket>>;
#endif
};
-} // namespace ceph::net
+} // namespace crimson::net
#include "Interceptor.h"
#endif
-using namespace ceph::net;
+using namespace crimson::net;
SocketConnection::SocketConnection(SocketMessenger& messenger,
Dispatcher& dispatcher,
SocketConnection::~SocketConnection() {}
-ceph::net::Messenger*
+crimson::net::Messenger*
SocketConnection::get_messenger() const {
return &messenger;
}
#include "crimson/net/Socket.h"
#include "crimson/thread/Throttle.h"
-namespace ceph::net {
+namespace crimson::net {
class Dispatcher;
class Protocol;
side = _side;
}
- ceph::net::Policy<ceph::thread::Throttle> policy;
+ ceph::net::Policy<crimson::thread::Throttle> policy;
/// the seq num of the last transmitted message
seq_num_t out_seq = 0;
friend class ProtocolV2;
};
-} // namespace ceph::net
+} // namespace crimson::net
#include "Dispatcher.h"
#include "Socket.h"
-using namespace ceph::net;
-
namespace {
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_ms);
+ return crimson::get_logger(ceph_subsys_ms);
}
}
+namespace crimson::net {
+
SocketMessenger::SocketMessenger(const entity_name_t& myname,
const std::string& logic_name,
uint32_t nonce,
});
}
-seastar::future<ceph::net::ConnectionXRef>
+seastar::future<crimson::net::ConnectionXRef>
SocketMessenger::connect(const entity_addr_t& peer_addr, const entity_type_t& peer_type)
{
// make sure we connect to a valid peer_addr
return seastar::now();
}
-seastar::foreign_ptr<ceph::net::ConnectionRef>
+seastar::foreign_ptr<crimson::net::ConnectionRef>
SocketMessenger::do_connect(const entity_addr_t& peer_addr, const entity_type_t& peer_type)
{
if (auto found = lookup_conn(peer_addr); found) {
logger().warn("{} peer_addr_for_me {} type/family/IP doesn't match myaddr {}",
conn, peer_addr_for_me, msgr.get_myaddr());
throw std::system_error(
- make_error_code(ceph::net::error::bad_peer_address));
+ make_error_code(crimson::net::error::bad_peer_address));
}
return seastar::now();
}
logger().warn("{} peer_addr_for_me {} type doesn't match myaddr {}",
conn, peer_addr_for_me, msgr.get_myaddr());
throw std::system_error(
- make_error_code(ceph::net::error::bad_peer_address));
+ make_error_code(crimson::net::error::bad_peer_address));
}
if (msgr.get_myaddr().get_family() != peer_addr_for_me.get_family()) {
logger().warn("{} peer_addr_for_me {} family doesn't match myaddr {}",
conn, peer_addr_for_me, msgr.get_myaddr());
throw std::system_error(
- make_error_code(ceph::net::error::bad_peer_address));
+ make_error_code(crimson::net::error::bad_peer_address));
}
if (msgr.get_myaddr().is_blank_ip()) {
entity_addr_t addr = peer_addr_for_me;
logger().warn("{} peer_addr_for_me {} IP doesn't match myaddr {}",
conn, peer_addr_for_me, msgr.get_myaddr());
throw std::system_error(
- make_error_code(ceph::net::error::bad_peer_address));
+ make_error_code(crimson::net::error::bad_peer_address));
} else {
return seastar::now();
}
return seed % seastar::smp::count;
}
-ceph::net::SocketConnectionRef SocketMessenger::lookup_conn(const entity_addr_t& addr)
+crimson::net::SocketConnectionRef SocketMessenger::lookup_conn(const entity_addr_t& addr)
{
if (auto found = connections.find(addr);
found != connections.end()) {
return ++msgr.global_seq;
});
}
+
+} // namespace crimson::net
#include "Messenger.h"
#include "SocketConnection.h"
-namespace ceph::net {
+namespace crimson::net {
class SocketMessenger final : public Messenger, public seastar::peering_sharded_service<SocketMessenger> {
const int master_sid;
}
};
-} // namespace ceph::net
+} // namespace crimson::net
#include "cyan_object.h"
-namespace ceph::os
+namespace crimson::os
{
Collection::Collection(const coll_t& c)
Collection::ObjectRef Collection::create_object() const
{
- return new ceph::os::Object{};
+ return new crimson::os::Object{};
}
Collection::ObjectRef Collection::get_object(ghobject_t oid)
#include "futurized_collection.h"
-namespace ceph::os {
+namespace crimson::os {
class Object;
/**
#include "cyan_object.h"
#include "include/encoding.h"
-namespace ceph::os {
+namespace crimson::os {
size_t Object::get_size() const {
return data.length();
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include "include/buffer.h"
-namespace ceph::os {
+namespace crimson::os {
struct Object : public boost::intrusive_ref_counter<
Object,
namespace {
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_filestore);
+ return crimson::get_logger(ceph_subsys_filestore);
}
}
-using ceph::common::local_conf;
+using crimson::common::local_conf;
-namespace ceph::os {
+namespace crimson::os {
using ObjectRef = boost::intrusive_ptr<Object>;
{
logger().debug("{}", __func__);
store_statfs_t st;
- st.total = ceph::common::local_conf().get_val<Option::size_t>("memstore_device_bytes");
+ st.total = crimson::common::local_conf().get_val<Option::size_t>("memstore_device_bytes");
st.available = st.total - used_bytes;
return st;
}
}
seastar::future<> CyanStore::do_transaction(CollectionRef ch,
- Transaction&& t)
+ ceph::os::Transaction&& t)
{
+ using ceph::os::Transaction;
int r = 0;
try {
auto i = t.begin();
while (i.have_op()) {
- Transaction::Op* op = i.decode_op();
r = 0;
- switch (op->op) {
+ switch (auto op = i.decode_op(); op->op) {
case Transaction::OP_NOP:
break;
case Transaction::OP_REMOVE:
#include "futurized_store.h"
namespace ceph::os {
+class Transaction;
+}
+namespace crimson::os {
class Collection;
-class Transaction;
class CyanStore final : public FuturizedStore {
constexpr static unsigned MAX_KEYS_PER_OMAP_GET_CALL = 32;
seastar::future<std::vector<coll_t>> list_collections() final;
seastar::future<> do_transaction(CollectionRef ch,
- Transaction&& txn) final;
+ ceph::os::Transaction&& txn) final;
seastar::future<> write_meta(const std::string& key,
const std::string& value) final;
#include "osd/osd_types.h"
-namespace ceph::os {
+namespace crimson::os {
class FuturizedCollection
: public boost::intrusive_ref_counter<FuturizedCollection,
#include "futurized_store.h"
#include "cyan_store.h"
-namespace ceph::os {
+namespace crimson::os {
-std::unique_ptr<FuturizedStore> FuturizedStore::create(const std::string& type,
- const std::string& data)
+std::unique_ptr<FuturizedStore>
+FuturizedStore::create(const std::string& type,
+ const std::string& data)
{
if (type == "memstore") {
- return std::make_unique<ceph::os::CyanStore>(data);
+ return std::make_unique<crimson::os::CyanStore>(data);
} else {
ceph_abort_msgf("unsupported objectstore type: %s", type.c_str());
return {};
#include "osd/osd_types.h"
namespace ceph::os {
+class Transaction;
+}
+namespace crimson::os {
class FuturizedCollection;
-class Transaction;
class FuturizedStore {
public:
- // TODO: replace with the ceph::errorator concept
+ // TODO: replace with the crimson::errorator concept
template <class ConcreteExceptionT>
class Exception : public std::logic_error {
public:
virtual seastar::future<std::vector<coll_t>> list_collections() = 0;
virtual seastar::future<> do_transaction(CollectionRef ch,
- Transaction&& txn) = 0;
+ ceph::os::Transaction&& txn) = 0;
virtual seastar::future<> write_meta(const std::string& key,
const std::string& value) = 0;
#include <vector>
#endif
-namespace ceph::osd {
+namespace crimson::osd {
struct peer_shard_t {
pg_shard_t shard;
eversion_t last_complete_ondisk;
seastar::future<>
-ChainedDispatchers::ms_dispatch(ceph::net::Connection* conn,
+ChainedDispatchers::ms_dispatch(crimson::net::Connection* conn,
MessageRef m) {
return seastar::do_for_each(dispatchers, [conn, m](Dispatcher* dispatcher) {
return dispatcher->ms_dispatch(conn, m);
}
seastar::future<>
-ChainedDispatchers::ms_handle_accept(ceph::net::ConnectionRef conn) {
+ChainedDispatchers::ms_handle_accept(crimson::net::ConnectionRef conn) {
return seastar::do_for_each(dispatchers, [conn](Dispatcher* dispatcher) {
return dispatcher->ms_handle_accept(conn);
});
}
seastar::future<>
-ChainedDispatchers::ms_handle_connect(ceph::net::ConnectionRef conn) {
+ChainedDispatchers::ms_handle_connect(crimson::net::ConnectionRef conn) {
return seastar::do_for_each(dispatchers, [conn](Dispatcher* dispatcher) {
return dispatcher->ms_handle_connect(conn);
});
}
seastar::future<>
-ChainedDispatchers::ms_handle_reset(ceph::net::ConnectionRef conn) {
+ChainedDispatchers::ms_handle_reset(crimson::net::ConnectionRef conn) {
return seastar::do_for_each(dispatchers, [conn](Dispatcher* dispatcher) {
return dispatcher->ms_handle_reset(conn);
});
}
seastar::future<>
-ChainedDispatchers::ms_handle_remote_reset(ceph::net::ConnectionRef conn) {
+ChainedDispatchers::ms_handle_remote_reset(crimson::net::ConnectionRef conn) {
return seastar::do_for_each(dispatchers, [conn](Dispatcher* dispatcher) {
return dispatcher->ms_handle_remote_reset(conn);
});
// it requires changing the ms_ methods to return a bool. so as an intermediate
// solution, we are using an observer dispatcher to notify all the interested
// or unintersted parties.
-class ChainedDispatchers : public ceph::net::Dispatcher {
+class ChainedDispatchers : public crimson::net::Dispatcher {
std::deque<Dispatcher*> dispatchers;
public:
void push_front(Dispatcher* dispatcher) {
void push_back(Dispatcher* dispatcher) {
dispatchers.push_back(dispatcher);
}
- seastar::future<> ms_dispatch(ceph::net::Connection* conn, MessageRef m) override;
- seastar::future<> ms_handle_accept(ceph::net::ConnectionRef conn) override;
- seastar::future<> ms_handle_connect(ceph::net::ConnectionRef conn) override;
- seastar::future<> ms_handle_reset(ceph::net::ConnectionRef conn) override;
- seastar::future<> ms_handle_remote_reset(ceph::net::ConnectionRef conn) override;
+ seastar::future<> ms_dispatch(crimson::net::Connection* conn, MessageRef m) override;
+ seastar::future<> ms_handle_accept(crimson::net::ConnectionRef conn) override;
+ seastar::future<> ms_handle_connect(crimson::net::ConnectionRef conn) override;
+ seastar::future<> ms_handle_reset(crimson::net::ConnectionRef conn) override;
+ seastar::future<> ms_handle_remote_reset(crimson::net::ConnectionRef conn) override;
};
ECBackend::ECBackend(shard_id_t shard,
ECBackend::CollectionRef coll,
- ceph::osd::ShardServices& shard_services,
+ crimson::osd::ShardServices& shard_services,
const ec_profile_t&,
uint64_t)
: PGBackend{shard, coll, &shard_services.get_store()}
return seastar::make_ready_future<bufferlist>();
}
-seastar::future<ceph::osd::acked_peers_t>
+seastar::future<crimson::osd::acked_peers_t>
ECBackend::_submit_transaction(std::set<pg_shard_t>&& pg_shards,
const hobject_t& hoid,
ceph::os::Transaction&& txn,
eversion_t ver)
{
// todo
- return seastar::make_ready_future<ceph::osd::acked_peers_t>();
+ return seastar::make_ready_future<crimson::osd::acked_peers_t>();
}
public:
ECBackend(shard_id_t shard,
CollectionRef coll,
- ceph::osd::ShardServices& shard_services,
+ crimson::osd::ShardServices& shard_services,
const ec_profile_t& ec_profile,
uint64_t stripe_width);
private:
uint64_t off,
uint64_t len,
uint32_t flags) override;
- seastar::future<ceph::osd::acked_peers_t>
+ seastar::future<crimson::osd::acked_peers_t>
_submit_transaction(std::set<pg_shard_t>&& pg_shards,
const hobject_t& hoid,
ceph::os::Transaction&& txn,
epoch_t min_epoch, epoch_t max_epoch,
eversion_t ver) final;
CollectionRef coll;
- ceph::os::FuturizedStore* store;
+ crimson::os::FuturizedStore* store;
};
#include <exception>
#include <system_error>
-namespace ceph::osd {
+namespace crimson::osd {
class error : private std::system_error {
public:
error(const std::errc ec)
input_output_error() : error(std::errc::io_error) {}
};
-} // namespace ceph::osd
+} // namespace crimson::osd
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
#include "heartbeat.h"
#include <boost/range/join.hpp>
#include "osd/OSDMap.h"
-using ceph::common::local_conf;
+using crimson::common::local_conf;
namespace {
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_osd);
+ return crimson::get_logger(ceph_subsys_osd);
}
}
-Heartbeat::Heartbeat(const ceph::osd::ShardServices& service,
- ceph::mon::Client& monc,
- ceph::net::Messenger& front_msgr,
- ceph::net::Messenger& back_msgr)
+Heartbeat::Heartbeat(const crimson::osd::ShardServices& service,
+ crimson::mon::Client& monc,
+ crimson::net::Messenger& front_msgr,
+ crimson::net::Messenger& back_msgr)
: service{service},
monc{monc},
front_msgr{front_msgr},
addr.set_port(0);
}
- using ceph::net::SocketPolicy;
+ using crimson::net::SocketPolicy;
front_msgr.set_policy(entity_name_t::TYPE_OSD,
SocketPolicy::stateless_server(0));
back_msgr.set_policy(entity_name_t::TYPE_OSD,
}
seastar::future<>
-Heartbeat::start_messenger(ceph::net::Messenger& msgr,
+Heartbeat::start_messenger(crimson::net::Messenger& msgr,
const entity_addrvec_t& addrs)
{
return msgr.try_bind(addrs,
});
}
-seastar::future<> Heartbeat::ms_dispatch(ceph::net::Connection* conn,
+seastar::future<> Heartbeat::ms_dispatch(crimson::net::Connection* conn,
MessageRef m)
{
switch (m->get_type()) {
}
}
-seastar::future<> Heartbeat::ms_handle_reset(ceph::net::ConnectionRef conn)
+seastar::future<> Heartbeat::ms_handle_reset(crimson::net::ConnectionRef conn)
{
auto found = std::find_if(peers.begin(), peers.end(),
[conn](const peers_map_t::value_type& peer) {
});
}
-seastar::future<> Heartbeat::handle_osd_ping(ceph::net::Connection* conn,
+seastar::future<> Heartbeat::handle_osd_ping(crimson::net::Connection* conn,
Ref<MOSDPing> m)
{
switch (m->op) {
}
}
-seastar::future<> Heartbeat::handle_ping(ceph::net::Connection* conn,
+seastar::future<> Heartbeat::handle_ping(crimson::net::Connection* conn,
Ref<MOSDPing> m)
{
auto min_message = static_cast<uint32_t>(
return conn->send(reply);
}
-seastar::future<> Heartbeat::handle_reply(ceph::net::Connection* conn,
+seastar::future<> Heartbeat::handle_reply(crimson::net::Connection* conn,
Ref<MOSDPing> m)
{
const osd_id_t from = m->get_source().num();
const utime_t sent_stamp{now};
[[maybe_unused]] auto [reply, added] =
info.ping_history.emplace(sent_stamp, reply_t{deadline, 0});
- std::vector<ceph::net::ConnectionRef> conns{info.con_front,
- info.con_back};
+ std::vector<crimson::net::ConnectionRef> conns{info.con_front,
+ info.con_back};
return seastar::parallel_for_each(std::move(conns),
[sent_stamp, mnow, &reply=reply->second, this] (auto con) {
if (con) {
class MOSDPing;
-namespace ceph::osd {
+namespace crimson::osd {
class ShardServices;
}
-namespace ceph::mon {
+namespace crimson::mon {
class Client;
}
template<typename Message> using Ref = boost::intrusive_ptr<Message>;
-class Heartbeat : public ceph::net::Dispatcher {
+class Heartbeat : public crimson::net::Dispatcher {
public:
using osd_id_t = int;
- Heartbeat(const ceph::osd::ShardServices& service,
- ceph::mon::Client& monc,
- ceph::net::Messenger& front_msgr,
- ceph::net::Messenger& back_msgr);
+ Heartbeat(const crimson::osd::ShardServices& service,
+ crimson::mon::Client& monc,
+ crimson::net::Messenger& front_msgr,
+ crimson::net::Messenger& back_msgr);
seastar::future<> start(entity_addrvec_t front,
entity_addrvec_t back);
void set_require_authorizer(bool);
// Dispatcher methods
- seastar::future<> ms_dispatch(ceph::net::Connection* conn,
+ seastar::future<> ms_dispatch(crimson::net::Connection* conn,
MessageRef m) override;
- seastar::future<> ms_handle_reset(ceph::net::ConnectionRef conn) override;
+ seastar::future<> ms_handle_reset(crimson::net::ConnectionRef conn) override;
private:
- seastar::future<> handle_osd_ping(ceph::net::Connection* conn,
+ seastar::future<> handle_osd_ping(crimson::net::Connection* conn,
Ref<MOSDPing> m);
- seastar::future<> handle_ping(ceph::net::Connection* conn,
+ seastar::future<> handle_ping(crimson::net::Connection* conn,
Ref<MOSDPing> m);
- seastar::future<> handle_reply(ceph::net::Connection* conn,
+ seastar::future<> handle_reply(crimson::net::Connection* conn,
Ref<MOSDPing> m);
seastar::future<> handle_you_died();
/// add enough reporters for fast failure detection
seastar::future<> add_reporter_peers(int whoami);
- seastar::future<> start_messenger(ceph::net::Messenger& msgr,
+ seastar::future<> start_messenger(crimson::net::Messenger& msgr,
const entity_addrvec_t& addrs);
private:
- const ceph::osd::ShardServices& service;
- ceph::mon::Client& monc;
- ceph::net::Messenger& front_msgr;
- ceph::net::Messenger& back_msgr;
+ const crimson::osd::ShardServices& service;
+ crimson::mon::Client& monc;
+ crimson::net::Messenger& front_msgr;
+ crimson::net::Messenger& back_msgr;
seastar::timer<seastar::lowres_clock> timer;
// use real_clock so it can be converted to utime_t
};
struct PeerInfo {
/// peer connection (front)
- ceph::net::ConnectionRef con_front;
+ crimson::net::ConnectionRef con_front;
/// peer connection (back)
- ceph::net::ConnectionRef con_back;
+ crimson::net::ConnectionRef con_back;
/// time we sent our first ping request
clock::time_point first_tx;
/// last time we sent a ping request
#include "osd.h"
-using config_t = ceph::common::ConfigProxy;
+using config_t = crimson::common::ConfigProxy;
namespace fs = seastar::compat::filesystem;
void usage(const char* prog) {
return make_pair(std::move(ceph_args), std::move(app_args));
}
-using ceph::common::local_conf;
+using crimson::common::local_conf;
seastar::future<> make_keyring()
{
CEPH_ENTITY_TYPE_OSD,
&cluster_name,
&conf_file_list);
- seastar::sharded<ceph::osd::OSD> osd;
- seastar::sharded<ceph::net::SocketMessenger> cluster_msgr, client_msgr;
- seastar::sharded<ceph::net::SocketMessenger> hb_front_msgr, hb_back_msgr;
- using ceph::common::sharded_conf;
- using ceph::common::sharded_perf_coll;
+ seastar::sharded<crimson::osd::OSD> osd;
+ seastar::sharded<crimson::net::SocketMessenger> cluster_msgr, client_msgr;
+ seastar::sharded<crimson::net::SocketMessenger> hb_front_msgr, hb_back_msgr;
+ using crimson::common::sharded_conf;
+ using crimson::common::sharded_perf_coll;
try {
return app.run_deprecated(app_args.size(), const_cast<char**>(app_args.data()), [&] {
auto& config = app.configuration();
}
}
osd.start_single(whoami, nonce,
- reference_wrapper<ceph::net::Messenger>(cluster_msgr.local()),
- reference_wrapper<ceph::net::Messenger>(client_msgr.local()),
- reference_wrapper<ceph::net::Messenger>(hb_front_msgr.local()),
- reference_wrapper<ceph::net::Messenger>(hb_back_msgr.local())).get();
+ reference_wrapper<crimson::net::Messenger>(cluster_msgr.local()),
+ reference_wrapper<crimson::net::Messenger>(client_msgr.local()),
+ reference_wrapper<crimson::net::Messenger>(hb_front_msgr.local()),
+ reference_wrapper<crimson::net::Messenger>(hb_back_msgr.local())).get();
seastar::engine().at_exit([&] {
return osd.stop();
});
if (config.count("mkfs")) {
osd.invoke_on(
0,
- &ceph::osd::OSD::mkfs,
+ &crimson::osd::OSD::mkfs,
local_conf().get_val<uuid_d>("osd_uuid"),
local_conf().get_val<uuid_d>("fsid")).get();
}
if (config.count("mkkey") || config.count("mkfs")) {
seastar::engine().exit(0);
} else {
- osd.invoke_on(0, &ceph::osd::OSD::start).get();
+ osd.invoke_on(0, &crimson::osd::OSD::start).get();
}
});
});
try {
const auto& message = \
- reinterpret_cast<ceph::osd::OpsExecuter*>(hctx)->get_message();
+ reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->get_message();
*origin = message.get_orig_source_inst();
return 0;
- } catch (ceph::osd::error& e) {
+ } catch (crimson::osd::error& e) {
return -e.code().value();
}
}
OSDOp op{CEPH_OSD_OP_CREATE};
op.op.flags = (exclusive ? CEPH_OSD_OP_FLAG_EXCL : 0);
try {
- reinterpret_cast<ceph::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
+ reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
return 0;
- } catch (ceph::osd::error& e) {
+ } catch (crimson::osd::error& e) {
return -e.code().value();
}
}
// we're blocking here which presumes execution in Seastar's thread.
try {
- reinterpret_cast<ceph::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
+ reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
return 0;
- } catch (ceph::osd::error& e) {
+ } catch (crimson::osd::error& e) {
return -e.code().value();
}
}
// we're blocking here which presumes execution in Seastar's thread.
try {
- reinterpret_cast<ceph::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
- } catch (ceph::osd::error& e) {
+ reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
+ } catch (crimson::osd::error& e) {
return -e.code().value();
}
op.op.extent.length = len;
op.op.flags = op_flags;
try {
- reinterpret_cast<ceph::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
- } catch (ceph::osd::error& e) {
+ reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
+ } catch (crimson::osd::error& e) {
return -e.code().value();
}
outbl->claim(op.outdata);
op.op.flags = op_flags;
op.indata = *inbl;
try {
- reinterpret_cast<ceph::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
+ reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
return 0;
- } catch (ceph::osd::error& e) {
+ } catch (crimson::osd::error& e) {
return -e.code().value();
}
}
op.op.extent.length = inbl->length();
op.indata = *inbl;
try {
- reinterpret_cast<ceph::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
+ reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
return 0;
- } catch (ceph::osd::error& e) {
+ } catch (crimson::osd::error& e) {
return -e.code().value();
}
}
top.op.extent.offset = 0;
top.op.extent.length = 0;
try {
- reinterpret_cast<ceph::osd::OpsExecuter*>(hctx)->execute_osd_op(top).get();
+ reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->execute_osd_op(top).get();
return 0;
- } catch (ceph::osd::error& e) {
+ } catch (crimson::osd::error& e) {
return -e.code().value();
}
}
wop.indata = *inbl;
try {
- reinterpret_cast<ceph::osd::OpsExecuter*>(hctx)->execute_osd_op(wop).get();
+ reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->execute_osd_op(wop).get();
return 0;
- } catch (ceph::osd::error& e) {
+ } catch (crimson::osd::error& e) {
return -e.code().value();
}
}
op.op.extent.offset = ofs;
op.op.extent.length = 0;
try {
- reinterpret_cast<ceph::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
+ reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
return 0;
- } catch (ceph::osd::error& e) {
+ } catch (crimson::osd::error& e) {
return -e.code().value();
}
}
op.op.extent.offset = ofs;
op.op.extent.length = len;
try {
- reinterpret_cast<ceph::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
+ reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
return 0;
- } catch (ceph::osd::error& e) {
+ } catch (crimson::osd::error& e) {
return -e.code().value();
}
}
op.op.xattr.name_len = strlen(name);
op.indata.append(name, op.op.xattr.name_len);
try {
- reinterpret_cast<ceph::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
+ reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
outbl->claim(op.outdata);
return outbl->length();
- } catch (ceph::osd::error& e) {
+ } catch (crimson::osd::error& e) {
return -e.code().value();
}
}
op.indata.append(name, op.op.xattr.name_len);
op.indata.append(*inbl);
try {
- reinterpret_cast<ceph::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
+ reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
return 0;
- } catch (ceph::osd::error& e) {
+ } catch (crimson::osd::error& e) {
return -e.code().value();
}
}
OSDOp op{op = CEPH_OSD_OP_ROLLBACK};
op.op.snap.snapid = snapid;
try {
- reinterpret_cast<ceph::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
+ reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
return 0;
- } catch (ceph::osd::error& e) {
+ } catch (crimson::osd::error& e) {
return -e.code().value();
}
}
encode(start_obj, op.indata);
encode(max_to_get, op.indata);
try {
- reinterpret_cast<ceph::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
- } catch (ceph::osd::error& e) {
+ reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
+ } catch (crimson::osd::error& e) {
return -e.code().value();
}
try {
encode(max_to_get, op.indata);
encode(filter_prefix, op.indata);
try {
- reinterpret_cast<ceph::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
- } catch (ceph::osd::error& e) {
+ reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
+ } catch (crimson::osd::error& e) {
return -e.code().value();
}
try {
encode(k, op.indata);
}
try {
- reinterpret_cast<ceph::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
- } catch (ceph::osd::error& e) {
+ reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
+ } catch (crimson::osd::error& e) {
return -e.code().value();
}
std::map<std::string, ceph::bufferlist> m;
}
try {
- reinterpret_cast<ceph::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
+ reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
return 0;
- } catch (ceph::osd::error& e) {
+ } catch (crimson::osd::error& e) {
return -e.code().value();
}
}
encode(*map, op.indata);
try {
- reinterpret_cast<ceph::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
+ reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->execute_osd_op(op).get();
return 0;
- } catch (ceph::osd::error& e) {
+ } catch (crimson::osd::error& e) {
return -e.code().value();
}
}
{
try {
const auto& message = \
- reinterpret_cast<ceph::osd::OpsExecuter*>(hctx)->get_message();
+ reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->get_message();
return message.get_features();
- } catch (ceph::osd::error& e) {
+ } catch (crimson::osd::error& e) {
return -e.code().value();
}
}
namespace {
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_osd);
+ return crimson::get_logger(ceph_subsys_osd);
}
}
-namespace ceph::osd {
+namespace crimson::osd {
seastar::future<> OpsExecuter::do_op_call(OSDOp& osd_op)
{
bp.copy(osd_op.op.cls.indata_len, indata);
} catch (buffer::error&) {
logger().warn("call unable to decode class + method + indata");
- throw ceph::osd::invalid_argument{};
+ throw crimson::osd::invalid_argument{};
}
// NOTE: opening a class can actually result in dlopen(), and thus
if (r) {
logger().warn("class {} open got {}", cname, cpp_strerror(r));
if (r == -ENOENT) {
- throw ceph::osd::operation_not_supported{};
+ throw crimson::osd::operation_not_supported{};
} else if (r == -EPERM) {
// propagate permission errors
- throw ceph::osd::permission_denied{};
+ throw crimson::osd::permission_denied{};
}
- throw ceph::osd::input_output_error{};
+ throw crimson::osd::input_output_error{};
}
ClassHandler::ClassMethod* method = cls->get_method(mname);
if (!method) {
logger().warn("call method {}.{} does not exist", cname, mname);
- throw ceph::osd::operation_not_supported{};
+ throw crimson::osd::operation_not_supported{};
}
const auto flags = method->get_flags();
if (!os->exists && (flags & CLS_METHOD_WR) == 0) {
- throw ceph::osd::object_not_found{};
+ throw crimson::osd::object_not_found{};
}
#if 0
indata, outdata);
if (num_read > prev_rd && !(flags & CLS_METHOD_RD)) {
logger().error("method tried to read object but is not marked RD");
- throw ceph::osd::input_output_error{};
+ throw crimson::osd::input_output_error{};
}
if (num_write > prev_wr && !(flags & CLS_METHOD_WR)) {
logger().error("method tried to update object but is not marked WR");
- throw ceph::osd::input_output_error{};
+ throw crimson::osd::input_output_error{};
}
// for write calls we never return data expect errors. For details refer
osd_op.outdata.claim_append(outdata);
}
if (ret < 0) {
- throw ceph::osd::make_error(ret);
+ throw crimson::osd::make_error(ret);
}
});
}
} else {
std::size_t dot = type.find(".");
if (dot == type.npos || dot == 0 || dot == type.size() - 1) {
- throw ceph::osd::invalid_argument{};
+ throw crimson::osd::invalid_argument{};
}
const std::string class_name = type.substr(0, dot);
logger().warn("can't open class {}: {}", class_name, cpp_strerror(r));
if (r == -EPERM) {
- // propogate permission error
+ // propagate permission error
- throw ceph::osd::permission_denied{};
+ throw crimson::osd::permission_denied{};
} else {
- throw ceph::osd::invalid_argument{};
+ throw crimson::osd::invalid_argument{};
}
} else {
ceph_assert(cls);
ClassHandler::ClassFilter * const class_filter = cls->get_filter(filter_name);
if (class_filter == nullptr) {
logger().warn("can't find filter {} in class {}", filter_name, class_name);
- throw ceph::osd::invalid_argument{};
+ throw crimson::osd::invalid_argument{};
}
filter.reset(class_filter->fn());
// give an error rather than asserting out.
logger().warn("buggy class {} failed to construct filter {}",
class_name, filter_name);
- throw ceph::osd::invalid_argument{};
+ throw crimson::osd::invalid_argument{};
}
}
int r = filter->init(iter);
if (r < 0) {
logger().warn("error initializing filter {}: {}", type, cpp_strerror(r));
- throw ceph::osd::invalid_argument{};
+ throw crimson::osd::invalid_argument{};
}
// successfully constructed and initialized, return it.
return backend.list_objects(lower_bound, limit).then(
[&backend, filter, nspace](auto objects, auto next) {
auto in_my_namespace = [&nspace](const hobject_t& obj) {
- using ceph::common::local_conf;
+ using crimson::common::local_conf;
if (obj.get_namespace() == local_conf()->osd_hit_set_namespace) {
return false;
} else if (nspace == librados::all_nspaces) {
ceph::decode(mname, bp);
ceph::decode(type, bp);
} catch (const buffer::error&) {
- throw ceph::osd::invalid_argument{};
+ throw crimson::osd::invalid_argument{};
}
auto filter = get_pgls_filter(type, bp);
});
case CEPH_OSD_OP_OMAPSETVALS:
if (!pg.get_pool().info.supports_omap()) {
- throw ceph::osd::operation_not_supported{};
+ throw crimson::osd::operation_not_supported{};
}
return do_write_op([&osd_op] (auto& backend, auto& os, auto& txn) {
return backend.omap_set_vals(os, osd_op, txn);
ceph::decode(mname, bp);
ceph::decode(type, bp);
} catch (const buffer::error&) {
- throw ceph::osd::invalid_argument{};
+ throw crimson::osd::invalid_argument{};
}
auto filter = get_pgls_filter(type, bp);
}
}
-} // namespace ceph::osd
+} // namespace crimson::osd
class PGLSFilter;
class OSDOp;
-namespace ceph::osd {
+namespace crimson::osd {
class OpsExecuter {
// an operation can be divided into two stages: main and effect-exposing
// one. The former is performed immediately on call to `do_osd_op()` while
}
seastar::future<> dont_do_legacy_op() {
- throw ceph::osd::operation_not_supported();
+ throw crimson::osd::operation_not_supported();
}
public:
});
}
-} // namespace ceph::osd
+} // namespace crimson::osd
namespace {
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_osd);
+ return crimson::get_logger(ceph_subsys_osd);
}
static constexpr int TICK_INTERVAL = 1;
}
-using ceph::common::local_conf;
-using ceph::os::FuturizedStore;
+using crimson::common::local_conf;
+using crimson::os::FuturizedStore;
-namespace ceph::osd {
+namespace crimson::osd {
OSD::OSD(int id, uint32_t nonce,
- ceph::net::Messenger& cluster_msgr,
- ceph::net::Messenger& public_msgr,
- ceph::net::Messenger& hb_front_msgr,
- ceph::net::Messenger& hb_back_msgr)
+ crimson::net::Messenger& cluster_msgr,
+ crimson::net::Messenger& public_msgr,
+ crimson::net::Messenger& hb_front_msgr,
+ crimson::net::Messenger& hb_back_msgr)
: whoami{id},
nonce{nonce},
// do this in background
beacon_timer{[this] { (void)send_beacon(); }},
cluster_msgr{cluster_msgr},
public_msgr{public_msgr},
- monc{new ceph::mon::Client{public_msgr, *this}},
- mgrc{new ceph::mgr::Client{public_msgr, *this}},
- store{ceph::os::FuturizedStore::create(
+ monc{new crimson::mon::Client{public_msgr, *this}},
+ mgrc{new crimson::mgr::Client{public_msgr, *this}},
+ store{crimson::os::FuturizedStore::create(
local_conf().get_val<std::string>("osd_objectstore"),
local_conf().get_val<std::string>("osd_data"))},
shard_services{*this, cluster_msgr, public_msgr, *monc, *mgrc, *store},
CEPH_FEATURE_UID |
CEPH_FEATURE_PGID64 |
CEPH_FEATURE_OSDENC;
- using ceph::net::SocketPolicy;
+ using crimson::net::SocketPolicy;
public_msgr.set_default_policy(SocketPolicy::stateless_server(0));
public_msgr.set_policy(entity_name_t::TYPE_MON,
});
}
-seastar::future<> OSD::ms_dispatch(ceph::net::Connection* conn, MessageRef m)
+seastar::future<> OSD::ms_dispatch(crimson::net::Connection* conn, MessageRef m)
{
if (state.is_stopping()) {
return seastar::now();
}
}
-seastar::future<> OSD::ms_handle_connect(ceph::net::ConnectionRef conn)
+seastar::future<> OSD::ms_handle_connect(crimson::net::ConnectionRef conn)
{
if (conn->get_peer_type() != CEPH_ENTITY_TYPE_MON) {
return seastar::now();
}
}
-seastar::future<> OSD::ms_handle_reset(ceph::net::ConnectionRef conn)
+seastar::future<> OSD::ms_handle_reset(crimson::net::ConnectionRef conn)
{
// TODO: cleanup the session attached to this connection
logger().warn("ms_handle_reset");
return seastar::now();
}
-seastar::future<> OSD::ms_handle_remote_reset(ceph::net::ConnectionRef conn)
+seastar::future<> OSD::ms_handle_remote_reset(crimson::net::ConnectionRef conn)
{
logger().warn("ms_handle_remote_reset");
return seastar::now();
});
}
-bool OSD::require_mon_peer(ceph::net::Connection *conn, Ref<Message> m)
+bool OSD::require_mon_peer(crimson::net::Connection *conn, Ref<Message> m)
{
if (!conn->peer_is_mon()) {
logger().info("{} received from non-mon {}, {}",
});
}
-seastar::future<> OSD::handle_osd_map(ceph::net::Connection* conn,
+seastar::future<> OSD::handle_osd_map(crimson::net::Connection* conn,
Ref<MOSDMap> m)
{
logger().info("handle_osd_map {}", *m);
});
}
-seastar::future<> OSD::handle_osd_op(ceph::net::Connection* conn,
+seastar::future<> OSD::handle_osd_op(crimson::net::Connection* conn,
Ref<MOSDOp> m)
{
shard_services.start_operation<ClientRequest>(
return seastar::now();
}
-seastar::future<> OSD::handle_rep_op(ceph::net::Connection* conn,
+seastar::future<> OSD::handle_rep_op(crimson::net::Connection* conn,
Ref<MOSDRepOp> m)
{
m->finish_decode();
return seastar::now();
}
-seastar::future<> OSD::handle_rep_op_reply(ceph::net::Connection* conn,
+seastar::future<> OSD::handle_rep_op_reply(crimson::net::Connection* conn,
Ref<MOSDRepOpReply> m)
{
const auto& pgs = pg_map.get_pgs();
}
seastar::future<> OSD::handle_peering_op(
- ceph::net::Connection* conn,
+ crimson::net::Connection* conn,
Ref<MOSDPeeringOp> m)
{
const int from = m->get_source().num();
class OSDMeta;
class Heartbeat;
-namespace ceph::mon {
+namespace ceph::os {
+ class Transaction;
+}
+
+namespace crimson::mon {
class Client;
}
-namespace ceph::net {
+namespace crimson::net {
class Messenger;
}
-namespace ceph::os {
+namespace crimson::os {
class FuturizedStore;
- class Transaction;
}
-namespace ceph::osd {
+namespace crimson::osd {
class PG;
-class OSD final : public ceph::net::Dispatcher,
+class OSD final : public crimson::net::Dispatcher,
private OSDMapService,
- private ceph::common::AuthHandler,
- private ceph::mgr::WithStats {
+ private crimson::common::AuthHandler,
+ private crimson::mgr::WithStats {
seastar::gate gate;
const int whoami;
const uint32_t nonce;
seastar::timer<seastar::lowres_clock> beacon_timer;
// talk with osd
- ceph::net::Messenger& cluster_msgr;
+ crimson::net::Messenger& cluster_msgr;
// talk with client/mon/mgr
- ceph::net::Messenger& public_msgr;
+ crimson::net::Messenger& public_msgr;
ChainedDispatchers dispatchers;
- std::unique_ptr<ceph::mon::Client> monc;
- std::unique_ptr<ceph::mgr::Client> mgrc;
+ std::unique_ptr<crimson::mon::Client> monc;
+ std::unique_ptr<crimson::mgr::Client> mgrc;
SharedLRU<epoch_t, OSDMap> osdmaps;
SimpleLRU<epoch_t, bufferlist, false> map_bl_cache;
cached_map_t osdmap;
// TODO: use a wrapper for ObjectStore
- std::unique_ptr<ceph::os::FuturizedStore> store;
+ std::unique_ptr<crimson::os::FuturizedStore> store;
std::unique_ptr<OSDMeta> meta_coll;
OSDState state;
OSDSuperblock superblock;
// Dispatcher methods
- seastar::future<> ms_dispatch(ceph::net::Connection* conn, MessageRef m) final;
- seastar::future<> ms_handle_connect(ceph::net::ConnectionRef conn) final;
- seastar::future<> ms_handle_reset(ceph::net::ConnectionRef conn) final;
- seastar::future<> ms_handle_remote_reset(ceph::net::ConnectionRef conn) final;
+ seastar::future<> ms_dispatch(crimson::net::Connection* conn, MessageRef m) final;
+ seastar::future<> ms_handle_connect(crimson::net::ConnectionRef conn) final;
+ seastar::future<> ms_handle_reset(crimson::net::ConnectionRef conn) final;
+ seastar::future<> ms_handle_remote_reset(crimson::net::ConnectionRef conn) final;
// mgr::WithStats methods
MessageRef get_stats() final;
void handle_authentication(const EntityName& name,
const AuthCapsInfo& caps) final;
- ceph::osd::ShardServices shard_services;
+ crimson::osd::ShardServices shard_services;
std::unordered_map<spg_t, Ref<PG>> pgs;
std::unique_ptr<Heartbeat> heartbeat;
public:
OSD(int id, uint32_t nonce,
- ceph::net::Messenger& cluster_msgr,
- ceph::net::Messenger& client_msgr,
- ceph::net::Messenger& hb_front_msgr,
- ceph::net::Messenger& hb_back_msgr);
+ crimson::net::Messenger& cluster_msgr,
+ crimson::net::Messenger& client_msgr,
+ crimson::net::Messenger& hb_front_msgr,
+ crimson::net::Messenger& hb_back_msgr);
~OSD() final;
seastar::future<> mkfs(uuid_d osd_uuid, uuid_d cluster_fsid);
void write_superblock(ceph::os::Transaction& t);
seastar::future<> read_superblock();
- bool require_mon_peer(ceph::net::Connection *conn, Ref<Message> m);
+ bool require_mon_peer(crimson::net::Connection *conn, Ref<Message> m);
seastar::future<Ref<PG>> handle_pg_create_info(
std::unique_ptr<PGCreateInfo> info);
- seastar::future<> handle_osd_map(ceph::net::Connection* conn,
+ seastar::future<> handle_osd_map(crimson::net::Connection* conn,
Ref<MOSDMap> m);
- seastar::future<> handle_osd_op(ceph::net::Connection* conn,
+ seastar::future<> handle_osd_op(crimson::net::Connection* conn,
Ref<MOSDOp> m);
- seastar::future<> handle_rep_op(ceph::net::Connection* conn,
+ seastar::future<> handle_rep_op(crimson::net::Connection* conn,
Ref<MOSDRepOp> m);
- seastar::future<> handle_rep_op_reply(ceph::net::Connection* conn,
+ seastar::future<> handle_rep_op_reply(crimson::net::Connection* conn,
Ref<MOSDRepOpReply> m);
- seastar::future<> handle_peering_op(ceph::net::Connection* conn,
+ seastar::future<> handle_peering_op(crimson::net::Connection* conn,
Ref<MOSDPeeringOp> m);
seastar::future<> committed_osd_maps(version_t first,
#include "crimson/osd/osd_operations/peering_event.h"
#include "crimson/osd/osd_operations/replicated_request.h"
-namespace ceph::osd {
+namespace crimson::osd {
-struct OSDConnectionPriv : public ceph::net::Connection::user_private_t {
+struct OSDConnectionPriv : public crimson::net::Connection::user_private_t {
ClientRequest::ConnectionPipeline client_request_conn_pipeline;
RemotePeeringEvent::ConnectionPipeline peering_request_conn_pipeline;
RepRequest::ConnectionPipeline replicated_request_conn_pipeline;
};
-static OSDConnectionPriv &get_osd_priv(ceph::net::Connection *conn) {
+static OSDConnectionPriv &get_osd_priv(crimson::net::Connection *conn) {
if (!conn->has_user_private()) {
conn->set_user_private(std::make_unique<OSDConnectionPriv>());
}
#include "osd/osd_types.h"
namespace ceph::os {
+ class Transaction;
+}
+
+namespace crimson::os {
class FuturizedCollection;
class FuturizedStore;
- class Transaction;
}
/// metadata shared across PGs, or put in another way,
class OSDMeta {
template<typename T> using Ref = boost::intrusive_ptr<T>;
- ceph::os::FuturizedStore* store;
- Ref<ceph::os::FuturizedCollection> coll;
+ crimson::os::FuturizedStore* store;
+ Ref<crimson::os::FuturizedCollection> coll;
public:
- OSDMeta(Ref<ceph::os::FuturizedCollection> coll,
- ceph::os::FuturizedStore* store)
+ OSDMeta(Ref<crimson::os::FuturizedCollection> coll,
+ crimson::os::FuturizedStore* store)
: store{store}, coll{coll}
{}
// vim: ts=8 sw=2 smarttab
#include "osd_operation.h"
+#include "common/Formatter.h"
-namespace ceph::osd {
+namespace crimson::osd {
-void Operation::dump(Formatter *f)
+void Operation::dump(ceph::Formatter* f)
{
f->open_object_section("operation");
f->dump_string("type", get_type_name());
f->close_section();
}
-void Operation::dump_brief(Formatter *f)
+void Operation::dump_brief(ceph::Formatter* f)
{
f->open_object_section("operation");
f->dump_string("type", get_type_name());
return lhs;
}
-void Blocker::dump(Formatter *f) const
+void Blocker::dump(ceph::Formatter* f) const
{
f->open_object_section("blocker");
f->dump_string("op_type", get_type_name());
exit();
}
-void OrderedPipelinePhase::dump_detail(Formatter *f) const
+void OrderedPipelinePhase::dump_detail(ceph::Formatter* f) const
{
}
#pragma once
-#include <seastar/core/shared_mutex.hh>
-#include <seastar/core/future.hh>
-
-#include <vector>
-#include <array>
#include <algorithm>
+#include <array>
#include <set>
+#include <vector>
#include <boost/intrusive/list.hpp>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
+#include <seastar/core/future.hh>
+#include <seastar/core/shared_mutex.hh>
-#include "common/Formatter.h"
+namespace ceph {
+ class Formatter;
+}
-namespace ceph::osd {
+namespace crimson::osd {
enum class OperationTypeCode {
client_request = 0,
*/
class Blocker {
protected:
- virtual void dump_detail(Formatter *f) const = 0;
+ virtual void dump_detail(ceph::Formatter *f) const = 0;
public:
template <typename... T>
return blocking_future(this, std::move(f));
}
- void dump(Formatter *f) const;
+ void dump(ceph::Formatter *f) const;
virtual const char *get_type_name() const = 0;
id = in_id;
}
protected:
- virtual void dump_detail(Formatter *f) const = 0;
+ virtual void dump_detail(ceph::Formatter *f) const = 0;
public:
uint64_t get_id() const {
});
}
- void dump(Formatter *f);
- void dump_brief(Formatter *f);
+ void dump(ceph::Formatter *f);
+ void dump_brief(ceph::Formatter *f);
virtual ~Operation() = default;
};
using OperationRef = boost::intrusive_ptr<Operation>;
class OperationT : public Operation {
protected:
- virtual void dump_detail(Formatter *f) const = 0;
+ virtual void dump_detail(ceph::Formatter *f) const = 0;
public:
static constexpr const char *type_name = OP_NAMES[static_cast<int>(T::type)];
const char * name;
protected:
- virtual void dump_detail(Formatter *f) const final;
+ virtual void dump_detail(ceph::Formatter *f) const final;
const char *get_type_name() const final {
return name;
}
namespace {
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_osd);
+ return crimson::get_logger(ceph_subsys_osd);
}
}
-namespace ceph::osd {
+namespace crimson::osd {
ClientRequest::ClientRequest(
- OSD &osd, ceph::net::ConnectionRef conn, Ref<MOSDOp> &&m)
+ OSD &osd, crimson::net::ConnectionRef conn, Ref<MOSDOp> &&m)
: osd(osd), conn(conn), m(m)
{}
class MOSDOp;
-namespace ceph::osd {
+namespace crimson::osd {
class PG;
class OSD;
class ClientRequest final : public OperationT<ClientRequest> {
OSD &osd;
- ceph::net::ConnectionRef conn;
+ crimson::net::ConnectionRef conn;
Ref<MOSDOp> m;
OrderedPipelinePhase::Handle handle;
static constexpr OperationTypeCode type = OperationTypeCode::client_request;
- ClientRequest(OSD &osd, ceph::net::ConnectionRef, Ref<MOSDOp> &&m);
+ ClientRequest(OSD &osd, crimson::net::ConnectionRef, Ref<MOSDOp> &&m);
void print(std::ostream &) const final;
- void dump_detail(Formatter *f) const final;
+ void dump_detail(ceph::Formatter *f) const final;
namespace {
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_osd);
+ return crimson::get_logger(ceph_subsys_osd);
}
}
namespace {
-using namespace ceph::osd;
+using namespace crimson::osd;
struct compound_state {
seastar::promise<BufferedRecoveryMessages> promise;
PeeringSubEvent(compound_state_ref state, Args &&... args) :
RemotePeeringEvent(std::forward<Args>(args)...), state(state) {}
- seastar::future<> complete_rctx(Ref<ceph::osd::PG> pg) final {
+ seastar::future<> complete_rctx(Ref<crimson::osd::PG> pg) final {
logger().debug("{}: submitting ctx transaction", *this);
state->ctx.accept_buffered_messages(ctx);
state = {};
std::vector<OperationRef> handle_pg_create(
OSD &osd,
- ceph::net::ConnectionRef conn,
+ crimson::net::ConnectionRef conn,
compound_state_ref state,
Ref<MOSDPGCreate2> m)
{
} // namespace
-namespace ceph::osd {
+namespace crimson::osd {
CompoundPeeringRequest::CompoundPeeringRequest(
- OSD &osd, ceph::net::ConnectionRef conn, Ref<Message> m)
+ OSD &osd, crimson::net::ConnectionRef conn, Ref<Message> m)
: osd(osd),
conn(conn),
m(m)
});
}
-} // namespace ceph::osd
+} // namespace crimson::osd
#include "crimson/net/Connection.h"
#include "crimson/osd/osd_operation.h"
-namespace ceph::osd {
+namespace crimson::osd {
class OSD;
class PG;
private:
OSD &osd;
- ceph::net::ConnectionRef conn;
+ crimson::net::ConnectionRef conn;
Ref<Message> m;
public:
CompoundPeeringRequest(
- OSD &osd, ceph::net::ConnectionRef conn, Ref<Message> m);
+ OSD &osd, crimson::net::ConnectionRef conn, Ref<Message> m);
void print(std::ostream &) const final;
- void dump_detail(Formatter *f) const final;
+ void dump_detail(ceph::Formatter *f) const final;
namespace {
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_osd);
+ return crimson::get_logger(ceph_subsys_osd);
}
}
-namespace ceph::osd {
+namespace crimson::osd {
void PeeringEvent::print(std::ostream &lhs) const
{
#include "osd/PGPeeringEvent.h"
#include "osd/PeeringState.h"
-namespace ceph::osd {
+namespace ceph {
+ class Formatter;
+}
+
+namespace crimson::osd {
class OSD;
class ShardServices;
void print(std::ostream &) const final;
- void dump_detail(Formatter *f) const final;
+ void dump_detail(ceph::Formatter* f) const final;
seastar::future<> start();
};
class RemotePeeringEvent : public PeeringEvent {
protected:
OSD &osd;
- ceph::net::ConnectionRef conn;
+ crimson::net::ConnectionRef conn;
void on_pg_absent() final;
seastar::future<> complete_rctx(Ref<PG> pg) override;
};
template <typename... Args>
- RemotePeeringEvent(OSD &osd, ceph::net::ConnectionRef conn, Args&&... args) :
+ RemotePeeringEvent(OSD &osd, crimson::net::ConnectionRef conn, Args&&... args) :
PeeringEvent(std::forward<Args>(args)...),
osd(osd),
conn(conn)
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
-#include <seastar/core/future.hh>
+#include "crimson/osd/osd_operations/pg_advance_map.h"
#include <boost/smart_ptr/local_shared_ptr.hpp>
+#include <seastar/core/future.hh>
+
#include "include/types.h"
-#include "crimson/osd/osd_operations/pg_advance_map.h"
+#include "common/Formatter.h"
#include "crimson/osd/pg.h"
#include "crimson/osd/osd.h"
-#include "common/Formatter.h"
namespace {
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_osd);
+ return crimson::get_logger(ceph_subsys_osd);
}
}
-namespace ceph::osd {
+namespace crimson::osd {
PGAdvanceMap::PGAdvanceMap(
OSD &osd, Ref<PG> pg, epoch_t from, epoch_t to,
#include "crimson/common/type_helpers.h"
#include "osd/PeeringState.h"
-namespace ceph::osd {
+namespace ceph {
+ class Formatter;
+}
+
+namespace crimson::osd {
class OSD;
class PG;
~PGAdvanceMap();
void print(std::ostream &) const final;
- void dump_detail(Formatter *f) const final;
+ void dump_detail(ceph::Formatter *f) const final;
seastar::future<> start();
};
namespace {
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_osd);
+ return crimson::get_logger(ceph_subsys_osd);
}
}
-namespace ceph::osd {
+namespace crimson::osd {
RepRequest::RepRequest(OSD &osd,
- ceph::net::ConnectionRef&& conn,
+ crimson::net::ConnectionRef&& conn,
Ref<MOSDRepOp> &&req)
: osd{osd},
conn{std::move(conn)},
class MOSDRepOp;
-namespace ceph::osd {
+namespace ceph {
+ class Formatter;
+}
+
+namespace crimson::osd {
class OSD;
class PG;
friend RepRequest;
};
static constexpr OperationTypeCode type = OperationTypeCode::replicated_request;
- RepRequest(OSD&, ceph::net::ConnectionRef&&, Ref<MOSDRepOp>&&);
+ RepRequest(OSD&, crimson::net::ConnectionRef&&, Ref<MOSDRepOp>&&);
void print(std::ostream &) const final;
- void dump_detail(Formatter *f) const final;
+ void dump_detail(ceph::Formatter* f) const final;
seastar::future<> start();
private:
PGPipeline &pp(PG &pg);
OSD &osd;
- ceph::net::ConnectionRef conn;
+ crimson::net::ConnectionRef conn;
Ref<MOSDRepOp> req;
OrderedPipelinePhase::Handle handle;
};
namespace {
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_osd);
+ return crimson::get_logger(ceph_subsys_osd);
}
}
-namespace ceph::osd {
+namespace crimson::osd {
-void OSDMapGate::OSDMapBlocker::dump_detail(Formatter *f) const
+void OSDMapGate::OSDMapBlocker::dump_detail(ceph::Formatter *f) const
{
namespace ceph {
class Formatter;
- namespace osd {
- class ShardServices;
- }
}
-namespace ceph::osd {
+namespace crimson::osd {
+
+class ShardServices;
class OSDMapGate {
struct OSDMapBlocker : public Blocker {
namespace {
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_osd);
+ return crimson::get_logger(ceph_subsys_osd);
}
}
}
}
-namespace ceph::osd {
+namespace crimson::osd {
-using ceph::common::local_conf;
+using crimson::common::local_conf;
class RecoverablePredicate : public IsPGRecoverablePredicate {
public:
PG::PG(
spg_t pgid,
pg_shard_t pg_shard,
- ceph::os::CollectionRef coll_ref,
+ crimson::os::CollectionRef coll_ref,
pg_pool_t&& pool,
std::string&& name,
cached_map_t osdmap,
void PG::init(
- ceph::os::CollectionRef coll,
+ crimson::os::CollectionRef coll,
int role,
const vector<int>& newup, int new_up_primary,
const vector<int>& newacting, int new_acting_primary,
new_acting_primary, history, pi, backfill, t);
}
-seastar::future<> PG::read_state(ceph::os::FuturizedStore* store)
+seastar::future<> PG::read_state(crimson::os::FuturizedStore* store)
{
return store->open_collection(coll_t(pgid)).then([this, store](auto ch) {
coll_ref = ch;
0, false);
reply->add_flags(CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK);
return seastar::make_ready_future<Ref<MOSDOpReply>>(std::move(reply));
- }).handle_exception_type([=,&oid](const ceph::osd::error& e) {
- logger().debug("got ceph::osd::error while handling object {}: {} ({})",
+ }).handle_exception_type([=,&oid](const crimson::osd::error& e) {
+ logger().debug("got crimson::osd::error while handling object {}: {} ({})",
oid, e.code(), e.what());
return backend->evict_object_state(oid).then([=] {
auto reply = make_message<MOSDOpReply>(
CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK,
false);
return seastar::make_ready_future<Ref<MOSDOpReply>>(std::move(reply));
- }).handle_exception_type([=](const ceph::osd::error& e) {
+ }).handle_exception_type([=](const crimson::osd::error& e) {
auto reply = make_message<MOSDOpReply>(
m.get(), -e.code().value(), get_osdmap_epoch(), 0, false);
reply->set_enoent_reply_versions(peering_state.get_info().last_update,
});
}
-seastar::future<> PG::handle_op(ceph::net::Connection* conn,
+seastar::future<> PG::handle_op(crimson::net::Connection* conn,
Ref<MOSDOp> m)
{
return wait_for_active().then([conn, m, this] {
});
}
-void PG::handle_rep_op_reply(ceph::net::Connection* conn,
+void PG::handle_rep_op_reply(crimson::net::Connection* conn,
const MOSDRepOpReply& m)
{
backend->got_rep_op_reply(m);
class Context;
}
-namespace ceph::net {
+namespace crimson::net {
class Messenger;
}
-namespace ceph::os {
+namespace crimson::os {
class FuturizedStore;
}
-namespace ceph::osd {
+namespace crimson::osd {
class ClientRequest;
class PG : public boost::intrusive_ref_counter<
spg_t pgid;
pg_shard_t pg_whoami;
coll_t coll;
- ceph::os::CollectionRef coll_ref;
+ crimson::os::CollectionRef coll_ref;
ghobject_t pgmeta_oid;
public:
PG(spg_t pgid,
pg_shard_t pg_shard,
- ceph::os::CollectionRef coll_ref,
+ crimson::os::CollectionRef coll_ref,
pg_pool_t&& pool,
std::string&& name,
cached_map_t osdmap,
return ceph_subsys_osd;
}
- ceph::os::CollectionRef get_collection_ref() {
+ crimson::os::CollectionRef get_collection_ref() {
return coll_ref;
}
bool dirty_info,
bool dirty_big_info,
bool need_write_epoch,
- ObjectStore::Transaction &t) final {
+ ceph::os::Transaction &t) final {
std::map<string,bufferlist> km;
if (dirty_big_info || dirty_info) {
int ret = prepare_info_keymap(
bool try_flush_or_schedule_async() final;
void start_flush_on_transaction(
- ObjectStore::Transaction &t) final {
+ ceph::os::Transaction &t) final {
t.register_on_commit(
new LambdaContext([this](int r){
peering_state.complete_flush();
}
void schedule_event_on_commit(
- ObjectStore::Transaction &t,
+ ceph::os::Transaction &t,
PGPeeringEventRef on_commit) final {
t.register_on_commit(
new LambdaContext(
void on_role_change() final {
// Not needed yet
}
- void on_change(ObjectStore::Transaction &t) final {
+ void on_change(ceph::os::Transaction &t) final {
// Not needed yet
}
void on_activate(interval_set<snapid_t> to_trim) final;
// Not needed yet
}
- void on_removal(ObjectStore::Transaction &t) final {
+ void on_removal(ceph::os::Transaction &t) final {
// TODO
}
- void do_delete_work(ObjectStore::Transaction &t) final {
+ void do_delete_work(ceph::os::Transaction &t) final {
// TODO
}
struct PGLogEntryHandler : public PGLog::LogEntryHandler {
PG *pg;
- ObjectStore::Transaction *t;
- PGLogEntryHandler(PG *pg, ObjectStore::Transaction *t) : pg(pg), t(t) {}
+ ceph::os::Transaction *t;
+ PGLogEntryHandler(PG *pg, ceph::os::Transaction *t) : pg(pg), t(t) {}
// LogEntryHandler
void remove(const hobject_t &hoid) override {
}
};
PGLog::LogEntryHandlerRef get_log_handler(
- ObjectStore::Transaction &t) final {
+ ceph::os::Transaction &t) final {
return std::make_unique<PG::PGLogEntryHandler>(this, &t);
}
/// initialize created PG
void init(
- ceph::os::CollectionRef coll_ref,
+ crimson::os::CollectionRef coll_ref,
int role,
const std::vector<int>& up,
int up_primary,
const pg_history_t& history,
const PastIntervals& pim,
bool backfill,
- ObjectStore::Transaction &t);
+ ceph::os::Transaction &t);
- seastar::future<> read_state(ceph::os::FuturizedStore* store);
+ seastar::future<> read_state(crimson::os::FuturizedStore* store);
void do_peering_event(
PGPeeringEvent& evt, PeeringCtx &rctx);
void handle_advance_map(cached_map_t next_map, PeeringCtx &rctx);
void handle_activate_map(PeeringCtx &rctx);
void handle_initialize(PeeringCtx &rctx);
- seastar::future<> handle_op(ceph::net::Connection* conn,
+ seastar::future<> handle_op(crimson::net::Connection* conn,
Ref<MOSDOp> m);
seastar::future<> handle_rep_op(Ref<MOSDRepOp> m);
- void handle_rep_op_reply(ceph::net::Connection* conn,
+ void handle_rep_op_reply(crimson::net::Connection* conn,
const MOSDRepOpReply& m);
void print(std::ostream& os) const;
#include <seastar/core/print.hh>
#include "messages/MOSDOp.h"
+#include "os/Transaction.h"
#include "crimson/os/futurized_collection.h"
#include "crimson/os/cyan_object.h"
namespace {
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_osd);
+ return crimson::get_logger(ceph_subsys_osd);
}
}
-using ceph::common::local_conf;
+using crimson::common::local_conf;
-std::unique_ptr<PGBackend> PGBackend::create(pg_t pgid,
- const pg_shard_t pg_shard,
- const pg_pool_t& pool,
- ceph::os::CollectionRef coll,
- ceph::osd::ShardServices& shard_services,
- const ec_profile_t& ec_profile)
+std::unique_ptr<PGBackend>
+PGBackend::create(pg_t pgid,
+ const pg_shard_t pg_shard,
+ const pg_pool_t& pool,
+ crimson::os::CollectionRef coll,
+ crimson::osd::ShardServices& shard_services,
+ const ec_profile_t& ec_profile)
{
switch (pool.type) {
case pg_pool_t::TYPE_REPLICATED:
PGBackend::PGBackend(shard_id_t shard,
CollectionRef coll,
- ceph::os::FuturizedStore* store)
+ crimson::os::FuturizedStore* store)
: shard{shard},
coll{coll},
store{store}
oid.snap);
if (clone == end(ss->clones)) {
return seastar::make_exception_future<PGBackend::cached_os_t>(
- ceph::osd::object_not_found{});
+ crimson::osd::object_not_found{});
}
// clone
auto soid = oid;
if (clone_snap->second.empty()) {
logger().trace("find_object: {}@[] -- DNE", soid);
return seastar::make_exception_future<PGBackend::cached_os_t>(
- ceph::osd::object_not_found{});
+ crimson::osd::object_not_found{});
}
auto first = clone_snap->second.back();
auto last = clone_snap->second.front();
logger().trace("find_object: {}@[{},{}] -- DNE",
soid, first, last);
return seastar::make_exception_future<PGBackend::cached_os_t>(
- ceph::osd::object_not_found{});
+ crimson::osd::object_not_found{});
}
logger().trace("find_object: {}@[{},{}] -- HIT",
soid, first, last);
OI_ATTR).then_wrapped([oid, this](auto fut) {
if (fut.failed()) {
auto ep = std::move(fut).get_exception();
- if (!ceph::os::FuturizedStore::EnoentException::is_class_of(ep)) {
+ if (!crimson::os::FuturizedStore::EnoentException::is_class_of(ep)) {
std::rethrow_exception(ep);
}
return seastar::make_ready_future<cached_os_t>(
std::unique_ptr<SnapSet> snapset;
if (fut.failed()) {
auto ep = std::move(fut).get_exception();
- if (!ceph::os::FuturizedStore::EnoentException::is_class_of(ep)) {
+ if (!crimson::os::FuturizedStore::EnoentException::is_class_of(ep)) {
std::rethrow_exception(ep);
} else {
snapset = std::make_unique<SnapSet>();
});
}
-seastar::future<ceph::osd::acked_peers_t>
+seastar::future<crimson::osd::acked_peers_t>
PGBackend::mutate_object(
std::set<pg_shard_t> pg_shards,
cached_os_t&& os,
logger().error("full-object read crc {} != expected {} on {}",
crc, *maybe_crc, soid);
// todo: mark soid missing, perform recovery, and retry
- throw ceph::osd::object_corrupted{};
+ throw crimson::osd::object_corrupted{};
}
}
return seastar::make_ready_future<bufferlist>(std::move(bl));
encode(os.oi.mtime, osd_op.outdata);
} else {
logger().debug("stat object does not exist");
- throw ceph::osd::object_not_found{};
+ throw crimson::osd::object_not_found{};
}
return seastar::now();
// TODO: ctx->delta_stats.num_rd++;
{
const ceph_osd_op& op = osd_op.op;
if (op.extent.length != osd_op.indata.length()) {
- throw ceph::osd::invalid_argument();
+ throw crimson::osd::invalid_argument();
}
const bool existing = maybe_create_new_object(os, txn);
if (os.exists && !os.oi.is_whiteout() &&
(osd_op.op.flags & CEPH_OSD_OP_FLAG_EXCL)) {
// this is an exclusive create
- throw ceph::osd::make_error(-EEXIST);
+ throw crimson::osd::make_error(-EEXIST);
}
if (osd_op.indata.length()) {
std::string category;
decode(category, p);
} catch (buffer::error&) {
- throw ceph::osd::invalid_argument();
+ throw crimson::osd::invalid_argument();
}
}
maybe_create_new_object(os, txn);
{
if (local_conf()->osd_max_attr_size > 0 &&
osd_op.op.xattr.value_len > local_conf()->osd_max_attr_size) {
- throw ceph::osd::make_error(-EFBIG);
+ throw crimson::osd::make_error(-EFBIG);
}
const auto max_name_len = std::min<uint64_t>(
store->get_max_attr_name_length(), local_conf()->osd_max_attr_name_len);
if (osd_op.op.xattr.name_len > max_name_len) {
- throw ceph::osd::make_error(-ENAMETOOLONG);
+ throw crimson::osd::make_error(-ENAMETOOLONG);
}
maybe_create_new_object(os, txn);
osd_op.op.xattr.value_len = osd_op.outdata.length();
//ctx->delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10);
}).handle_exception_type(
- [] (ceph::os::FuturizedStore::EnoentException&) {
- return seastar::make_exception_future<>(ceph::osd::object_not_found{});
+ [] (crimson::os::FuturizedStore::EnoentException&) {
+ return seastar::make_exception_future<>(crimson::osd::object_not_found{});
}).handle_exception_type(
- [] (ceph::os::FuturizedStore::EnodataException&) {
- return seastar::make_exception_future<>(ceph::osd::no_message_available{});
+ [] (crimson::os::FuturizedStore::EnodataException&) {
+ return seastar::make_exception_future<>(crimson::osd::no_message_available{});
});
//ctx->delta_stats.num_rd++;
}
return store->get_attr(coll, ghobject_t{soid}, key);
}
-static seastar::future<ceph::os::FuturizedStore::omap_values_t>
+static seastar::future<crimson::os::FuturizedStore::omap_values_t>
maybe_get_omap_vals_by_keys(
auto& store,
const auto& coll,
if (oi.is_omap()) {
return store->omap_get_values(coll, ghobject_t{oi.soid}, keys_to_get);
} else {
- return seastar::make_ready_future<ceph::os::FuturizedStore::omap_values_t>(
- ceph::os::FuturizedStore::omap_values_t{});
+ return seastar::make_ready_future<crimson::os::FuturizedStore::omap_values_t>(
+ crimson::os::FuturizedStore::omap_values_t{});
}
}
-static seastar::future<bool, ceph::os::FuturizedStore::omap_values_t>
+static seastar::future<bool, crimson::os::FuturizedStore::omap_values_t>
maybe_get_omap_vals(
auto& store,
const auto& coll,
if (oi.is_omap()) {
return store->omap_get_values(coll, ghobject_t{oi.soid}, start_after);
} else {
- return seastar::make_ready_future<bool, ceph::os::FuturizedStore::omap_values_t>(
- true, ceph::os::FuturizedStore::omap_values_t{});
+ return seastar::make_ready_future<bool, crimson::os::FuturizedStore::omap_values_t>(
+ true, crimson::os::FuturizedStore::omap_values_t{});
}
}
decode(start_after, p);
decode(max_return, p);
} catch (buffer::error&) {
- throw ceph::osd::invalid_argument{};
+ throw crimson::osd::invalid_argument{};
}
max_return =
std::min(max_return, local_conf()->osd_max_omap_entries_per_request);
// TODO: truly chunk the reading
return maybe_get_omap_vals(store, coll, os.oi, start_after).then(
- [=, &osd_op] (bool, ceph::os::FuturizedStore::omap_values_t vals) {
+ [=, &osd_op] (bool, crimson::os::FuturizedStore::omap_values_t vals) {
ceph::bufferlist result;
bool truncated = false;
uint32_t num = 0;
decode(max_return, p);
decode(filter_prefix, p);
} catch (buffer::error&) {
- throw ceph::osd::invalid_argument{};
+ throw crimson::osd::invalid_argument{};
}
max_return = \
// TODO: truly chunk the reading
return maybe_get_omap_vals(store, coll, os.oi, start_after).then(
[=, &osd_op] (const bool done,
- ceph::os::FuturizedStore::omap_values_t vals) {
+ crimson::os::FuturizedStore::omap_values_t vals) {
assert(done);
ceph::bufferlist result;
bool truncated = false;
auto p = osd_op.indata.cbegin();
decode(keys_to_get, p);
} catch (buffer::error&) {
- throw ceph::osd::invalid_argument();
+ throw crimson::osd::invalid_argument();
}
return maybe_get_omap_vals_by_keys(store, coll, os.oi, keys_to_get).then(
- [&osd_op] (ceph::os::FuturizedStore::omap_values_t vals) {
+ [&osd_op] (crimson::os::FuturizedStore::omap_values_t vals) {
encode(vals, osd_op.outdata);
return seastar::now();
});
auto p = osd_op.indata.cbegin();
decode_str_str_map_to_bl(p, &to_set_bl);
} catch (buffer::error&) {
- throw ceph::osd::invalid_argument{};
+ throw crimson::osd::invalid_argument{};
}
txn.omap_setkeys(coll->get_cid(), ghobject_t{os.oi.soid}, to_set_bl);
#include "crimson/os/futurized_collection.h"
#include "crimson/osd/acked_peers.h"
#include "crimson/common/shared_lru.h"
-#include "os/Transaction.h"
#include "osd/osd_types.h"
#include "osd/osd_internal_types.h"
struct hobject_t;
class MOSDRepOpReply;
-namespace ceph::osd {
+namespace ceph::os {
+ class Transaction;
+}
+
+namespace crimson::osd {
class ShardServices;
}
class PGBackend
{
protected:
- using CollectionRef = ceph::os::CollectionRef;
+ using CollectionRef = crimson::os::CollectionRef;
using ec_profile_t = std::map<std::string, std::string>;
public:
- PGBackend(shard_id_t shard, CollectionRef coll, ceph::os::FuturizedStore* store);
+ PGBackend(shard_id_t shard, CollectionRef coll, crimson::os::FuturizedStore* store);
virtual ~PGBackend() = default;
static std::unique_ptr<PGBackend> create(pg_t pgid,
const pg_shard_t pg_shard,
const pg_pool_t& pool,
- ceph::os::CollectionRef coll,
- ceph::osd::ShardServices& shard_services,
+ crimson::os::CollectionRef coll,
+ crimson::osd::ShardServices& shard_services,
const ec_profile_t& ec_profile);
using cached_os_t = boost::local_shared_ptr<ObjectState>;
seastar::future<cached_os_t> get_object_state(const hobject_t& oid);
ObjectState& os,
const OSDOp& osd_op,
ceph::os::Transaction& trans);
- seastar::future<ceph::osd::acked_peers_t> mutate_object(
+ seastar::future<crimson::osd::acked_peers_t> mutate_object(
std::set<pg_shard_t> pg_shards,
cached_os_t&& os,
ceph::os::Transaction&& txn,
protected:
const shard_id_t shard;
CollectionRef coll;
- ceph::os::FuturizedStore* store;
+ crimson::os::FuturizedStore* store;
private:
using cached_ss_t = boost::local_shared_ptr<SnapSet>;
size_t length,
uint32_t flags) = 0;
bool maybe_create_new_object(ObjectState& os, ceph::os::Transaction& txn);
- virtual seastar::future<ceph::osd::acked_peers_t>
+ virtual seastar::future<crimson::osd::acked_peers_t>
_submit_transaction(std::set<pg_shard_t>&& pg_shards,
const hobject_t& hoid,
ceph::os::Transaction&& txn,
namespace {
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_osd);
+ return crimson::get_logger(ceph_subsys_osd);
}
}
-namespace ceph::osd {
+namespace crimson::osd {
PGMap::PGCreationState::PGCreationState(spg_t pgid) : pgid(pgid) {}
PGMap::PGCreationState::~PGCreationState() {}
#include "crimson/osd/pg.h"
#include "osd/osd_types.h"
-namespace ceph::osd {
+namespace crimson::osd {
class PG;
class PGMap {
// prefix pgmeta_oid keys with _ so that PGLog::read_log_and_missing() can
// easily skip them
-using ceph::os::FuturizedStore;
+using crimson::os::FuturizedStore;
PGMeta::PGMeta(FuturizedStore* store, spg_t pgid)
: store{store},
#include <seastar/core/future.hh>
#include "osd/osd_types.h"
-namespace ceph::os {
+namespace crimson::os {
class FuturizedStore;
}
/// PG related metadata
class PGMeta
{
- ceph::os::FuturizedStore* store;
+ crimson::os::FuturizedStore* store;
const spg_t pgid;
public:
- PGMeta(ceph::os::FuturizedStore *store, spg_t pgid);
+ PGMeta(crimson::os::FuturizedStore *store, spg_t pgid);
seastar::future<epoch_t> get_epoch();
seastar::future<pg_info_t, PastIntervals> load();
};
namespace {
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_osd);
+ return crimson::get_logger(ceph_subsys_osd);
}
}
ReplicatedBackend::ReplicatedBackend(pg_t pgid,
pg_shard_t whoami,
ReplicatedBackend::CollectionRef coll,
- ceph::osd::ShardServices& shard_services)
+ crimson::osd::ShardServices& shard_services)
: PGBackend{whoami.shard, coll, &shard_services.get_store()},
pgid{pgid},
whoami{whoami},
return store->read(coll, ghobject_t{hoid}, off, len, flags);
}
-seastar::future<ceph::osd::acked_peers_t>
+seastar::future<crimson::osd::acked_peers_t>
ReplicatedBackend::_submit_transaction(std::set<pg_shard_t>&& pg_shards,
const hobject_t& hoid,
ceph::os::Transaction&& txn,
pending_txn->second.all_committed = {};
auto acked_peers = std::move(pending_txn->second.acked_peers);
pending_trans.erase(pending_txn);
- return seastar::make_ready_future<ceph::osd::acked_peers_t>(std::move(acked_peers));
+ return seastar::make_ready_future<crimson::osd::acked_peers_t>(std::move(acked_peers));
});
}
#include "acked_peers.h"
#include "pg_backend.h"
-namespace ceph::osd {
+namespace crimson::osd {
class ShardServices;
}
public:
ReplicatedBackend(pg_t pgid, pg_shard_t whoami,
CollectionRef coll,
- ceph::osd::ShardServices& shard_services);
+ crimson::osd::ShardServices& shard_services);
void got_rep_op_reply(const MOSDRepOpReply& reply) final;
private:
seastar::future<ceph::bufferlist> _read(const hobject_t& hoid,
uint64_t off,
uint64_t len,
uint32_t flags) override;
- seastar::future<ceph::osd::acked_peers_t>
+ seastar::future<crimson::osd::acked_peers_t>
_submit_transaction(std::set<pg_shard_t>&& pg_shards,
const hobject_t& hoid,
ceph::os::Transaction&& txn,
eversion_t ver) final;
const pg_t pgid;
const pg_shard_t whoami;
- ceph::osd::ShardServices& shard_services;
+ crimson::osd::ShardServices& shard_services;
ceph_tid_t next_txn_id = 0;
struct pending_on_t {
pending_on_t(size_t pending)
: pending{static_cast<unsigned>(pending)}
{}
unsigned pending;
- ceph::osd::acked_peers_t acked_peers;
+ crimson::osd::acked_peers_t acked_peers;
seastar::promise<> all_committed;
};
using pending_transactions_t = std::map<ceph_tid_t, pending_on_t>;
namespace {
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_osd);
+ return crimson::get_logger(ceph_subsys_osd);
}
}
-namespace ceph::osd {
+namespace crimson::osd {
ShardServices::ShardServices(
OSDMapService &osdmap_service,
- ceph::net::Messenger &cluster_msgr,
- ceph::net::Messenger &public_msgr,
- ceph::mon::Client &monc,
- ceph::mgr::Client &mgrc,
- ceph::os::FuturizedStore &store)
+ crimson::net::Messenger &cluster_msgr,
+ crimson::net::Messenger &public_msgr,
+ crimson::mon::Client &monc,
+ crimson::mgr::Client &mgrc,
+ crimson::os::FuturizedStore &store)
: osdmap_service(osdmap_service),
cluster_msgr(cluster_msgr),
public_msgr(public_msgr),
}
seastar::future<> ShardServices::dispatch_context_transaction(
- ceph::os::CollectionRef col, PeeringCtx &ctx) {
+ crimson::os::CollectionRef col, PeeringCtx &ctx) {
auto ret = store.do_transaction(
col,
std::move(ctx.transaction));
}
seastar::future<> ShardServices::dispatch_context(
- ceph::os::CollectionRef col,
+ crimson::os::CollectionRef col,
PeeringCtx &&ctx)
{
ceph_assert(col || ctx.transaction.empty());
#include "osd/PeeringState.h"
#include "crimson/osd/osdmap_service.h"
-namespace ceph::net {
+namespace crimson::net {
class Messenger;
}
-namespace ceph::mgr {
+namespace crimson::mgr {
class Client;
}
-namespace ceph::mon {
+namespace crimson::mon {
class Client;
}
-namespace ceph::os {
+namespace crimson::os {
class FuturizedStore;
}
class PeeringCtx;
class BufferedRecoveryMessages;
-namespace ceph::osd {
+namespace crimson::osd {
/**
* Represents services available to each PG
class ShardServices {
using cached_map_t = boost::local_shared_ptr<const OSDMap>;
OSDMapService &osdmap_service;
- ceph::net::Messenger &cluster_msgr;
- ceph::net::Messenger &public_msgr;
- ceph::mon::Client &monc;
- ceph::mgr::Client &mgrc;
- ceph::os::FuturizedStore &store;
+ crimson::net::Messenger &cluster_msgr;
+ crimson::net::Messenger &public_msgr;
+ crimson::mon::Client &monc;
+ crimson::mgr::Client &mgrc;
+ crimson::os::FuturizedStore &store;
CephContext cct;
public:
ShardServices(
OSDMapService &osdmap_service,
- ceph::net::Messenger &cluster_msgr,
- ceph::net::Messenger &public_msgr,
- ceph::mon::Client &monc,
- ceph::mgr::Client &mgrc,
- ceph::os::FuturizedStore &store);
+ crimson::net::Messenger &cluster_msgr,
+ crimson::net::Messenger &public_msgr,
+ crimson::mon::Client &monc,
+ crimson::mgr::Client &mgrc,
+ crimson::os::FuturizedStore &store);
seastar::future<> send_to_osd(
int peer,
MessageRef m,
epoch_t from_epoch);
- ceph::os::FuturizedStore &get_store() {
+ crimson::os::FuturizedStore &get_store() {
return store;
}
/// Dispatch and reset ctx transaction
seastar::future<> dispatch_context_transaction(
- ceph::os::CollectionRef col, PeeringCtx &ctx);
+ crimson::os::CollectionRef col, PeeringCtx &ctx);
/// Dispatch and reset ctx messages
seastar::future<> dispatch_context_messages(
/// Dispatch ctx and dispose of context
seastar::future<> dispatch_context(
- ceph::os::CollectionRef col,
+ crimson::os::CollectionRef col,
PeeringCtx &&ctx);
/// Dispatch ctx and dispose of ctx, transaction must be empty
std::map<int, HeartbeatStampsRef> heartbeat_stamps;
};
-
}
#include <seastar/core/reactor.hh>
#include <sys/eventfd.h>
-namespace ceph::thread {
+namespace crimson::thread {
/// a synchronization primitive can be used to block a seastar thread, until
/// another thread notifies it.
}
};
-} // namespace ceph::thread
+} // namespace crimson::thread
#include "include/ceph_assert.h"
-namespace ceph::thread {
+namespace crimson::thread {
ThreadPool::ThreadPool(size_t n_threads,
size_t queue_sz,
{
std::unique_lock lock{mutex};
cond.wait_for(lock,
- ceph::net::conf.threadpool_empty_queue_max_wait,
+ crimson::net::conf.threadpool_empty_queue_max_wait,
[this, &work_item] {
return pending.pop(work_item) || is_stopping();
});
});
}
-} // namespace ceph::thread
+} // namespace crimson::thread
#include "Condition.h"
-namespace ceph::thread {
+namespace crimson::thread {
struct WorkItem {
virtual ~WorkItem() {}
struct Task final : WorkItem {
Func func;
seastar::future_state<T> state;
- ceph::thread::Condition on_done;
+ crimson::thread::Condition on_done;
public:
explicit Task(Func&& f)
: func(std::move(f))
* multiple of the number of cores.
* @param n_threads the number of threads in this thread pool.
* @param cpu the CPU core to which this thread pool is assigned
- * @note each @c Task has its own ceph::thread::Condition, which possesses
+ * @note each @c Task has its own crimson::thread::Condition, which
* possesses an fd, so we should keep the size of queue under a reasonable
* limit.
*/
}
};
-} // namespace ceph::thread
+} // namespace crimson::thread
#include "Throttle.h"
-namespace ceph::thread {
+namespace crimson::thread {
int64_t Throttle::take(int64_t c)
{
(c >= max && count > max)); // except for large c
}
-} // namespace ceph::thread::seastar
+} // namespace crimson::thread
#include "common/ThrottleInterface.h"
-namespace ceph::thread {
+namespace crimson::thread {
class Throttle final : public ThrottleInterface {
size_t max = 0;
bool _should_wait(size_t c) const;
};
-} // namespace ceph::thread
+} // namespace crimson::thread
});
}
-seastar::future<> MonMap::build_monmap(const ceph::common::ConfigProxy& conf,
+seastar::future<> MonMap::build_monmap(const crimson::common::ConfigProxy& conf,
bool for_mkfs)
{
// -m foo?
});
}
-future<> MonMap::build_initial(const ceph::common::ConfigProxy& conf, bool for_mkfs)
+future<> MonMap::build_initial(const crimson::common::ConfigProxy& conf, bool for_mkfs)
{
// file?
if (const auto monmap = conf.get_val<std::string>("monmap");
* @param errout std::ostream to send error messages too
*/
#ifdef WITH_SEASTAR
- seastar::future<> build_initial(const ceph::common::ConfigProxy& conf, bool for_mkfs);
+ seastar::future<> build_initial(const crimson::common::ConfigProxy& conf, bool for_mkfs);
#else
int build_initial(CephContext *cct, bool for_mkfs, std::ostream& errout);
#endif
seastar::future<> read_monmap(const std::string& monmap);
/// try to build monmap with different settings, like
/// mon_host, mon* sections, and mon_dns_srv_name
- seastar::future<> build_monmap(const ceph::common::ConfigProxy& conf, bool for_mkfs);
+ seastar::future<> build_monmap(const crimson::common::ConfigProxy& conf, bool for_mkfs);
/// initialize monmap by resolving given service name
seastar::future<> init_with_dns_srv(bool for_mkfs, const std::string& name);
#else
class Message : public RefCountedObject {
public:
#ifdef WITH_SEASTAR
- using ConnectionRef = ceph::net::ConnectionRef;
+ using ConnectionRef = crimson::net::ConnectionRef;
#else
using ConnectionRef = ::ConnectionRef;
#endif // WITH_SEASTAR
#ifdef WITH_SEASTAR
seastar::future<> read_log_and_missing_crimson(
- ceph::os::FuturizedStore &store,
- ceph::os::CollectionRef ch,
+ crimson::os::FuturizedStore &store,
+ crimson::os::CollectionRef ch,
const pg_info_t &info,
ghobject_t pgmeta_oid
) {
template <typename missing_type>
struct FuturizedStoreLogReader {
- ceph::os::FuturizedStore &store;
- ceph::os::CollectionRef ch;
+ crimson::os::FuturizedStore &store;
+ crimson::os::CollectionRef ch;
const pg_info_t &info;
IndexedLog &log;
missing_type &missing;
[this]() {
return store.omap_get_values(ch, pgmeta_oid, next).then(
[this](
- bool done, ceph::os::FuturizedStore::omap_values_t values) {
+ bool done, crimson::os::FuturizedStore::omap_values_t values) {
for (auto &&p : values) {
process_entry(p);
}
template <typename missing_type>
static seastar::future<> read_log_and_missing_crimson(
- ceph::os::FuturizedStore &store,
- ceph::os::CollectionRef ch,
+ crimson::os::FuturizedStore &store,
+ crimson::os::CollectionRef ch,
const pg_info_t &info,
IndexedLog &log,
missing_type &missing,
using Ref = boost::intrusive_ptr<Message>;
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_ms);
+ return crimson::get_logger(ceph_subsys_ms);
}
enum class perf_mode_t {
{
struct test_state {
struct Server final
- : public ceph::net::Dispatcher,
+ : public crimson::net::Dispatcher,
public seastar::peering_sharded_service<Server> {
- ceph::net::Messenger *msgr = nullptr;
- ceph::auth::DummyAuthClientServer dummy_auth;
+ crimson::net::Messenger *msgr = nullptr;
+ crimson::auth::DummyAuthClientServer dummy_auth;
const seastar::shard_id sid;
const seastar::shard_id msgr_sid;
std::string lname;
seastar::future<> stop() {
return seastar::make_ready_future<>();
}
- seastar::future<> ms_dispatch(ceph::net::Connection* c,
+ seastar::future<> ms_dispatch(crimson::net::Connection* c,
MessageRef m) override {
ceph_assert(m->get_type() == CEPH_MSG_OSD_OP);
seastar::future<> init(bool v1_crc_enabled, const entity_addr_t& addr) {
return container().invoke_on(msgr_sid, [v1_crc_enabled, addr] (auto& server) {
// server msgr is always with nonce 0
- auto&& fut = ceph::net::Messenger::create(entity_name_t::OSD(server.sid), server.lname, 0, server.sid);
+ auto&& fut = crimson::net::Messenger::create(entity_name_t::OSD(server.sid), server.lname, 0, server.sid);
return fut.then(
- [&server, addr, v1_crc_enabled](ceph::net::Messenger *messenger) {
+ [&server, addr, v1_crc_enabled](crimson::net::Messenger *messenger) {
return server.container().invoke_on_all(
[messenger, v1_crc_enabled](auto& server) {
server.msgr = messenger->get_local_shard();
- server.msgr->set_default_policy(ceph::net::SocketPolicy::stateless_server(0));
+ server.msgr->set_default_policy(crimson::net::SocketPolicy::stateless_server(0));
server.msgr->set_auth_client(&server.dummy_auth);
server.msgr->set_auth_server(&server.dummy_auth);
if (v1_crc_enabled) {
};
struct Client final
- : public ceph::net::Dispatcher,
+ : public crimson::net::Dispatcher,
public seastar::peering_sharded_service<Client> {
struct ConnStats {
std::string lname;
const unsigned jobs;
- ceph::net::Messenger *msgr = nullptr;
+ crimson::net::Messenger *msgr = nullptr;
const unsigned msg_len;
bufferlist msg_data;
const unsigned nr_depth;
seastar::semaphore depth;
std::vector<mono_time> time_msgs_sent;
- ceph::auth::DummyAuthClientServer dummy_auth;
+ crimson::auth::DummyAuthClientServer dummy_auth;
unsigned sent_count = 0u;
- ceph::net::ConnectionRef active_conn = nullptr;
+ crimson::net::ConnectionRef active_conn = nullptr;
bool stop_send = false;
seastar::promise<> stopped_send_promise;
seastar::future<> stop() {
return seastar::now();
}
- seastar::future<> ms_handle_connect(ceph::net::ConnectionRef conn) override {
+ seastar::future<> ms_handle_connect(crimson::net::ConnectionRef conn) override {
conn_stats.connected_time = mono_clock::now();
return seastar::now();
}
- seastar::future<> ms_dispatch(ceph::net::Connection* c,
+ seastar::future<> ms_dispatch(crimson::net::Connection* c,
MessageRef m) override {
// server replies with MOSDOp to generate server-side write workload
ceph_assert(m->get_type() == CEPH_MSG_OSD_OP);
seastar::future<> init(bool v1_crc_enabled) {
return container().invoke_on_all([v1_crc_enabled] (auto& client) {
if (client.is_active()) {
- return ceph::net::Messenger::create(entity_name_t::OSD(client.sid), client.lname, client.sid, client.sid)
- .then([&client, v1_crc_enabled] (ceph::net::Messenger *messenger) {
+ return crimson::net::Messenger::create(entity_name_t::OSD(client.sid), client.lname, client.sid, client.sid)
+ .then([&client, v1_crc_enabled] (crimson::net::Messenger *messenger) {
client.msgr = messenger;
- client.msgr->set_default_policy(ceph::net::SocketPolicy::lossy_client(0));
+ client.msgr->set_default_policy(crimson::net::SocketPolicy::lossy_client(0));
client.msgr->set_require_authorizer(false);
client.msgr->set_auth_client(&client.dummy_auth);
client.msgr->set_auth_server(&client.dummy_auth);
}
private:
- seastar::future<> send_msg(ceph::net::Connection* conn) {
+ seastar::future<> send_msg(crimson::net::Connection* conn) {
ceph_assert(seastar::engine().cpu_id() == sid);
return depth.wait(1).then([this, conn] {
const static pg_t pgid;
return stopped_send_promise.get_future();
}
- void do_dispatch_messages(ceph::net::Connection* conn) {
+ void do_dispatch_messages(crimson::net::Connection* conn) {
ceph_assert(seastar::engine().cpu_id() == sid);
ceph_assert(sent_count == 0);
conn_stats.start_time = mono_clock::now();
};
return seastar::when_all_succeed(
- ceph::net::create_sharded<test_state::Server>(server_conf.core, server_conf.block_size),
- ceph::net::create_sharded<test_state::Client>(client_conf.jobs,
- client_conf.block_size, client_conf.depth))
+ crimson::net::create_sharded<test_state::Server>(server_conf.core, server_conf.block_size),
+ crimson::net::create_sharded<test_state::Client>(client_conf.jobs,
+ client_conf.block_size, client_conf.depth))
.then([=](test_state::Server *server,
test_state::Client *client) {
if (mode == perf_mode_t::both) {
};
struct Server {
- ceph::thread::Throttle byte_throttler;
- ceph::net::Messenger& msgr;
- ceph::auth::DummyAuthClientServer dummy_auth;
- struct ServerDispatcher : ceph::net::Dispatcher {
+ crimson::thread::Throttle byte_throttler;
+ crimson::net::Messenger& msgr;
+ crimson::auth::DummyAuthClientServer dummy_auth;
+ struct ServerDispatcher : crimson::net::Dispatcher {
unsigned count = 0;
seastar::condition_variable on_reply;
- seastar::future<> ms_dispatch(ceph::net::Connection* c,
+ seastar::future<> ms_dispatch(crimson::net::Connection* c,
MessageRef m) override {
std::cout << "server got ping " << *m << std::endl;
// reply with a pong
0, bufferlist{});
}
} dispatcher;
- Server(ceph::net::Messenger& msgr)
- : byte_throttler(ceph::net::conf.osd_client_message_size_cap),
+ Server(crimson::net::Messenger& msgr)
+ : byte_throttler(crimson::net::conf.osd_client_message_size_cap),
msgr{msgr}
{
msgr.set_crc_header();
};
struct Client {
- ceph::thread::Throttle byte_throttler;
- ceph::net::Messenger& msgr;
- ceph::auth::DummyAuthClientServer dummy_auth;
- struct ClientDispatcher : ceph::net::Dispatcher {
+ crimson::thread::Throttle byte_throttler;
+ crimson::net::Messenger& msgr;
+ crimson::auth::DummyAuthClientServer dummy_auth;
+ struct ClientDispatcher : crimson::net::Dispatcher {
unsigned count = 0;
seastar::condition_variable on_reply;
- seastar::future<> ms_dispatch(ceph::net::Connection* c,
+ seastar::future<> ms_dispatch(crimson::net::Connection* c,
MessageRef m) override {
std::cout << "client got pong " << *m << std::endl;
++count;
return seastar::now();
}
} dispatcher;
- Client(ceph::net::Messenger& msgr)
- : byte_throttler(ceph::net::conf.osd_client_message_size_cap),
+ Client(crimson::net::Messenger& msgr)
+ : byte_throttler(crimson::net::conf.osd_client_message_size_cap),
msgr{msgr}
{
msgr.set_crc_header();
class SeastarContext {
seastar::file_desc begin_fd;
- ceph::thread::Condition on_end;
+ crimson::thread::Condition on_end;
public:
SeastarContext()
{
std::cout << "seastar/";
if (role == echo_role::as_server) {
- return ceph::net::Messenger::create(entity_name_t::OSD(0), "server",
+ return crimson::net::Messenger::create(entity_name_t::OSD(0), "server",
addr.get_nonce(), 0)
.then([addr, count] (auto msgr) {
return seastar::do_with(seastar_pingpong::Server{*msgr},
[addr, count](auto& server) mutable {
std::cout << "server listening at " << addr << std::endl;
// bind the server
- server.msgr.set_default_policy(ceph::net::SocketPolicy::stateless_server(0));
+ server.msgr.set_default_policy(crimson::net::SocketPolicy::stateless_server(0));
server.msgr.set_policy_throttler(entity_name_t::TYPE_OSD,
&server.byte_throttler);
server.msgr.set_require_authorizer(false);
});
});
} else {
- return ceph::net::Messenger::create(entity_name_t::OSD(1), "client",
+ return crimson::net::Messenger::create(entity_name_t::OSD(1), "client",
addr.get_nonce(), 0)
.then([addr, count] (auto msgr) {
return seastar::do_with(seastar_pingpong::Client{*msgr},
[addr, count](auto& client) {
std::cout << "client sending to " << addr << std::endl;
- client.msgr.set_default_policy(ceph::net::SocketPolicy::lossy_client(0));
+ client.msgr.set_default_policy(crimson::net::SocketPolicy::lossy_client(0));
client.msgr.set_policy_throttler(entity_name_t::TYPE_OSD,
&client.byte_throttler);
client.msgr.set_require_authorizer(false);
return client.msgr.start(&client.dispatcher)
.then([addr, &client] {
return client.msgr.connect(addr, entity_name_t::TYPE_OSD);
- }).then([&disp=client.dispatcher, count](ceph::net::ConnectionXRef conn) {
+ }).then([&disp=client.dispatcher, count](crimson::net::ConnectionXRef conn) {
return seastar::do_until(
[&disp,count] { return disp.count >= count; },
[&disp,conn] { return (*conn)->send(make_message<MPing>())
#include "common/config_obs.h"
#include "crimson/common/config_proxy.h"
-using Config = ceph::common::ConfigProxy;
+using Config = crimson::common::ConfigProxy;
const std::string test_uint_option = "osd_max_pgls";
const uint64_t INVALID_VALUE = (uint64_t)(-1);
}
public:
ConfigObs() {
- ceph::common::local_conf().add_observer(this);
+ crimson::common::local_conf().add_observer(this);
}
uint64_t get_last_change() const { return last_change; }
uint64_t get_num_changes() const { return num_changes; }
seastar::future<> stop() {
- ceph::common::local_conf().remove_observer(this);
+ crimson::common::local_conf().remove_observer(this);
return seastar::now();
}
};
static seastar::future<> test_config()
{
- return ceph::common::sharded_conf().start(EntityName{}, string_view{"ceph"}).then([] {
+ return crimson::common::sharded_conf().start(EntityName{}, string_view{"ceph"}).then([] {
std::vector<const char*> args;
std::string cluster;
std::string conf_file_list;
CEPH_ENTITY_TYPE_CLIENT,
&cluster,
&conf_file_list);
- auto& conf = ceph::common::local_conf();
+ auto& conf = crimson::common::local_conf();
conf->name = init_params.name;
conf->cluster = cluster;
return conf.parse_config_files(conf_file_list);
}).then([] {
- return ceph::common::sharded_conf().invoke_on(0, &Config::start);
+ return crimson::common::sharded_conf().invoke_on(0, &Config::start);
}).then([] {
return sharded_cobs.start();
}).then([] {
- return ceph::common::sharded_conf().invoke_on_all([](Config& config) {
+ return crimson::common::sharded_conf().invoke_on_all([](Config& config) {
return config.set_val(test_uint_option,
std::to_string(seastar::engine().cpu_id()));
});
}).then([] {
- auto expected = ceph::common::local_conf().get_val<uint64_t>(test_uint_option);
- return ceph::common::sharded_conf().invoke_on_all([expected](Config& config) {
+ auto expected = crimson::common::local_conf().get_val<uint64_t>(test_uint_option);
+ return crimson::common::sharded_conf().invoke_on_all([expected](Config& config) {
if (expected != config.get_val<uint64_t>(test_uint_option)) {
throw std::runtime_error("configurations don't match");
}
}).finally([] {
return sharded_cobs.stop();
}).finally([] {
- return ceph::common::sharded_conf().stop();
+ return crimson::common::sharded_conf().stop();
});
}
namespace {
seastar::logger& logger() {
- return ceph::get_logger(ceph_subsys_ms);
+ return crimson::get_logger(ceph_subsys_ms);
}
static std::random_device rd;
{
struct test_state {
struct Server final
- : public ceph::net::Dispatcher,
+ : public crimson::net::Dispatcher,
public seastar::peering_sharded_service<Server> {
- ceph::net::Messenger *msgr = nullptr;
- ceph::auth::DummyAuthClientServer dummy_auth;
+ crimson::net::Messenger *msgr = nullptr;
+ crimson::auth::DummyAuthClientServer dummy_auth;
Dispatcher* get_local_shard() override {
return &(container().local());
seastar::future<> stop() {
return seastar::make_ready_future<>();
}
- seastar::future<> ms_dispatch(ceph::net::Connection* c,
+ seastar::future<> ms_dispatch(crimson::net::Connection* c,
MessageRef m) override {
if (verbose) {
logger().info("server got {}", *m);
const std::string& lname,
const uint64_t nonce,
const entity_addr_t& addr) {
- auto&& fut = ceph::net::Messenger::create(name, lname, nonce);
- return fut.then([this, addr](ceph::net::Messenger *messenger) {
+ auto&& fut = crimson::net::Messenger::create(name, lname, nonce);
+ return fut.then([this, addr](crimson::net::Messenger *messenger) {
return container().invoke_on_all([messenger](auto& server) {
server.msgr = messenger->get_local_shard();
- server.msgr->set_default_policy(ceph::net::SocketPolicy::stateless_server(0));
+ server.msgr->set_default_policy(crimson::net::SocketPolicy::stateless_server(0));
server.msgr->set_require_authorizer(false);
server.msgr->set_auth_client(&server.dummy_auth);
server.msgr->set_auth_server(&server.dummy_auth);
};
struct Client final
- : public ceph::net::Dispatcher,
+ : public crimson::net::Dispatcher,
public seastar::peering_sharded_service<Client> {
struct PingSession : public seastar::enable_shared_from_this<PingSession> {
unsigned rounds;
std::bernoulli_distribution keepalive_dist;
- ceph::net::Messenger *msgr = nullptr;
- std::map<ceph::net::Connection*, seastar::promise<>> pending_conns;
- std::map<ceph::net::Connection*, PingSessionRef> sessions;
- ceph::auth::DummyAuthClientServer dummy_auth;
+ crimson::net::Messenger *msgr = nullptr;
+ std::map<crimson::net::Connection*, seastar::promise<>> pending_conns;
+ std::map<crimson::net::Connection*, PingSessionRef> sessions;
+ crimson::auth::DummyAuthClientServer dummy_auth;
Client(unsigned rounds, double keepalive_ratio)
: rounds(rounds),
keepalive_dist(std::bernoulli_distribution{keepalive_ratio}) {}
- PingSessionRef find_session(ceph::net::Connection* c) {
+ PingSessionRef find_session(crimson::net::Connection* c) {
auto found = sessions.find(c);
if (found == sessions.end()) {
ceph_assert(false);
seastar::future<> stop() {
return seastar::now();
}
- seastar::future<> ms_handle_connect(ceph::net::ConnectionRef conn) override {
+ seastar::future<> ms_handle_connect(crimson::net::ConnectionRef conn) override {
auto session = seastar::make_shared<PingSession>();
auto [i, added] = sessions.emplace(conn.get(), session);
std::ignore = i;
session->connected_time = mono_clock::now();
return seastar::now();
}
- seastar::future<> ms_dispatch(ceph::net::Connection* c,
+ seastar::future<> ms_dispatch(crimson::net::Connection* c,
MessageRef m) override {
auto session = find_session(c);
++(session->count);
seastar::future<> init(const entity_name_t& name,
const std::string& lname,
const uint64_t nonce) {
- return ceph::net::Messenger::create(name, lname, nonce)
- .then([this](ceph::net::Messenger *messenger) {
+ return crimson::net::Messenger::create(name, lname, nonce)
+ .then([this](crimson::net::Messenger *messenger) {
return container().invoke_on_all([messenger](auto& client) {
client.msgr = messenger->get_local_shard();
- client.msgr->set_default_policy(ceph::net::SocketPolicy::lossy_client(0));
+ client.msgr->set_default_policy(crimson::net::SocketPolicy::lossy_client(0));
client.msgr->set_auth_client(&client.dummy_auth);
client.msgr->set_auth_server(&client.dummy_auth);
}).then([this, messenger] {
}
private:
- seastar::future<> do_dispatch_pingpong(ceph::net::Connection* conn) {
+ seastar::future<> do_dispatch_pingpong(crimson::net::Connection* conn) {
return container().invoke_on_all([conn](auto& client) {
auto [i, added] = client.pending_conns.emplace(conn, seastar::promise<>());
std::ignore = i;
logger().info("test_echo(rounds={}, keepalive_ratio={}, v2={}):",
rounds, keepalive_ratio, v2);
return seastar::when_all_succeed(
- ceph::net::create_sharded<test_state::Server>(),
- ceph::net::create_sharded<test_state::Server>(),
- ceph::net::create_sharded<test_state::Client>(rounds, keepalive_ratio),
- ceph::net::create_sharded<test_state::Client>(rounds, keepalive_ratio))
+ crimson::net::create_sharded<test_state::Server>(),
+ crimson::net::create_sharded<test_state::Server>(),
+ crimson::net::create_sharded<test_state::Client>(rounds, keepalive_ratio),
+ crimson::net::create_sharded<test_state::Client>(rounds, keepalive_ratio))
.then([rounds, keepalive_ratio, v2](test_state::Server *server1,
test_state::Server *server2,
test_state::Client *client1,
{
struct test_state {
struct Server final
- : public ceph::net::Dispatcher,
+ : public crimson::net::Dispatcher,
public seastar::peering_sharded_service<Server> {
- ceph::net::Messenger *msgr = nullptr;
+ crimson::net::Messenger *msgr = nullptr;
int count = 0;
seastar::promise<> on_second; // satisfied on second dispatch
seastar::promise<> on_done; // satisfied when first dispatch unblocks
- ceph::auth::DummyAuthClientServer dummy_auth;
+ crimson::auth::DummyAuthClientServer dummy_auth;
- seastar::future<> ms_dispatch(ceph::net::Connection* c,
+ seastar::future<> ms_dispatch(crimson::net::Connection* c,
MessageRef m) override {
switch (++count) {
case 1:
const std::string& lname,
const uint64_t nonce,
const entity_addr_t& addr) {
- return ceph::net::Messenger::create(name, lname, nonce, 0)
- .then([this, addr](ceph::net::Messenger *messenger) {
+ return crimson::net::Messenger::create(name, lname, nonce, 0)
+ .then([this, addr](crimson::net::Messenger *messenger) {
return container().invoke_on_all([messenger](auto& server) {
server.msgr = messenger->get_local_shard();
- server.msgr->set_default_policy(ceph::net::SocketPolicy::stateless_server(0));
+ server.msgr->set_default_policy(crimson::net::SocketPolicy::stateless_server(0));
server.msgr->set_auth_client(&server.dummy_auth);
server.msgr->set_auth_server(&server.dummy_auth);
}).then([messenger, addr] {
};
struct Client final
- : public ceph::net::Dispatcher,
+ : public crimson::net::Dispatcher,
public seastar::peering_sharded_service<Client> {
- ceph::net::Messenger *msgr = nullptr;
- ceph::auth::DummyAuthClientServer dummy_auth;
+ crimson::net::Messenger *msgr = nullptr;
+ crimson::auth::DummyAuthClientServer dummy_auth;
seastar::future<> init(const entity_name_t& name,
const std::string& lname,
const uint64_t nonce) {
- return ceph::net::Messenger::create(name, lname, nonce, 0)
- .then([this](ceph::net::Messenger *messenger) {
+ return crimson::net::Messenger::create(name, lname, nonce, 0)
+ .then([this](crimson::net::Messenger *messenger) {
return container().invoke_on_all([messenger](auto& client) {
client.msgr = messenger->get_local_shard();
- client.msgr->set_default_policy(ceph::net::SocketPolicy::lossy_client(0));
+ client.msgr->set_default_policy(crimson::net::SocketPolicy::lossy_client(0));
client.msgr->set_auth_client(&client.dummy_auth);
client.msgr->set_auth_server(&client.dummy_auth);
}).then([this, messenger] {
logger().info("test_concurrent_dispatch(v2={}):", v2);
return seastar::when_all_succeed(
- ceph::net::create_sharded<test_state::Server>(),
- ceph::net::create_sharded<test_state::Client>())
+ crimson::net::create_sharded<test_state::Server>(),
+ crimson::net::create_sharded<test_state::Client>())
.then([v2](test_state::Server *server,
test_state::Client *client) {
entity_addr_t addr;
.then([server, client] {
return client->msgr->connect(server->msgr->get_myaddr(),
entity_name_t::TYPE_OSD);
- }).then([](ceph::net::ConnectionXRef conn) {
+ }).then([](crimson::net::ConnectionXRef conn) {
// send two messages
return (*conn)->send(make_message<MPing>()).then([conn] {
return (*conn)->send(make_message<MPing>());
seastar::future<> test_preemptive_shutdown(bool v2) {
struct test_state {
class Server final
- : public ceph::net::Dispatcher,
+ : public crimson::net::Dispatcher,
public seastar::peering_sharded_service<Server> {
- ceph::net::Messenger *msgr = nullptr;
- ceph::auth::DummyAuthClientServer dummy_auth;
+ crimson::net::Messenger *msgr = nullptr;
+ crimson::auth::DummyAuthClientServer dummy_auth;
- seastar::future<> ms_dispatch(ceph::net::Connection* c,
+ seastar::future<> ms_dispatch(crimson::net::Connection* c,
MessageRef m) override {
return c->send(make_message<MPing>());
}
const std::string& lname,
const uint64_t nonce,
const entity_addr_t& addr) {
- return ceph::net::Messenger::create(name, lname, nonce, seastar::engine().cpu_id()
- ).then([this, addr](ceph::net::Messenger *messenger) {
+ return crimson::net::Messenger::create(name, lname, nonce, seastar::engine().cpu_id()
+ ).then([this, addr](crimson::net::Messenger *messenger) {
return container().invoke_on_all([messenger](auto& server) {
server.msgr = messenger->get_local_shard();
- server.msgr->set_default_policy(ceph::net::SocketPolicy::stateless_server(0));
+ server.msgr->set_default_policy(crimson::net::SocketPolicy::stateless_server(0));
server.msgr->set_auth_client(&server.dummy_auth);
server.msgr->set_auth_server(&server.dummy_auth);
}).then([messenger, addr] {
};
class Client final
- : public ceph::net::Dispatcher,
+ : public crimson::net::Dispatcher,
public seastar::peering_sharded_service<Client> {
- ceph::net::Messenger *msgr = nullptr;
- ceph::auth::DummyAuthClientServer dummy_auth;
+ crimson::net::Messenger *msgr = nullptr;
+ crimson::auth::DummyAuthClientServer dummy_auth;
bool stop_send = false;
seastar::promise<> stopped_send_promise;
- seastar::future<> ms_dispatch(ceph::net::Connection* c,
+ seastar::future<> ms_dispatch(crimson::net::Connection* c,
MessageRef m) override {
return seastar::now();
}
seastar::future<> init(const entity_name_t& name,
const std::string& lname,
const uint64_t nonce) {
- return ceph::net::Messenger::create(name, lname, nonce, seastar::engine().cpu_id()
- ).then([this](ceph::net::Messenger *messenger) {
+ return crimson::net::Messenger::create(name, lname, nonce, seastar::engine().cpu_id()
+ ).then([this](crimson::net::Messenger *messenger) {
return container().invoke_on_all([messenger](auto& client) {
client.msgr = messenger->get_local_shard();
- client.msgr->set_default_policy(ceph::net::SocketPolicy::lossy_client(0));
+ client.msgr->set_default_policy(crimson::net::SocketPolicy::lossy_client(0));
client.msgr->set_auth_client(&client.dummy_auth);
client.msgr->set_auth_server(&client.dummy_auth);
}).then([this, messenger] {
}
seastar::future<> send_pings(const entity_addr_t& addr) {
return msgr->connect(addr, entity_name_t::TYPE_OSD
- ).then([this](ceph::net::ConnectionXRef conn) {
+ ).then([this](crimson::net::ConnectionXRef conn) {
// forwarded to stopped_send_promise
(void) seastar::do_until(
[this] { return stop_send; },
logger().info("test_preemptive_shutdown(v2={}):", v2);
return seastar::when_all_succeed(
- ceph::net::create_sharded<test_state::Server>(),
- ceph::net::create_sharded<test_state::Client>()
+ crimson::net::create_sharded<test_state::Server>(),
+ crimson::net::create_sharded<test_state::Client>()
).then([v2](test_state::Server *server,
test_state::Client *client) {
entity_addr_t addr;
}
using ceph::msgr::v2::Tag;
-using ceph::net::bp_action_t;
-using ceph::net::bp_type_t;
-using ceph::net::Breakpoint;
-using ceph::net::Connection;
-using ceph::net::ConnectionRef;
-using ceph::net::custom_bp_t;
-using ceph::net::Dispatcher;
-using ceph::net::Interceptor;
-using ceph::net::Messenger;
-using ceph::net::SocketPolicy;
-using ceph::net::tag_bp_t;
+using crimson::net::bp_action_t;
+using crimson::net::bp_type_t;
+using crimson::net::Breakpoint;
+using crimson::net::Connection;
+using crimson::net::ConnectionRef;
+using crimson::net::custom_bp_t;
+using crimson::net::Dispatcher;
+using crimson::net::Interceptor;
+using crimson::net::Messenger;
+using crimson::net::SocketPolicy;
+using crimson::net::tag_bp_t;
struct counter_t { unsigned counter = 0; };
}
class FailoverSuite : public Dispatcher {
- ceph::auth::DummyAuthClientServer dummy_auth;
+ crimson::auth::DummyAuthClientServer dummy_auth;
Messenger& test_msgr;
const entity_addr_t test_peer_addr;
TestInterceptor interceptor;
};
class FailoverTest : public Dispatcher {
- ceph::auth::DummyAuthClientServer dummy_auth;
+ crimson::auth::DummyAuthClientServer dummy_auth;
Messenger& cmd_msgr;
ConnectionRef cmd_conn;
const entity_addr_t test_addr;
class FailoverSuitePeer : public Dispatcher {
using cb_t = std::function<seastar::future<>()>;
- ceph::auth::DummyAuthClientServer dummy_auth;
+ crimson::auth::DummyAuthClientServer dummy_auth;
Messenger& peer_msgr;
cb_t op_callback;
};
class FailoverTestPeer : public Dispatcher {
- ceph::auth::DummyAuthClientServer dummy_auth;
+ crimson::auth::DummyAuthClientServer dummy_auth;
Messenger& cmd_msgr;
ConnectionRef cmd_conn;
std::unique_ptr<FailoverSuitePeer> test_suite;
#include "crimson/net/Connection.h"
#include "crimson/net/Messenger.h"
-using Config = ceph::common::ConfigProxy;
-using MonClient = ceph::mon::Client;
+using Config = crimson::common::ConfigProxy;
+using MonClient = crimson::mon::Client;
namespace {
-class DummyAuthHandler : public ceph::common::AuthHandler {
+class DummyAuthHandler : public crimson::common::AuthHandler {
public:
void handle_authentication(const EntityName& name,
const AuthCapsInfo& caps) final
static seastar::future<> test_monc()
{
- return ceph::common::sharded_conf().start(EntityName{}, string_view{"ceph"}).then([] {
+ return crimson::common::sharded_conf().start(EntityName{}, string_view{"ceph"}).then([] {
std::vector<const char*> args;
std::string cluster;
std::string conf_file_list;
CEPH_ENTITY_TYPE_CLIENT,
&cluster,
&conf_file_list);
- auto& conf = ceph::common::local_conf();
+ auto& conf = crimson::common::local_conf();
conf->name = init_params.name;
conf->cluster = cluster;
return conf.parse_config_files(conf_file_list);
}).then([] {
- return ceph::common::sharded_perf_coll().start();
+ return crimson::common::sharded_perf_coll().start();
}).then([] {
- return ceph::net::Messenger::create(entity_name_t::OSD(0), "monc", 0,
+ return crimson::net::Messenger::create(entity_name_t::OSD(0), "monc", 0,
seastar::engine().cpu_id())
- .then([] (ceph::net::Messenger *msgr) {
- auto& conf = ceph::common::local_conf();
+ .then([] (crimson::net::Messenger *msgr) {
+ auto& conf = crimson::common::local_conf();
if (conf->ms_crc_data) {
msgr->set_crc_data();
}
});
});
}).finally([] {
- return ceph::common::sharded_perf_coll().stop().then([] {
- return ceph::common::sharded_conf().stop();
+ return crimson::common::sharded_perf_coll().stop().then([] {
+ return crimson::common::sharded_conf().stop();
});
});
}
static constexpr uint64_t PERF_VAL = 42;
static seastar::future<> test_perfcounters(){
- return ceph::common::sharded_perf_coll().start().then([] {
- return ceph::common::sharded_perf_coll().invoke_on_all([] (auto& s){
+ return crimson::common::sharded_perf_coll().start().then([] {
+ return crimson::common::sharded_perf_coll().invoke_on_all([] (auto& s){
std::string name =fmt::format("seastar-osd::shard-{}",seastar::engine().cpu_id());
PerfCountersBuilder plb(NULL, name, PERFTEST_FIRST,PERFTEST_LAST);
plb.add_u64_counter(PERFTEST_INDEX, "perftest_count", "count perftest");
s.get_perf_collection()->add(perf_logger);
});
}).then([]{
- return ceph::common::sharded_perf_coll().invoke_on_all([] (auto& s){
+ return crimson::common::sharded_perf_coll().invoke_on_all([] (auto& s){
auto pcc = s.get_perf_collection();
pcc->with_counters([](auto& by_path){
for (auto& perf_counter : by_path) {
});
});
}).finally([] {
- return ceph::common::sharded_perf_coll().stop();
+ return crimson::common::sharded_perf_coll().stop();
});
}
namespace {
using seastar::future;
-using ceph::net::error;
-using ceph::net::Socket;
-using ceph::net::SocketFRef;
-using ceph::net::stop_t;
+using crimson::net::error;
+using crimson::net::Socket;
+using crimson::net::SocketFRef;
+using crimson::net::stop_t;
static seastar::logger logger{"crimsontest"};
future<> test_bind_same() {
logger.info("test_bind_same()...");
- return ceph::net::create_sharded<AcceptTest>(
+ return crimson::net::create_sharded<AcceptTest>(
).then([] (AcceptTest* factory) {
return factory->bind_accept(
).then([] {
// try to bind the same address
- return ceph::net::create_sharded<AcceptTest>(
+ return crimson::net::create_sharded<AcceptTest>(
).then([] (AcceptTest* factory2) {
return factory2->bind_accept(
).then([] {
future<> test_accept() {
logger.info("test_accept()");
- return ceph::net::create_sharded<AcceptTest>(
+ return crimson::net::create_sharded<AcceptTest>(
).then([] (AcceptTest* factory) {
return factory->bind_accept().then([factory] {
return seastar::when_all(
}
static future<SocketFRef, SocketFRef> get_sockets() {
- return ceph::net::create_sharded<SocketFactory>(seastar::engine().cpu_id()
+ return crimson::net::create_sharded<SocketFactory>(seastar::engine().cpu_id()
).then([] (SocketFactory* factory) {
return factory->bind_accept().then([factory] {
return connect();
#include "crimson/thread/ThreadPool.h"
using namespace std::chrono_literals;
-using ThreadPool = ceph::thread::ThreadPool;
+using ThreadPool = crimson::thread::ThreadPool;
seastar::future<> test_accumulate(ThreadPool& tp) {
static constexpr auto N = 5;